Example #1
 def setUp(self):
     super(TestConnection, self).setUp()
     factory = Factory()
     self.job = factory.create_ssh_job('sample_jobs/ssh-deploy.yaml',
                                       mkdtemp())
     self.guest_job = factory.create_bbb_job(
         'sample_jobs/bbb-ssh-guest.yaml', mkdtemp())
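Every snippet on this page hands a fresh mkdtemp() directory to the factory as the job output directory, and nothing shown here removes it again. A minimal sketch of a test case that registers that cleanup itself, assuming the plain tempfile.mkdtemp (the project's own mkdtemp helper may already take care of removal):

import shutil
import unittest
from tempfile import mkdtemp


class TempOutputTestCase(unittest.TestCase):
    # Illustrative only: create a per-test output directory and make sure it
    # is removed even when setUp or the test itself fails part-way through.
    def setUp(self):
        super(TempOutputTestCase, self).setUp()
        self.output_dir = mkdtemp()
        self.addCleanup(shutil.rmtree, self.output_dir, ignore_errors=True)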
Example #2
 def test_compatibility(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
     pipe = job.describe()
     self.assertEqual(pipe['compatibility'], DeployImages.compatibility)
     self.assertEqual(job.compatibility, DeployImages.compatibility)
     kvm_yaml = os.path.join(os.path.dirname(__file__), 'sample_jobs/kvm.yaml')
     job_def = yaml.load(open(kvm_yaml, 'r'))
     job_def['compatibility'] = job.compatibility
     parser = JobParser()
     device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
     try:
         job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
     except NotImplementedError:
         # some deployments listed in basics.yaml are not implemented yet
         pass
     self.assertIsNotNone(job)
     job_def['compatibility'] = job.compatibility + 1
     self.assertRaises(
         JobError, parser.parse, yaml.dump(job_def), device, 4212, None, mkdtemp()
     )
     job_def['compatibility'] = 0
     try:
         job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
     except NotImplementedError:
         # some deployments listed in basics.yaml are not implemented yet
         pass
     self.assertIsNotNone(job)
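The test above raises the declared compatibility of the job definition beyond the value exposed by the pipeline and expects JobError. A hedged sketch of the kind of check the parser is being exercised for; JobError is the project's exception and is stubbed here only so the snippet stands alone:

class JobError(Exception):
    # Stand-in for the project's JobError exception.
    pass


def check_compatibility(job_def, pipeline_compatibility):
    # Reject a job definition that demands a newer compatibility level than
    # the pipeline being built can provide; lower (older) levels are accepted.
    requested = int(job_def.get('compatibility', 0))
    if requested > pipeline_compatibility:
        raise JobError("Job requires compatibility %d, pipeline supports %d"
                       % (requested, pipeline_compatibility))
    return requested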
Example #3
    def test_compatibility(self):
        """
        Test compatibility support.

        The class to use in the comparison will change according to which class
        is related to the change which caused the compatibility to be modified.
        """
        factory = Factory()
        job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
        pipe = job.describe()
        self.assertEqual(pipe['compatibility'], ExpectShellSession.compatibility)
        self.assertEqual(job.compatibility, ExpectShellSession.compatibility)
        kvm_yaml = os.path.join(os.path.dirname(__file__), 'sample_jobs/kvm.yaml')
        job_def = yaml.load(open(kvm_yaml, 'r'))
        job_def['compatibility'] = job.compatibility
        parser = JobParser()
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
        try:
            job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
        except NotImplementedError:
            # some deployments listed in basics.yaml are not implemented yet
            pass
        self.assertIsNotNone(job)
        job_def['compatibility'] = job.compatibility + 1
        self.assertRaises(
            JobError, parser.parse, yaml.dump(job_def), device, 4212, None, mkdtemp()
        )
        job_def['compatibility'] = 0
        try:
            job = parser.parse(yaml.dump(job_def), device, 4212, None, output_dir=mkdtemp())
        except NotImplementedError:
            # some deployments listed in basics.yaml are not implemented yet
            pass
        self.assertIsNotNone(job)
Example #4
 def setUp(self):
     super(TestConnection, self).setUp()
     factory = ConnectionFactory()
     self.job = factory.create_ssh_job('sample_jobs/ssh-deploy.yaml',
                                       mkdtemp())
     self.guest_job = factory.create_bbb_job(
         'sample_jobs/bbb-ssh-guest.yaml', mkdtemp())
     logging.getLogger('dispatcher').addHandler(logging.NullHandler())
Example #5
    def test_compatibility(self):
        """
        Test compatibility support.

        The class to use in the comparison will change according to which class
        is related to the change which caused the compatibility to be modified.
        """
        factory = Factory()
        job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
        pipe = job.describe()
        self.assertEqual(pipe['compatibility'], DeployImages.compatibility)
        self.assertEqual(job.compatibility, DeployImages.compatibility)
        kvm_yaml = os.path.join(os.path.dirname(__file__),
                                'sample_jobs/kvm.yaml')
        with open(kvm_yaml, 'r') as kvm_yaml:
            job_def = yaml.load(kvm_yaml)
        job_def['compatibility'] = job.compatibility
        parser = JobParser()
        device = NewDevice(
            os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
        try:
            job = parser.parse(yaml.dump(job_def),
                               device,
                               4212,
                               None,
                               None,
                               None,
                               output_dir=mkdtemp())
        except NotImplementedError:
            # some deployments listed in basics.yaml are not implemented yet
            pass
        self.assertIsNotNone(job)
        job_def['compatibility'] = job.compatibility + 1
        self.assertRaises(JobError, parser.parse, yaml.dump(job_def), device,
                          4212, None, None, None, mkdtemp())
        job_def['compatibility'] = 0
        try:
            job = parser.parse(yaml.dump(job_def),
                               device,
                               4212,
                               None,
                               None,
                               None,
                               output_dir=mkdtemp())
        except NotImplementedError:
            # some deployments listed in basics.yaml are not implemented yet
            pass
        self.assertIsNotNone(job)
Example #6
 def run(self, connection, args=None):
     connection = super(ApplyOverlayTftp, self).run(connection, args)
     overlay_file = None
     directory = None
     nfs_url = None
     if self.parameters.get('nfsrootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'nfsroot')
         self.logger.info("Applying overlay to NFS")
     elif self.parameters.get('nfs_url', None) is not None:
         nfs_url = self.parameters.get('nfs_url')
         overlay_file = self.data['compress-overlay'].get('output')
         self.logger.info("Applying overlay to persistent NFS")
         # need to mount the persistent NFS here.
         directory = mkdtemp(autoremove=False)
         try:
             subprocess.check_output(['mount', '-t', 'nfs', nfs_url, directory])
         except subprocess.CalledProcessError as exc:
             raise JobError(exc)
     elif self.parameters.get('ramdisk', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
         self.logger.info("Applying overlay to ramdisk")
     elif self.parameters.get('rootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'root')
     else:
         self.logger.debug("No overlay directory")
         self.logger.debug(self.parameters)
     untar_file(overlay_file, directory)
     if nfs_url:
         subprocess.check_output(['umount', directory])
         os.rmdir(directory)  # fails if the umount fails
     return connection
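The mkdtemp() used throughout these snippets accepts basedir= and autoremove= keyword arguments, which the standard tempfile.mkdtemp does not. A rough sketch of what such a wrapper could look like, offered purely as an assumption for readers who do not have the project helper in front of them:

import atexit
import os
import shutil
import tempfile


def mkdtemp(autoremove=True, basedir=None):
    # Sketch only: basedir selects where the directory is created (for example
    # a dispatcher download or tftp area); autoremove=False leaves cleanup to
    # the caller, e.g. when the directory is about to become a mount point.
    if basedir is not None and not os.path.isdir(basedir):
        os.makedirs(basedir)
    path = tempfile.mkdtemp(dir=basedir)
    if autoremove:
        atexit.register(shutil.rmtree, path, ignore_errors=True)
    return path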
Example #7
 def test_fastboot_lxc(self):
     job = self.factory.create_hikey_job('sample_jobs/hi6220-hikey.yaml',
                                         mkdtemp())
     description_ref = self.pipeline_reference('hi6220-hikey.yaml', job=job)
     self.assertEqual(description_ref, job.pipeline.describe(False))
     uefi_menu = [action for action in job.pipeline.actions if action.name == 'uefi-menu-action'][0]
     self.assertIn('commands', uefi_menu.parameters)
     self.assertIn('fastboot', uefi_menu.parameters['commands'])
     self.assertEqual(
         job.device.pre_power_command,
         '/usr/local/lab-scripts/usb_hub_control -p 8000 -m sync -u 06')
     lxc_deploy = [action for action in job.pipeline.actions if action.name == 'lxc-deploy'][0]
     overlay = [action for action in lxc_deploy.internal_pipeline.actions if action.name == 'lava-overlay'][0]
     testdef = [action for action in overlay.internal_pipeline.actions if action.name == 'test-definition'][0]
     job.validate()
     self.assertEqual(
         {
             '1.8.3.20': '4_android-optee',
             '1.8.3.4': '0_get-adb-serial',
             '1.8.3.12': '2_android-busybox',
             '1.8.3.8': '1_android-meminfo',
             '1.8.3.16': '3_android-ping-dns'},
         testdef.get_namespace_data(action='test-runscript-overlay', label='test-runscript-overlay', key='testdef_levels'))
     for testdef in testdef.test_list[0]:
         self.assertEqual('git', testdef['from'])
Example #8
 def test_basic_actions(self):
     factory = Factory()
     job = factory.create_fake_qemu_job(mkdtemp())
     if not job:
         self.skipTest("not all deployments have been implemented")
     self.assertIsInstance(job, Job)
     self.assertIsInstance(job.pipeline, Pipeline)
Example #9
 def run(self, connection, args=None):
     """
     Check if a lava-test-shell has been requested, implement the overlay
     * create test runner directories beneath the temporary location
     * copy runners into test runner directories
     """
     self.data[self.name].setdefault('location', mkdtemp())
     self.logger.debug("Preparing overlay tarball in %s" % self.data[self.name]['location'])
     if 'lava_test_results_dir' not in self.data:
         self.logger.error("Unable to identify lava test results directory - missing OS type?")
         return connection
     lava_path = os.path.abspath("%s/%s" % (self.data[self.name]['location'], self.data['lava_test_results_dir']))
     for runner_dir in ['bin', 'tests', 'results']:
         # avoid os.path.join as lava_test_results_dir startswith / so location is *dropped* by join.
         path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
         if not os.path.exists(path):
              os.makedirs(path, 0o755)
             self.logger.debug("makedir: %s" % path)
     for fname in self.scripts_to_copy:
         with open(fname, 'r') as fin:
             output_file = '%s/bin/%s' % (lava_path, os.path.basename(fname))
             self.logger.debug("Creating %s" % output_file)
             with open(output_file, 'w') as fout:
                 fout.write("#!%s\n\n" % self.parameters['deployment_data']['lava_test_sh_cmd'])
                 fout.write(fin.read())
                 os.fchmod(fout.fileno(), self.xmod)
     connection = super(OverlayAction, self).run(connection, args)
     return connection
Example #10
 def setUp(self):
     self.parameters = {
         "job_name": "fakejob",
         'output_dir': mkdtemp(),
         'timeouts': {
             'job': {
                 'seconds': 3
             }
         },
         "actions": [
             {
                 'deploy': {
                     'failure_retry': 3
                 },
                 'boot': {
                     'failure_retry': 4
                 },
                 'test': {
                     'failure_retry': 5
                 }
             }
         ]
     }
     self.fakejob = TestTimeout.FakeJob(self.parameters)
     # copy of the _timeout function from parser.
     if 'timeouts' in self.parameters:
         if 'job' in self.parameters['timeouts']:
             duration = Timeout.parse(self.parameters['timeouts']['job'])
             self.fakejob.timeout = Timeout(self.parameters['job_name'], duration)
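Timeout.parse() above turns the timeouts/job block of the job definition into a duration for the Timeout object. A minimal sketch of an equivalent conversion, assuming the block uses days/hours/minutes/seconds keys (the project's real parser may accept more):

import datetime


def parse_timeout(data, default_seconds=30):
    # Illustrative only: {'seconds': 3} -> 3, {'minutes': 5} -> 300, and so on.
    if not isinstance(data, dict):
        return default_seconds
    duration = datetime.timedelta(
        days=data.get('days', 0),
        hours=data.get('hours', 0),
        minutes=data.get('minutes', 0),
        seconds=data.get('seconds', 0))
    return int(duration.total_seconds()) or default_seconds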
Example #11
 def run(self, connection, args=None):
     connection = super(AdbOverlayUnpack, self).run(connection, args)
     serial_number = self.job.device['adb_serial_number']
     overlay_type = 'adb-overlay'
     overlay_file = self.data['compress-overlay'].get('output')
     host_dir = mkdtemp()
     target_dir = ANDROID_TMP_DIR
     try:
         tar = tarfile.open(overlay_file)
         tar.extractall(host_dir)
         tar.close()
     except tarfile.TarError as exc:
         raise RuntimeError("Unable to unpack %s overlay: %s" % (
             overlay_type, exc))
     host_dir = os.path.join(host_dir, 'data/local/tmp')
     adb_cmd = ['adb', '-s', serial_number, 'push', host_dir,
                target_dir]
     command_output = self.run_command(adb_cmd)
     if command_output and 'pushed' not in command_output:
         raise JobError("Unable to push overlay files with adb: %s" %
                        command_output)
     adb_cmd = ['adb', '-s', serial_number, 'shell', '/system/bin/chmod',
                '0777', target_dir]
     command_output = self.run_command(adb_cmd)
     if command_output and 'pushed' not in command_output:
         raise JobError("Unable to chmod overlay files with adb: %s" %
                        command_output)
     self.data['boot-result'] = 'failed' if self.errors else 'success'
     return connection
Example #12
 def run(self, connection, max_end_time, args=None):
     connection = super(FlashCMSISAction, self).run(connection,
                                                    max_end_time, args)
     dstdir = mkdtemp()
     mount_command = "mount -t vfat %s %s" % (self.usb_mass_device, dstdir)
     self.run_command(mount_command.split(' '), allow_silent=True)
     # mount
     for f in self.filelist:
         self.logger.debug("Copying %s to %s", f, dstdir)
         shutil.copy2(f, dstdir)
     # umount
     umount_command = "umount %s" % self.usb_mass_device
     self.run_command(umount_command.split(' '), allow_silent=True)
     if self.errors:
         raise InfrastructureError("Unable to (un)mount USB device: %s" %
                                   self.usb_mass_device)
     res = 'failed' if self.errors else 'success'
     self.set_namespace_data(action='boot',
                             label='shared',
                             key='boot-result',
                             value=res)
     self.set_namespace_data(action='shared',
                             label='shared',
                             key='connection',
                             value=connection)
     return connection
Example #13
 def run(self, connection, args=None):
     if not self.parameters.get(self.param_key, None):  # idempotency
         return connection
     connection = super(ExtractRootfs, self).run(connection, args)
     root = self.data['download_action'][self.param_key]['file']
     root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     if self.use_tarfile:
         try:
             tar = tarfile.open(root)
             tar.extractall(root_dir)
             tar.close()
         except tarfile.TarError as exc:
             raise JobError("Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
     elif self.use_lzma:
         with contextlib.closing(lzma.LZMAFile(root)) as xz:
             with tarfile.open(fileobj=xz) as tarball:
                 try:
                     tarball.extractall(root_dir)
                 except tarfile.TarError as exc:
                     raise JobError("Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
     else:
         raise RuntimeError("Unable to decompress %s: '%s'" % (self.param_key, os.path.basename(root)))
     self.set_common_data('file', self.file_key, root_dir)
     self.logger.debug("Extracted %s to %s" % (self.file_key, root_dir))
     return connection
Example #14
 def test_zimage_nobootz(self):
     # drop bootz from the device for this part of the test
     del self.device['parameters']['bootz']
     self.deploy_block['kernel']['type'] = 'zimage'
     job = self.parser.parse(yaml.dump(self.base_data),
                             self.device,
                             4212,
                             None,
                             "",
                             output_dir=mkdtemp())
     job.logger = DummyLogger()
     job.validate()
     deploy = [
         action for action in job.pipeline.actions
         if action.name == 'tftp-deploy'
     ][0]
     overlay = [
         action for action in deploy.internal_pipeline.actions
         if action.name == 'prepare-tftp-overlay'
     ][0]
     prepare = [
         action for action in overlay.internal_pipeline.actions
         if action.name == 'prepare-kernel'
     ][0]
     uboot_prepare = [
         action for action in prepare.internal_pipeline.actions
         if action.name == 'uboot-prepare-kernel'
     ][0]
     self.assertEqual('zimage', uboot_prepare.kernel_type)
     self.assertEqual('bootm', uboot_prepare.bootcommand)
     self.assertTrue(uboot_prepare.mkimage_conversion)
Example #15
 def run(self, connection, args=None):
     if not self.parameters.get(self.param_key, None):  # idempotency
         return connection
     connection = super(ExtractRootfs, self).run(connection, args)
     root = self.data['download_action'][self.param_key]['file']
     root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     if self.use_tarfile:
         try:
             tar = tarfile.open(root)
             tar.extractall(root_dir)
             tar.close()
         except tarfile.TarError as exc:
             raise JobError("Unable to unpack %s: '%s' - %s" %
                            (self.param_key, os.path.basename(root), exc))
     elif self.use_lzma:
         with contextlib.closing(lzma.LZMAFile(root)) as xz:
             with tarfile.open(fileobj=xz) as tarball:
                 try:
                     tarball.extractall(root_dir)
                 except tarfile.TarError as exc:
                     raise JobError(
                         "Unable to unpack %s: '%s' - %s" %
                         (self.param_key, os.path.basename(root), exc))
     else:
         raise RuntimeError("Unable to decompress %s: '%s'" %
                            (self.param_key, os.path.basename(root)))
     self.set_common_data('file', self.file_key, root_dir)
     self.logger.debug("Extracted %s to %s" % (self.file_key, root_dir))
     return connection
Example #16
 def setUp(self):
     super(TestTimeout, self).setUp()
     self.parameters = {
         "job_name":
         "fakejob",
         'output_dir':
         mkdtemp(),
         'timeouts': {
             'job': {
                 'seconds': 3
             }
         },
         "actions": [{
             'deploy': {
                 'namespace': 'common',
                 'failure_retry': 3
             },
             'boot': {
                 'namespace': 'common',
                 'failure_retry': 4
             },
             'test': {
                 'namespace': 'common',
                 'failure_retry': 5
             }
         }]
     }
     self.fakejob = TestTimeout.FakeJob(self.parameters)
     # copy of the _timeout function from parser.
     if 'timeouts' in self.parameters:
         if 'job' in self.parameters['timeouts']:
             duration = Timeout.parse(self.parameters['timeouts']['job'])
             self.fakejob.timeout = Timeout(self.parameters['job_name'],
                                            duration)
Example #17
 def setUp(self):
     super(TestAutoLogin, self).setUp()
     factory = Factory()
     self.job = factory.create_kvm_job('sample_jobs/kvm-inline.yaml',
                                       mkdtemp())
     self.job.logger = DummyLogger()
     self.max_end_time = time.time() + 30
Example #18
 def setUp(self):
     super(TestIsoJob, self).setUp()
     factory = InstallerFactory()
     self.job = factory.create_qemu_installer_job(mkdtemp())
     self.assertIsNotNone(self.job)
     self.assertIsInstance(self.job, Job)
     self.assertIsInstance(self.job.pipeline, Pipeline)
Example #19
 def test_image(self):
     self.deploy_block['kernel']['type'] = 'image'
     job = self.parser.parse(yaml.dump(self.base_data),
                             self.device,
                             4212,
                             None,
                             "",
                             output_dir=mkdtemp())
     job.logger = DummyLogger()
     job.validate()
     deploy = [
         action for action in job.pipeline.actions
         if action.name == 'tftp-deploy'
     ][0]
     overlay = [
         action for action in deploy.internal_pipeline.actions
         if action.name == 'prepare-tftp-overlay'
     ][0]
     prepare = [
         action for action in overlay.internal_pipeline.actions
         if action.name == 'prepare-kernel'
     ][0]
     uboot_prepare = [
         action for action in prepare.internal_pipeline.actions
         if action.name == 'uboot-prepare-kernel'
     ][0]
     self.assertEqual('image', uboot_prepare.kernel_type)
     # bbb-01.yaml does not contain booti parameters, try to convert to a uImage
     self.assertEqual('bootm', uboot_prepare.bootcommand)
     self.assertTrue(uboot_prepare.mkimage_conversion)
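The comment above notes that, without booti/bootz support in the bootloader, the kernel has to be converted to a uImage so that bootm can start it. For readers who have not met that conversion, a hedged sketch of the usual mkimage invocation; the load and entry addresses are placeholders and depend on the board:

import subprocess


def convert_zimage_to_uimage(zimage, uimage, load_addr='0x80008000'):
    # Illustrative only: wrap a zImage in a U-Boot legacy image header so it
    # can be booted with 'bootm'. The addresses below are placeholders.
    subprocess.check_call([
        'mkimage', '-A', 'arm', '-O', 'linux', '-T', 'kernel', '-C', 'none',
        '-a', load_addr, '-e', load_addr,
        '-n', 'converted kernel', '-d', zimage, uimage,
    ])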
Example #20
 def __init__(self):
     """
     Uses the tftp directory for easier cleanup and for parity
     with the non-QEMU Debian Installer support.
     """
     super(DeployIsoAction, self).__init__()
     self.name = 'deploy-iso-installer'
     self.description = 'setup deployment for emulated installer'
     self.summary = 'pull kernel and initrd out of iso'
     self.suffix = None
     try:
         self.preseed_path = mkdtemp(basedir=tftpd_dir())
     except OSError:
         self.suffix = '/'
         self.preseed_path = mkdtemp()  # unit test support
     self.suffix = os.path.basename(self.preseed_path)
Example #21
 def run(self, connection, max_end_time, args=None):
     connection = super(LoopMountAction, self).run(connection, max_end_time,
                                                   args)
     self.mntdir = mkdtemp(autoremove=False)
     lava_test_results_dir = self.get_namespace_data(
         action='test', label='results', key='lava_test_results_dir')
     test_mntdir = os.path.abspath("%s/%s" %
                                   (self.mntdir, lava_test_results_dir))
     self.set_namespace_data(action=self.name,
                             label='mntdir',
                             key='mntdir',
                             value=self.mntdir)
     self.set_namespace_data(action='mount-action',
                             label='mntdir',
                             key='mntdir',
                             value=test_mntdir)
     offset = self.get_namespace_data(action='download-action',
                                      label=self.key,
                                      key='offset')
     mount_cmd = [
         'mount', '-o',
         'loop,offset=%s' % offset,
         self.get_namespace_data(action='download-action',
                                 label=self.key,
                                 key='file'), self.mntdir
     ]
     command_output = self.run_command(mount_cmd)
      if command_output and command_output != '':
         raise JobError("Unable to mount: %s" %
                        command_output)  # FIXME: JobError needs a unit test
     return connection
Example #22
 def test_basic_actions(self):
     factory = Factory()
     job = factory.create_fake_qemu_job(mkdtemp())
     if not job:
         self.skipTest("not all deployments have been implemented")
     self.assertIsInstance(job, Job)
     self.assertIsInstance(job.pipeline, Pipeline)
Example #23
 def run(self, connection, args=None):
     """
     Check if a lava-test-shell has been requested, implement the overlay
     * create test runner directories beneath the temporary location
     * copy runners into test runner directories
     """
     self.data[self.name].setdefault('location', mkdtemp())
     self.logger.debug("Preparing overlay tarball in %s", self.data[self.name]['location'])
     if 'lava_test_results_dir' not in self.data:
         self.logger.error("Unable to identify lava test results directory - missing OS type?")
         return connection
     lava_path = os.path.abspath("%s/%s" % (self.data[self.name]['location'], self.data['lava_test_results_dir']))
     for runner_dir in ['bin', 'tests', 'results']:
         # avoid os.path.join as lava_test_results_dir startswith / so location is *dropped* by join.
         path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
         if not os.path.exists(path):
              os.makedirs(path, 0o755)
             self.logger.debug("makedir: %s", path)
     for fname in self.scripts_to_copy:
         with open(fname, 'r') as fin:
             output_file = '%s/bin/%s' % (lava_path, os.path.basename(fname))
             self.logger.debug("Creating %s", output_file)
             with open(output_file, 'w') as fout:
                 fout.write("#!%s\n\n" % self.parameters['deployment_data']['lava_test_sh_cmd'])
                 fout.write(fin.read())
                 os.fchmod(fout.fileno(), self.xmod)
     connection = super(OverlayAction, self).run(connection, args)
     return connection
Example #24
 def test_namespace_data(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
     self.assertIsNotNone(job)
     test_action = job.pipeline.actions[0]
     test_action.validate()
     test_action.set_namespace_data('common', 'label', 'simple', 1)
     self.assertEqual(
         test_action.get_namespace_data('common', 'label', 'simple'), 1)
     test_action.set_namespace_data('common', 'ns', 'dict', {'key': False})
     self.assertEqual(
         test_action.get_namespace_data('common', 'ns', 'dict'),
         {'key': False})
     test_action.set_namespace_data('common', 'ns', 'list', [1, 2, 3, '4'])
     self.assertEqual(
         test_action.get_namespace_data('common', 'ns', 'list'),
         [1, 2, 3, '4'])
     test_action.set_namespace_data('common', 'ns', 'dict2',
                                    {'key': {
                                        'nest': True
                                    }})
     self.assertEqual(
         test_action.get_namespace_data('common', 'ns', 'dict2'),
         {'key': {
             'nest': True
         }})
     self.assertNotEqual(
         test_action.get_namespace_data('common', 'unknown', 'simple'), 1)
Example #25
 def test_lxc_api(self):
     job = self.factory.create_hikey_job('sample_jobs/hikey-oe.yaml',
                                         mkdtemp())
     description_ref = pipeline_reference('hikey-oe.yaml')
     job.validate()
     self.assertEqual(description_ref, job.pipeline.describe(False))
     self.assertIn(LxcProtocol.name,
                   [protocol.name for protocol in job.protocols])
     self.assertEqual(len(job.protocols), 1)
     self.assertIsNone(
         job.device.pre_os_command
     )  # FIXME: a real device config would typically need this.
     uefi_menu = [
         action for action in job.pipeline.actions
         if action.name == 'uefi-menu-action'
     ][0]
     select = [
         action for action in uefi_menu.internal_pipeline.actions
         if action.name == 'uefi-menu-selector'
     ][0]
     self.assertIn(LxcProtocol.name, select.parameters.keys())
     self.assertIn('protocols', select.parameters.keys())
     self.assertIn(LxcProtocol.name, select.parameters['protocols'].keys())
     self.assertEqual(len(select.parameters['protocols'][LxcProtocol.name]),
                      1)
     lxc_active = any([
         protocol for protocol in job.protocols
         if protocol.name == LxcProtocol.name
     ])
     self.assertTrue(lxc_active)
     for calling in select.parameters['protocols'][LxcProtocol.name]:
         self.assertEqual(calling['action'], select.name)
         self.assertEqual(calling['request'], 'pre-os-command')
Example #26
 def run(self, connection, args=None):
     if not self.parameters.get('ramdisk', None):  # idempotency
         return connection
     connection = super(ExtractRamdisk, self).run(connection, args)
     ramdisk = self.data['download_action']['ramdisk']['file']
     ramdisk_dir = mkdtemp()
     extracted_ramdisk = os.path.join(ramdisk_dir, 'ramdisk')
     os.mkdir(extracted_ramdisk)
     ramdisk_compressed_data = os.path.join(ramdisk_dir, RAMDISK_COMPRESSED_FNAME)
     if self.parameters.get('ramdisk-type', None) == 'u-boot':
         # TODO: 64 bytes is empirical - may need to be configurable in the future
         cmd = ('dd if=%s of=%s ibs=64 skip=1' % (ramdisk, ramdisk_compressed_data)).split(' ')
         try:
             self.run_command(cmd)
         except:
             raise RuntimeError('Unable to remove uboot header: %s' % ramdisk)
     else:
         # give the file a predictable name
         os.rename(ramdisk, ramdisk_compressed_data)
     self.logger.debug(os.system("file %s" % ramdisk_compressed_data))
     cmd = ('gzip -d -f %s' % ramdisk_compressed_data).split(' ')
      if self.run_command(cmd) != '':
         raise JobError('Unable to uncompress: %s - missing ramdisk-type?' % ramdisk_compressed_data)
     # filename has been changed by gzip
     ramdisk_data = os.path.join(ramdisk_dir, RAMDISK_FNAME)
     pwd = os.getcwd()
     os.chdir(extracted_ramdisk)
     cmd = ('cpio -i -F %s' % ramdisk_data).split(' ')
     if not self.run_command(cmd):
         raise JobError('Unable to uncompress: %s - missing ramdisk-type?' % ramdisk_data)
     os.chdir(pwd)
     # tell other actions where the unpacked ramdisk can be found
     self.data[self.name]['extracted_ramdisk'] = extracted_ramdisk  # directory
     self.data[self.name]['ramdisk_file'] = ramdisk_data  # filename
     return connection
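The extraction above strips any U-Boot header, gunzips the ramdisk and unpacks it with cpio so the overlay can be copied in; the usual counterpart is to repack the directory afterwards. A sketch of that reverse step using a find | cpio | gzip pipeline (illustrative; the project's own repack action is not shown on this page and may differ):

import subprocess


def repack_ramdisk(extracted_dir, output_path):
    # Illustrative only: pack the directory back into a gzip-compressed cpio
    # archive in 'newc' format, the layout the kernel expects for an initramfs.
    with open(output_path, 'wb') as out:
        find = subprocess.Popen(['find', '.', '-print0'],
                                cwd=extracted_dir, stdout=subprocess.PIPE)
        cpio = subprocess.Popen(['cpio', '--null', '-o', '-H', 'newc'],
                                cwd=extracted_dir, stdin=find.stdout,
                                stdout=subprocess.PIPE)
        gzip = subprocess.Popen(['gzip', '-9'],
                                stdin=cpio.stdout, stdout=out)
        find.stdout.close()
        cpio.stdout.close()
        gzip.communicate()
        find.wait()
        cpio.wait()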
Example #27
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        parameters = {
            'device_type': 'd02',
            'job_name': 'grub-standard-ramdisk',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'output_dir': mkdtemp(),
            'actions': {
                'boot': {
                    'method': 'grub',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                    'dtb': 'broken.dtb'
                }
            }
        }
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
        job = Job(4212, None, None, None, parameters)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.set_pipeline(pipeline)
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        try:
            ip_addr = dispatcher_ip()
        except InfrastructureError as exc:
            raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
        parsed = []
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']
        dtb = parameters['actions']['deploy']['dtb']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            # the addresses need to be hexadecimal
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{DTB}': dtb
        }
        params = device['actions']['boot']['methods']
        commands = params['grub']['ramdisk']['commands']
        self.assertIn('net_bootp', commands)
        self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
        self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
        self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)

        params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
        substituted_commands = params['grub']['ramdisk']['commands']
        self.assertIs(type(substituted_commands), list)
        self.assertIn('net_bootp', substituted_commands)
        self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
        self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
        self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
        self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
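substitute() in the test above takes the list of bootloader command templates and a placeholder-to-value mapping. A minimal sketch of an equivalent helper (an assumption; the real one may behave differently, for example in how it treats None values, which are skipped below):

def substitute(command_list, dictionary):
    # Illustrative only: apply every placeholder -> value replacement to each
    # command string and return the rewritten list.
    substituted = []
    for line in command_list:
        for key, value in dictionary.items():
            if value is not None:
                line = line.replace(key, value)
        substituted.append(line)
    return substituted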
Example #28
 def __init__(self):
     super(TftpAction, self).__init__()
     self.name = "tftp-deploy"
     self.description = "download files and deploy using tftp"
     self.summary = "tftp deploment"
     self.tftp_dir = tftpd_dir()
     self.suffix = None
     try:
         self.tftp_dir = mkdtemp(basedir=self.tftp_dir)
     except OSError:
         # allows for unit tests to operate as normal user.
         self.suffix = '/'
     self.download_dir = DISPATCHER_DOWNLOAD_DIR  # used for NFS
     try:
         self.download_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         pass
Example #29
 def __init__(self):
     super(TftpAction, self).__init__()
     self.name = "tftp-deploy"
     self.description = "download files and deploy using tftp"
     self.summary = "tftp deployment"
     self.tftp_dir = tftpd_dir()
     self.suffix = None
     try:
         self.tftp_dir = mkdtemp(basedir=self.tftp_dir)
     except OSError:
         # allows for unit tests to operate as normal user.
         self.suffix = '/'
     self.download_dir = DISPATCHER_DOWNLOAD_DIR  # used for NFS
     try:
         self.download_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         pass
Example #30
 def test_primary_ssh(self):
     factory = ConnectionFactory()
     job = factory.create_ssh_job('sample_jobs/primary-ssh.yaml', mkdtemp())
     job.validate()
     overlay = [action for action in job.pipeline.actions if action.name == 'scp-overlay'][0]
     self.assertIsNotNone(overlay.parameters['deployment_data'])
     tar_flags = overlay.parameters['deployment_data']['tar_flags'] if 'tar_flags' in overlay.parameters['deployment_data'].keys() else ''
     self.assertIsNotNone(tar_flags)
Example #31
 def test_udev_actions(self):
     self.factory = FastBootFactory()
     job = self.factory.create_db410c_job('sample_jobs/db410c.yaml', mkdtemp())
     self.assertTrue(job.device.get('fastboot_via_uboot', True))
     self.assertEqual('', self.job.device.power_command)
     description_ref = self.pipeline_reference('db410c.yaml', job=job)
     self.assertEqual(description_ref, job.pipeline.describe(False))
     boot = [action for action in job.pipeline.actions if action.name == 'fastboot-boot'][0]
Example #32
 def run(self, connection, args=None):
     # qemu-img create hd_img.img 2G
     base_dir = mkdtemp()
     output = os.path.join(base_dir, 'hd.img')
     self.logger.info("Creating base image of size: %s bytes", self.size)
     prepare_install_base(output, self.size)
     self.set_common_data(self.name, 'output', output)
     self.results = {'success': output}
     return connection
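The "# qemu-img create hd_img.img 2G" comment above hints at what prepare_install_base() does. A hedged sketch of such a helper built directly on qemu-img (illustrative; the project helper itself is not reproduced in these snippets):

import subprocess


def prepare_install_base(output, size_bytes):
    # Illustrative only: create an empty raw disk image of the requested size
    # (in bytes) for the installer to partition and populate.
    subprocess.check_call(['qemu-img', 'create', '-f', 'raw',
                           output, str(size_bytes)])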
Example #33
    def test_pipeline(self):
        description_ref = pipeline_reference('kvm-inline.yaml')
        self.assertEqual(description_ref, self.job.pipeline.describe(False))

        self.assertEqual(len(self.job.pipeline.describe()), 4)
        for action in self.job.pipeline.actions:
            if isinstance(action, DeployAction):
                overlay = action.pipeline.children[action.pipeline][3]
                testdef = overlay.internal_pipeline.actions[1]
                inline_repo = testdef.internal_pipeline.actions[0]
                break

        # Test the InlineRepoAction directly
        location = mkdtemp()
        inline_repo.data['lava-overlay'] = {'location': location}
        inline_repo.data['test-definition'] = {'overlay_dir': location}

        inline_repo.run(None)
        yaml_file = os.path.join(
            location,
            'tests/0_smoke-tests-inline/inline/smoke-tests-basic.yaml')
        self.assertTrue(os.path.exists(yaml_file))
        with open(yaml_file, 'r') as f_in:
            testdef = yaml.load(f_in)
        expected_testdef = {
            'metadata': {
                'description':
                'Basic system test command for Linaro Ubuntu images',
                'devices': [
                    'panda', 'panda-es', 'arndale', 'vexpress-a9',
                    'vexpress-tc2'
                ],
                'format':
                'Lava-Test Test Definition 1.0',
                'name':
                'smoke-tests-basic',
                'os': ['ubuntu'],
                'scope': ['functional'],
                'yaml_line':
                39
            },
            'run': {
                'steps': [
                    'lava-test-case linux-INLINE-pwd --shell pwd',
                    'lava-test-case linux-INLINE-uname --shell uname -a',
                    'lava-test-case linux-INLINE-vmstat --shell vmstat',
                    'lava-test-case linux-INLINE-ifconfig --shell ifconfig -a',
                    'lava-test-case linux-INLINE-lscpu --shell lscpu',
                    'lava-test-case linux-INLINE-lsusb --shell lsusb',
                    'lava-test-case linux-INLINE-lsb_release --shell lsb_release -a'
                ],
                'yaml_line':
                53
            },
            'yaml_line': 38
        }
        self.assertEqual(testdef, expected_testdef)
Example #34
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        parameters = {
            'device_type': 'd02',
            'job_name': 'grub-standard-ramdisk',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'output_dir': mkdtemp(),
            'actions': {
                'boot': {
                    'method': 'grub',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                    'dtb': 'broken.dtb'
                }
            }
        }
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']
        dtb = parameters['actions']['deploy']['dtb']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            # the addresses need to be hexadecimal
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{DTB}': dtb
        }
        params = device['actions']['boot']['methods']
        commands = params['grub']['ramdisk']['commands']
        self.assertIn('net_bootp', commands)
        self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
        self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
        self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)

        params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
        substituted_commands = params['grub']['ramdisk']['commands']
        self.assertIs(type(substituted_commands), list)
        self.assertIn('net_bootp', substituted_commands)
        self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
        self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
        self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
        self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
Example #35
 def test_describe(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
     self.assertIsNotNone(job)
     pipe = job.pipeline.describe()
     for item in pipe:
         self.assertNotIn('match', item)
         if 'pipeline' in item:
             for element in item['pipeline']:
                 self.assertNotIn('match', element)
Example #36
 def __init__(self):
     super(FastbootAction, self).__init__()
     self.name = "fastboot-deploy"
     self.description = "download files and deploy using fastboot"
     self.summary = "fastboot deployment"
     self.fastboot_dir = DISPATCHER_DOWNLOAD_DIR
     try:
         self.fastboot_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         pass
Example #37
 def run(self, connection, args=None):
     if not self.parameters.get(self.param_key, None):  # idempotency
         return connection
     connection = super(ExtractRootfs, self).run(connection, args)
     root = self.data['download_action'][self.param_key]['file']
     root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     untar_file(root, root_dir)
     self.set_common_data('file', self.file_key, root_dir)
     self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
     return connection
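untar_file() is a project helper that is not reproduced on this page; the earlier ExtractRootfs variants show what it has to do. A minimal stand-in using the standard tarfile module, for readers trying the snippets outside the project:

import tarfile


def untar_file(archive_path, destination):
    # Illustrative stand-in: unpack a (possibly compressed) tar archive into
    # the destination directory, turning tar errors into a plain exception.
    try:
        with tarfile.open(archive_path) as tar:
            tar.extractall(destination)
    except tarfile.TarError as exc:
        raise RuntimeError("Unable to unpack %s: %s" % (archive_path, exc))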
Example #38
 def test_describe(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
     self.assertIsNotNone(job)
     pipe = job.pipeline.describe()
     for item in pipe:
         self.assertNotIn('match', item)
         if 'pipeline' in item:
             for element in item['pipeline']:
                 self.assertNotIn('match', element)
Example #39
 def run(self, connection, args=None):
     if not self.parameters.get(self.param_key, None):  # idempotency
         return connection
     connection = super(ExtractRootfs, self).run(connection, args)
     root = self.data['download_action'][self.param_key]['file']
     root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     untar_file(root, root_dir)
     self.set_common_data('file', self.file_key, root_dir)
     self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
     return connection
Example #40
 def __init__(self):
     super(FastbootAction, self).__init__()
     self.name = "fastboot-deploy"
     self.description = "download files and deploy using fastboot"
     self.summary = "fastboot deployment"
     self.fastboot_dir = DISPATCHER_DOWNLOAD_DIR
     try:
         self.fastboot_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         pass
Example #41
 def test_describe(self):
     factory = Factory()
     job = factory.create_kvm_job("sample_jobs/kvm.yaml", mkdtemp())
     self.assertIsNotNone(job)
     pipe = job.pipeline.describe()
     for item in pipe:
         self.assertNotIn("match", item)
         if "pipeline" in item:
             for element in item["pipeline"]:
                 self.assertNotIn("match", element)
Example #42
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        parameters = {
            'device_type': 'x86',
            'job_name': 'ipxe-pipeline',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'output_dir': mkdtemp(),
            'actions': {
                'boot': {
                    'method': 'ipxe',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                }
            }
        }
        device = NewDevice(
            os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{LAVA_MAC}': "00:00:00:00:00:00"
        }
        params = device['actions']['boot']['methods']
        params['ipxe']['ramdisk']['commands'] = substitute(
            params['ipxe']['ramdisk']['commands'], substitution_dictionary)

        commands = params['ipxe']['ramdisk']['commands']
        self.assertIs(type(commands), list)
        self.assertIn("dhcp net0", commands)
        self.assertIn(
            "set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00",
            commands)
        self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
        self.assertNotIn(
            "kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}",
            commands)
        self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
        self.assertIn("boot", commands)
Example #43
 def test_qemu_notest(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm-notest.yaml', mkdtemp())
     job.validate()
     self.assertIsNotNone(job)
     self.assertIsNotNone(job.pipeline)
     self.assertIsNotNone(job.pipeline.actions)
     for action in job.pipeline.actions:
         action.validate()
         self.assertTrue(action.valid)
     self.assertTrue(find_autologin(job))
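find_autologin() above is a small helper shared by several of these tests. A hedged sketch of what it could look like, walking the pipeline and any internal pipelines for an auto-login action; matching by action name is an assumption here, the real helper may use an isinstance check instead:

def find_autologin(job):
    # Illustrative only: walk the top-level pipeline, appending any internal
    # pipelines to the same work list, and report whether an auto-login
    # action is present anywhere in the tree.
    actions = list(job.pipeline.actions)
    for action in actions:
        internal = getattr(action, 'internal_pipeline', None)
        if internal is not None:
            actions.extend(internal.actions)
        if action.name == 'auto-login-action':
            return True
    return False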
Example #44
 def __init__(self):
     super(FlashRomDeploy, self).__init__()
     self.name = "flashrom"
     self.summary = "deploy a coreboot image via flashrom"
     self.description = "deploy a coreboot image via flashrom"
     self.section = 'deploy'
     self.items = []
     self.download_dir = DISPATCHER_DOWNLOAD_DIR
     try:
         self.download_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         pass
Example #45
 def test_qemu_monitor_zephyr_job(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/zephyr-qemu-test-task.yaml',
                                  mkdtemp())
     job.validate()
     self.assertIsNotNone(job)
     self.assertIsNotNone(job.pipeline)
     self.assertIsNotNone(job.pipeline.actions)
     for action in job.pipeline.actions:
         action.validate()
         self.assertTrue(action.valid)
     self.assertFalse(find_autologin(job))
Example #46
 def __init__(self):
     super(ScpOverlay, self).__init__()
     self.name = "scp-overlay"
     self.summary = "copy overlay to device"
     self.description = "prepare overlay and scp to device"
     self.section = 'deploy'
     self.items = []
     try:
         self.scp_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         # allows for unit tests to operate as normal user.
         self.suffix = '/'
Example #47
 def populate(self, parameters):
     self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
     path = mkdtemp()
     if 'uefi' in parameters:
         uefi_path = mkdtemp()
         download = DownloaderAction('uefi', uefi_path)
         download.max_retries = 3
         self.internal_pipeline.add_action(download)
         # uefi option of QEMU needs a directory, not the filename
         self.set_common_data('image', 'uefi_dir', uefi_path)  # just the path, not the filename
     for image in parameters['images'].keys():
         if image != 'yaml_line':
             download = DownloaderAction(image, path)
             download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
             self.internal_pipeline.add_action(download)
             if parameters['images'][image].get('format', '') == 'qcow2':
                 self.internal_pipeline.add_action(QCowConversionAction(image))
     self.internal_pipeline.add_action(CustomisationAction())
     self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
     self.internal_pipeline.add_action(ApplyOverlayGuest())
     self.internal_pipeline.add_action(DeployDeviceEnvironment())
Example #48
 def run(self, connection, args=None):
     if not self.data['compress-overlay'].get('output'):
         raise RuntimeError("Unable to find the overlay")
     guest_dir = mkdtemp()
     guest_file = os.path.join(guest_dir, self.guest_filename)
     self.set_common_data('guest', 'filename', guest_file)
     blkid = prepare_guestfs(
         guest_file, self.data['compress-overlay'].get('output'),
         self.job.device['actions']['deploy']['methods']['image']
         ['parameters']['guest']['size'])
     self.results = {'success': blkid}
     return connection
Example #49
 def run(self, connection, args=None):
     connection = super(ApplyOverlayTftp, self).run(connection, args)
     overlay_file = None
     directory = None
     nfs_url = None
     if self.parameters.get('nfsrootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'nfsroot')
         self.logger.info("Applying overlay to NFS")
     elif self.parameters.get('nfs_url', None) is not None:
         nfs_url = self.parameters.get('nfs_url')
         overlay_file = self.data['compress-overlay'].get('output')
         self.logger.info("Applying overlay to persistent NFS")
         # need to mount the persistent NFS here.
         # We can't use self.mkdtemp() here because this directory should
         # not be removed if umount fails.
         directory = mkdtemp(autoremove=False)
         try:
             subprocess.check_output(
                 ['mount', '-t', 'nfs', nfs_url, directory])
         except subprocess.CalledProcessError as exc:
             raise JobError(exc)
     elif self.parameters.get('ramdisk', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.data['extract-overlay-ramdisk'][
             'extracted_ramdisk']
         self.logger.info("Applying overlay to ramdisk")
     elif self.parameters.get('rootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'root')
     else:
         self.logger.debug("No overlay directory")
         self.logger.debug(self.parameters)
     if self.parameters.get('os', None) == "centos_installer":
          # centos installer ramdisk doesn't like having anything other
         # than the kickstart config being inserted. Instead, make the
         # overlay accessible through tftp. Yuck.
         tftp_dir = os.path.dirname(
             self.data['download_action']['ramdisk']['file'])
         shutil.copy(overlay_file, tftp_dir)
         suffix = self.data['tftp-deploy'].get('suffix', '')
         self.set_common_data(
             'file', 'overlay',
             os.path.join(suffix, os.path.basename(overlay_file)))
     if nfs_url:
         subprocess.check_output(['umount', directory])
         os.rmdir(directory)  # fails if the umount fails
     if overlay_file:
         untar_file(overlay_file, directory)
         if nfs_url:
             subprocess.check_output(['umount', directory])
             os.rmdir(directory)  # fails if the umount fails
     return connection
Example #50
 def __init__(self):
     super(ScpOverlay, self).__init__()
     self.name = "scp-overlay"
     self.summary = "copy overlay to device"
     self.description = "prepare overlay and scp to device"
     self.section = 'deploy'
     self.items = []
     try:
         self.scp_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     except OSError:
         # allows for unit tests to operate as normal user.
         self.suffix = '/'
Example #51
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        parameters = {
            'device_type': 'x86',
            'job_name': 'ipxe-pipeline',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'output_dir': mkdtemp(),
            'actions': {
                'boot': {
                    'method': 'ipxe',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                }
            }
        }
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
        job = Job(4212, None, parameters)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.set_pipeline(pipeline)
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        try:
            ip_addr = dispatcher_ip()
        except InfrastructureError as exc:
            raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
        parsed = []
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{LAVA_MAC}': "00:00:00:00:00:00"
        }
        params = device['actions']['boot']['methods']
        params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)

        commands = params['ipxe']['ramdisk']['commands']
        self.assertIs(type(commands), list)
        self.assertIn("dhcp net0", commands)
        self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
        self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
        self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
        self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
        self.assertIn("boot", commands)
Example #52
 def populate(self, parameters):
     self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
     self.internal_pipeline.add_action(IsoEmptyImage())
     # the preseed file needs to go into the dispatcher apache tmp directory.
     self.internal_pipeline.add_action(DownloaderAction('preseed', self.preseed_path))
     iso_path = mkdtemp()
     self.internal_pipeline.add_action(DownloaderAction('iso', iso_path))
     self.internal_pipeline.add_action(IsoPullInstaller())
     self.internal_pipeline.add_action(QemuCommandLine())
     # prepare overlay at this stage - make it available after installation.
     self.internal_pipeline.add_action(CustomisationAction())
     self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
     self.internal_pipeline.add_action(ApplyOverlayGuest())
     self.internal_pipeline.add_action(DeployDeviceEnvironment())
Example #53
 def populate(self, parameters):
     self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
     path = mkdtemp()
     download = DownloaderAction('image', path)
     download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
     self.internal_pipeline.add_action(download)
     if parameters.get('format', '') == 'qcow2':
         self.internal_pipeline.add_action(QCowConversionAction('image'))
     self.internal_pipeline.add_action(MountAction())
     self.internal_pipeline.add_action(CustomisationAction())
     self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
     self.internal_pipeline.add_action(ApplyOverlayImage())  # specific to image deployments
     self.internal_pipeline.add_action(DeployDeviceEnvironment())
     self.internal_pipeline.add_action(UnmountAction())
Example #54
 def test_common_data(self):
     factory = Factory()
     job = factory.create_kvm_job('sample_jobs/kvm.yaml', mkdtemp())
     self.assertIsNotNone(job)
     test_action = job.pipeline.actions[0]
     test_action.validate()
     test_action.set_common_data('ns', 'simple', 1)
     self.assertEqual(test_action.get_common_data('ns', 'simple'), 1)
     test_action.set_common_data('ns', 'dict', {'key': False})
     self.assertEqual(test_action.get_common_data('ns', 'dict'), {'key': False})
     test_action.set_common_data('ns', 'list', [1, 2, 3, '4'])
     self.assertEqual(test_action.get_common_data('ns', 'list'), [1, 2, 3, '4'])
     test_action.set_common_data('ns', 'dict2', {'key': {'nest': True}})
     self.assertEqual(test_action.get_common_data('ns', 'dict2'), {'key': {'nest': True}})
     self.assertNotEqual(test_action.get_common_data('unknown', 'simple'), 1)
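
set_common_data() and get_common_data() behave like a namespaced key/value store shared between actions. A minimal sketch of that behaviour, assuming the values live under the 'common' key of the action data (as the {'common': {}} initial state seen elsewhere in these examples suggests); FakeAction is a hypothetical stand-in, not the real Action class:

class FakeAction(object):
    # Hypothetical stand-in for illustration only.
    def __init__(self):
        self.data = {'common': {}}

    def set_common_data(self, namespace, key, value):
        self.data['common'].setdefault(namespace, {})[key] = value

    def get_common_data(self, namespace, key):
        return self.data['common'].get(namespace, {}).get(key)

action = FakeAction()
action.set_common_data('ns', 'dict', {'key': False})
assert action.get_common_data('ns', 'dict') == {'key': False}
assert action.get_common_data('unknown', 'simple') is None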
 def run(self, connection, args=None):
     connection = super(ApplyOverlayTftp, self).run(connection, args)
     overlay_file = None
     directory = None
     nfs_url = None
     if self.parameters.get('nfsrootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'nfsroot')
         self.logger.info("Applying overlay to NFS")
     elif self.parameters.get('nfs_url', None) is not None:
         nfs_url = self.parameters.get('nfs_url')
         overlay_file = self.data['compress-overlay'].get('output')
         self.logger.info("Applying overlay to persistent NFS")
         # need to mount the persistent NFS here.
         # We can't use self.mkdtemp() here because this directory should
         # not be removed if umount fails.
         directory = mkdtemp(autoremove=False)
         try:
             subprocess.check_output(['mount', '-t', 'nfs', nfs_url, directory])
         except subprocess.CalledProcessError as exc:
             raise JobError(exc)
     elif self.parameters.get('ramdisk', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
         self.logger.info("Applying overlay to ramdisk")
     elif self.parameters.get('rootfs', None) is not None:
         overlay_file = self.data['compress-overlay'].get('output')
         directory = self.get_common_data('file', 'root')
     else:
         self.logger.debug("No overlay directory")
         self.logger.debug(self.parameters)
     if self.parameters.get('os', None) == "centos_installer":
         # centos installer ramdisk doesn't like having anything other
         # than the kickstart config being inserted. Instead, make the
         # overlay accessible through tftp. Yuck.
         tftp_dir = os.path.dirname(self.data['download_action']['ramdisk']['file'])
         shutil.copy(overlay_file, tftp_dir)
         suffix = self.data['tftp-deploy'].get('suffix', '')
         self.set_common_data('file', 'overlay', os.path.join(suffix, os.path.basename(overlay_file)))
     if overlay_file:
         untar_file(overlay_file, directory)
         if nfs_url:
             subprocess.check_output(['umount', directory])
             os.rmdir(directory)  # fails if the umount fails
     return connection
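
The overlay is applied by unpacking the compressed overlay tarball into the chosen directory. A minimal sketch of an untar_file() helper like the one used above (an assumption for illustration; the dispatcher ships its own implementation):

import tarfile

def untar_file(infile, destdir):
    # Unpack a tarball (compressed or not) into the destination directory.
    with tarfile.open(infile) as tar:
        tar.extractall(destdir)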
 def run(self, connection, args=None):
     if not self.parameters.get('nfsrootfs', None):  # idempotency
         return connection
     connection = super(ExtractNfsRootfs, self).run(connection, args)
     nfsroot = self.data['download_action']['nfsrootfs']['file']
     nfsroot_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
     try:
         tar = tarfile.open(nfsroot)
         tar.extractall(nfsroot_dir)
         tar.close()
     except tarfile.TarError as exc:
         raise JobError("Unable to unpack nfsroot: '%s' - %s" % (os.path.basename(nfsroot), exc))
     self.set_common_data('file', 'nfsroot', nfsroot_dir)
     # self.data[self.name].setdefault('nfsroot', nfsroot_dir)
     self.logger.debug("Extracted nfs root to %s" % nfsroot_dir)
     return connection
Exemple #57
0
    def test_pipeline(self):
        description_ref = pipeline_reference('kvm-inline.yaml')
        self.assertEqual(description_ref, self.job.pipeline.describe(False))

        self.assertEqual(len(self.job.pipeline.describe()), 4)
        inline_repo = None
        for action in self.job.pipeline.actions:
            if isinstance(action, DeployAction):
                self.assertIsNotNone(action.internal_pipeline.actions[2])
                overlay = action.pipeline.children[action.pipeline][2]
                self.assertIsNotNone(overlay.internal_pipeline.actions[2])
                testdef = overlay.internal_pipeline.actions[2]
                self.assertIsNotNone(testdef.internal_pipeline.actions[0])
                inline_repo = testdef.internal_pipeline.actions[0]
                break
        # Test the InlineRepoAction directly
        self.assertIsNotNone(inline_repo)
        location = mkdtemp()
        # other actions have not been run, so fake up
        inline_repo.data['lava_test_results_dir'] = location
        inline_repo.data['lava-overlay'] = {'location': location}
        inline_repo.data['test-definition'] = {'overlay_dir': location}

        inline_repo.run(None)
        yaml_file = os.path.join(location, '0/tests/0_smoke-tests-inline/inline/smoke-tests-basic.yaml')
        self.assertTrue(os.path.exists(yaml_file))
        with open(yaml_file, 'r') as f_in:
            testdef = yaml.load(f_in)
        expected_testdef = {'metadata':
                            {'description': 'Basic system test command for Linaro Ubuntu images',
                             'devices': ['panda', 'panda-es', 'arndale', 'vexpress-a9', 'vexpress-tc2'],
                             'format': 'Lava-Test Test Definition 1.0',
                             'name': 'smoke-tests-basic',
                             'os': ['ubuntu'],
                             'scope': ['functional'],
                             'yaml_line': 39},
                            'run': {'steps': ['lava-test-case linux-INLINE-pwd --shell pwd',
                                              'lava-test-case linux-INLINE-uname --shell uname -a',
                                              'lava-test-case linux-INLINE-vmstat --shell vmstat',
                                              'lava-test-case linux-INLINE-ifconfig --shell ifconfig -a',
                                              'lava-test-case linux-INLINE-lscpu --shell lscpu',
                                              'lava-test-case linux-INLINE-lsusb --shell lsusb',
                                              'lava-test-case linux-INLINE-lsb_release --shell lsb_release -a'],
                                    'yaml_line': 53},
                            'yaml_line': 38}
        self.assertEqual(set(testdef), set(expected_testdef))
 def test_multi_deploy(self):
     self.assertIsNotNone(self.parsed_data)
     job = Job(4212, None, self.parsed_data)
     pipeline = Pipeline(job=job)
     device = TestMultiDeploy.FakeDevice()
     self.assertIsNotNone(device)
     job.device = device
     job.parameters['output_dir'] = mkdtemp()
     job.set_pipeline(pipeline)
     counts = {}
     for action_data in self.parsed_data['actions']:
         for name in action_data:
             counts.setdefault(name, 1)
             if counts[name] >= 2:
                 reset_context = ResetContext()
                 reset_context.section = 'deploy'
                 pipeline.add_action(reset_context)
             parameters = action_data[name]
             test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
             self.assertEqual(
                 {'common': {}},
                 test_deploy.action.data
             )
             counts[name] += 1
     # check that only one action has the example set
     self.assertEqual(
         ['nowhere'],
         [detail['deploy']['example'] for detail in self.parsed_data['actions'] if 'example' in detail['deploy']]
     )
     self.assertEqual(
         ['faked', 'valid'],
         [detail['deploy']['parameters'] for detail in self.parsed_data['actions'] if 'parameters' in detail['deploy']]
     )
     self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
     self.assertIsInstance(pipeline.actions[1], ResetContext)
     self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
     self.assertIsInstance(pipeline.actions[3], ResetContext)
     self.assertIsInstance(pipeline.actions[4], TestMultiDeploy.TestDeployAction)
     job.validate()
     self.assertEqual([], job.pipeline.errors)
     job.run()
     self.assertNotEqual(pipeline.actions[0].data, {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
     self.assertNotEqual(pipeline.actions[1].data, {'common': {}, 'fake_deploy': pipeline.actions[1].parameters})
     self.assertEqual(pipeline.actions[2].data, {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
     # check that values from previous DeployAction run actions have been cleared
     self.assertEqual(pipeline.actions[4].data, {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
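
The ResetContext actions interleaved between the deployments exist to wipe the shared context so values from one deployment do not leak into the next. A minimal sketch of such an action, assuming the context is a plain dictionary restored to its {'common': {}} starting state (hypothetical illustration, not the test helper itself):

class FakeResetContext(object):
    # Hypothetical illustration of a context-clearing action.
    section = 'deploy'

    def __init__(self, context):
        self.context = context

    def run(self, connection, args=None):
        self.context.clear()
        self.context['common'] = {}
        return connection

shared = {'common': {}, 'fake_deploy': {'parameters': 'faked'}}
FakeResetContext(shared).run(None)
assert shared == {'common': {}}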
 def run(self, connection, args=None):
     connection = super(LoopMountAction, self).run(connection, args)
     self.data[self.name]['mntdir'] = mkdtemp(autoremove=False)
     self.data['mount_action']['mntdir'] = \
         os.path.abspath("%s/%s" % (self.data[self.name]['mntdir'], self.data['lava_test_results_dir']))
     mount_cmd = [
         'mount',
         '-o',
         'loop,offset=%s' % self.data['download_action']['offset'],
         self.data['download_action']['image']['file'],
         self.data[self.name]['mntdir']
     ]
     command_output = self.run_command(mount_cmd)
     self.mntdir = self.data['loop_mount']['mntdir']
     if command_output and command_output != '':
         raise JobError("Unable to mount: %s" % command_output)  # FIXME: JobError needs a unit test
     return connection
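
The mount command built above attaches the image through a loop device at a byte offset (the start of the partition inside the whole-disk image). A minimal standalone sketch of the same call, assuming the offset has already been calculated and root privileges are available:

import subprocess

def loop_mount(image, mount_point, offset):
    # Loop-mount a partition that starts 'offset' bytes into the image file.
    return subprocess.check_output(
        ['mount', '-o', 'loop,offset=%s' % offset, image, mount_point])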
 def setUp(self):
     self.parameters = {
         "job_name": "fakejob",
         'output_dir': mkdtemp(),
         "actions": [
             {
                 'deploy': {
                     'failure_retry': 3
                 },
                 'boot': {
                     'failure_retry': 4
                 },
                 'test': {
                     'failure_retry': 5
                 }
             }
         ]
     }
     self.fakejob = TestAdjuvant.FakeJob(self.parameters)
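
The failure_retry values in these fake job parameters are meant to override the default retry count of retryable actions. A minimal sketch of how a retry wrapper might pick the value up from its parameters (hypothetical names, for illustration only):

class FakeRetryAction(object):
    # Hypothetical illustration: 'failure_retry' overrides the default retries.
    def __init__(self, parameters, default_retries=1):
        self.max_retries = parameters.get('failure_retry', default_retries)

deploy_params = {'failure_retry': 3}
assert FakeRetryAction(deploy_params).max_retries == 3
assert FakeRetryAction({}).max_retries == 1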