Example #1
    def test_job_create_singlenode(self):
        """
        Test the creation of a LavaTestJob within the
        dispatcher to identify issues before the job has
        started to run.
        """
        factory = DispatcherFactory()
        target = factory.create_qemu_target(self.name, {})
        self.assertEqual(target.config.hostname, self.name)
        json_str = factory.singlenode_jobdata()
        self.assertNotEqual(json_str, None)
        jobdata = json.loads(json_str)
        self.assertEqual(jobdata['health_check'], False)
        validate_job_data(jobdata)
        # single node
        self.assertNotIn('target_group', jobdata)
        self.assertNotIn('is_vmhost', jobdata)
        job = LavaTestJob(json_str, sys.stderr, get_config(), None)
        self.assertEqual(job.target, target.config.hostname)
        self.assertIsNotNone(get_all_cmds())
        # FIXME: would be useful to not have the metadata population only accessible via LavaTestJob.run()
        job.run()
        self.assertEqual(job.context.test_data.metadata['target'], self.name)
        self.assertEqual(job.context.test_data.metadata['target.hostname'], self.name)
        self.assertNotIn('is_vmhost', job.context.test_data.metadata)
        self.assertNotIn('host_ip', job.context.test_data.metadata)
        self.assertNotIn('target_group', job.context.test_data.metadata)
        self.assertNotIn('vm_group', job.context.test_data.metadata)
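The assertions above only depend on the shape of the JSON job definition: the health_check flag plus the absence of the MultiNode-specific keys. As a rough illustration (not the actual output of DispatcherFactory.singlenode_jobdata(); every field value here is an assumption), a legacy single-node job definition looks roughly like:

import json

# Illustrative single-node job definition (assumed shape, not the factory's real output).
# The absence of 'target_group' and 'is_vmhost' is what marks the job as single node.
single_node_job = {
    "job_name": "qemu-singlenode-example",   # hypothetical name
    "target": "qemu01",                      # hypothetical device hostname
    "health_check": False,
    "timeout": 900,
    "actions": [
        {"command": "deploy_linaro_image",
         "parameters": {"image": "file:///path/to/image.img"}},  # hypothetical image path
        {"command": "boot_linaro_image"},
    ],
}

json_str = json.dumps(single_node_job)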
Example #2
def run_legacy_job(job_data, oob_file, config, output_dir, validate):

    if os.getuid() != 0:
        logging.error("lava dispatch has to be run as root")
        exit(1)

    json_job_data = json.dumps(job_data)
    job = LavaTestJob(json_job_data, oob_file, config, output_dir)

    # FIXME Return status
    if validate:
        try:
            validate_job_data(job.job_data)
        except ValidationError as e:
            print(e)
    else:
        job.run()
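As a usage sketch (assuming run_legacy_job from the example above and the legacy get_config are importable; the import path, file name and output directory below are hypothetical), validating a job file without running it could look like:

import json
import sys

from lava_dispatcher.config import get_config  # assumed import path for the legacy dispatcher

# Hypothetical invocation: validate a job file without running it.
with open("job.json") as handle:               # hypothetical job file
    job_data = json.load(handle)

run_legacy_job(job_data,
               oob_file=sys.stderr,
               config=get_config(),
               output_dir="/tmp/lava-output",  # hypothetical output directory
               validate=True)

Note that run_legacy_job exits immediately unless invoked as root, and when validate is False it goes straight to job.run().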
Example #3
    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job.
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'",
                     self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s",
                     self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host,
                             self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))
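The group negotiation above is plain JSON exchanged with the coordinator through self.poller. The real contents of self.base_msg come from the job and device configuration, so the sketch below is purely illustrative and every field value is an assumption; a 'nack' response is the failure path handled by run_tests() in the next example.

import json

# Illustrative messages only; the real self.base_msg fields are not shown in this
# excerpt, so everything below is an assumption.
base_msg = {
    "group_name": "multinode-group-example",  # hypothetical target_group identifier
    "client_name": "qemu01",                  # hypothetical device hostname
    "role": "client",                         # hypothetical MultiNode role
}

init_msg = {"request": "group_data", "group_size": 2}
init_msg.update(base_msg)

fin_msg = {"request": "clear_group", "group_size": 2}
fin_msg.update(base_msg)

# The failure case checked by run_tests(): the coordinator refuses the group.
nack_response = json.dumps({"response": "nack"})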
Example #4
    def run_tests(self, json_jobdata, group_data):
        if 'response' in group_data and group_data['response'] == 'nack':
            logging.error(
                "Unable to initialise a Multi-Node group - timed out waiting for other devices."
            )
            return
        config = get_config()
        if 'logging_level' in json_jobdata:
            logging.root.setLevel(json_jobdata["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        if 'target' not in json_jobdata:
            logging.error("The job file does not specify a target device.")
            exit(1)
        jobdata = json.dumps(json_jobdata)
        if self.output_dir and not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
        job.run(self, group_data, vm_host_ip=self.vm_host_ip)
Example #5
    def test_job_create_multinode(self):
        """
        Test the creation of a MultiNode LavaTestJob within
        the dispatcher to identify issues before the job has
        started to run and without needing to talk to the
        lava-coordinator.
        This job is only one part of a MultiNode job - it
        cannot be expected to run as-is.
        """
        factory = DispatcherFactory()
        for name in self.device_names:
            target = factory.create_qemu_target(name, {})
            json_str = factory.multinode_jobdata(len(self.device_names))
            self.assertEqual(target.config.hostname, name)
            self.assertNotEqual(json_str, None)
            jobdata = json.loads(json_str)
            self.assertEqual(jobdata['health_check'], False)
            validate_job_data(jobdata)
            # multi node
            self.assertIn('target_group', jobdata)
            self.assertNotIn('is_vmhost', jobdata)
            job = LavaTestJob(json_str, sys.stderr, get_config(), None)
            self.assertEqual(job.target, target.config.hostname)
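For contrast with the single-node sketch after Example #1, a MultiNode job definition carries the target_group key (and typically group-size and role information) that the assertIn above checks for. The shape below is illustrative only; none of the values are produced by DispatcherFactory.multinode_jobdata().

import json

# Illustrative MultiNode job definition (assumed shape); the presence of
# 'target_group' is what distinguishes it from a single-node job.
multinode_job = {
    "job_name": "qemu-multinode-example",   # hypothetical name
    "target": "qemu01",                     # hypothetical device hostname
    "health_check": False,
    "target_group": "example-group-uuid",   # hypothetical group identifier (normally a UUID)
    "group_size": 2,                        # hypothetical number of devices in the group
    "role": "client",                       # hypothetical role within the group
    "timeout": 900,
    "actions": [],
}

json_str = json.dumps(multinode_job)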