def test_server_ip(self):
    """The config written with LAVA_SERVER_IP must be read back as
    the ``lava_server_ip`` attribute."""
    create_config('lava-dispatcher.conf',
                  {'LAVA_SERVER_IP': '99.99.99.99'})
    config = get_config()
    self.assertEqual("99.99.99.99", config.lava_server_ip)
Exemplo n.º 2
0
 def create_job(self, filename, output_dir=None):
     """Parse the sample job file *filename* (relative to this test
     module) against a fresh KVM target and return the job with a
     LavaContext attached."""
     device = self.create_kvm_target()
     job_path = os.path.join(os.path.dirname(__file__), filename)
     self.sample_job_data = open(job_path)
     self.parser = JobParser()
     job = self.parser.parse(self.sample_job_data, device,
                             output_dir=output_dir)
     job.context = LavaContext(device.config.hostname, get_config(),
                               sys.stderr, job.parameters, '/tmp')
     return job
Exemplo n.º 3
0
def _create_qemu_target(extra_device_config=None):
    """Build a QEMUTarget for a throwaway 'qemu01' device.

    :param extra_device_config: optional dict of device settings merged
        over the base ``{'device_type': 'qemu'}`` configuration.
    :return: a QEMUTarget wired to a fresh LavaContext.
    """
    # Fix: the original used a mutable default argument ({}), which is
    # shared across calls and would leak state if ever mutated.
    if extra_device_config is None:
        extra_device_config = {}
    create_config('lava-dispatcher.conf', {})

    device_config_data = {'device_type': 'qemu'}
    device_config_data.update(extra_device_config)
    device_config = create_device_config('qemu01', device_config_data)

    dispatcher_config = get_config()

    context = LavaContext('qemu01', dispatcher_config, None, None, None)
    return QEMUTarget(context, device_config)
def _create_qemu_target(extra_device_config=None):
    """Build a QEMUTarget for a throwaway 'qemu01' device.

    :param extra_device_config: optional dict of device settings merged
        over the base ``{'device_type': 'qemu'}`` configuration.
    :return: a QEMUTarget wired to a fresh LavaContext.
    """
    # Fix: avoid a mutable default argument ({}); a shared dict would
    # leak state between calls if a caller ever mutated it.
    if extra_device_config is None:
        extra_device_config = {}
    create_config('lava-dispatcher.conf', {})

    device_config_data = {'device_type': 'qemu'}
    device_config_data.update(extra_device_config)
    device_config = create_device_config('qemu01', device_config_data)

    dispatcher_config = get_config()

    context = LavaContext('qemu01', dispatcher_config, None, None, None)
    return QEMUTarget(context, device_config)
 def create_job(self, filename, output_dir=None):
     """Return a parsed job for the sample file *filename*, built
     against a fresh KVM target with a LavaContext attached."""
     device = self.create_kvm_target()
     self.sample_job_data = open(
         os.path.join(os.path.dirname(__file__), filename))
     self.parser = JobParser()
     job = self.parser.parse(self.sample_job_data, device,
                             output_dir=output_dir)
     job.context = LavaContext(device.config.hostname, get_config(),
                               sys.stderr, job.parameters, '/tmp')
     return job
Exemplo n.º 6
0
    def create_kvm_target(self, extra_device_config=None):
        """Return a QEMUTarget for a fake 'kvm' device.

        *extra_device_config* entries are merged over the base
        ``{'device_type': 'kvm'}`` config."""
        extra = extra_device_config or {}
        create_config('lava-dispatcher.conf', {})

        data = {'device_type': 'kvm'}
        data.update(extra)
        # 'fakekvm' is deliberately a device name unlikely to exist.
        device_config = create_device_config('fakekvm', data)

        dispatcher_config = get_config()

        context = LavaContext('fakekvm', dispatcher_config, None, None, None)
        return QEMUTarget(context, device_config)
    def create_kvm_target(self, extra_device_config=None):
        """Build a QEMUTarget around a fake 'kvm' device configuration,
        merging any *extra_device_config* entries over the defaults."""
        if not extra_device_config:
            extra_device_config = {}
        create_config('lava-dispatcher.conf', {})

        merged = {'device_type': 'kvm'}
        merged.update(extra_device_config)
        # use a device name unlikely to exist
        device_config = create_device_config('fakekvm', merged)

        context = LavaContext('fakekvm', get_config(), None, None, None)
        return QEMUTarget(context, device_config)
Exemplo n.º 8
0
 def run_tests(self, json_jobdata, group_data):
     """Run the multi-node test job described by *json_jobdata*.

     :param json_jobdata: decoded job dict; must contain 'target'.
     :param group_data: group registration response; a 'nack' response
         aborts the run.
     """
     if 'response' in group_data and group_data['response'] == 'nack':
         # Fix: typo in the log message ("initiliase").
         logging.error("Unable to initialise a Multi-Node group - timed out waiting for other devices.")
         return
     config = get_config()
     # Job-level logging level overrides the dispatcher config.
     if 'logging_level' in json_jobdata:
         logging.root.setLevel(json_jobdata["logging_level"])
     else:
         logging.root.setLevel(config.logging_level)
     if 'target' not in json_jobdata:
         logging.error("The job file does not specify a target device.")
         exit(1)
     jobdata = json.dumps(json_jobdata)
     if self.output_dir and not os.path.isdir(self.output_dir):
         os.makedirs(self.output_dir)
     job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
     # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
     job.run(self, group_data, vm_host_ip=self.vm_host_ip)
Exemplo n.º 9
0
    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job.
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # Job-level logging level takes precedence over dispatcher config.
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'",
                     self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # Blocks until the coordinator replies with the group data.
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s",
                     self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # NOTE(review): if several hosts are listed, only the last
            # one's IP is kept — presumably a single-host message; verify.
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host,
                             self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))
 def run_tests(self, json_jobdata, group_data):
     """Run the multi-node test job described by *json_jobdata*.

     :param json_jobdata: decoded job dict; must contain 'target'.
     :param group_data: group registration response; a 'nack' response
         aborts the run.
     """
     if 'response' in group_data and group_data['response'] == 'nack':
         # Fix: typo in the log message ("initiliase").
         logging.error(
             "Unable to initialise a Multi-Node group - timed out waiting for other devices."
         )
         return
     config = get_config()
     # Job-level logging level overrides the dispatcher config.
     if 'logging_level' in json_jobdata:
         logging.root.setLevel(json_jobdata["logging_level"])
     else:
         logging.root.setLevel(config.logging_level)
     if 'target' not in json_jobdata:
         logging.error("The job file does not specify a target device.")
         exit(1)
     jobdata = json.dumps(json_jobdata)
     if self.output_dir and not os.path.isdir(self.output_dir):
         os.makedirs(self.output_dir)
     job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
     # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
     job.run(self, group_data, vm_host_ip=self.vm_host_ip)
Exemplo n.º 11
0
    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job.
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # Job-level logging level takes precedence over dispatcher config.
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'", self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # Blocks until the coordinator replies with the group data.
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s", self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # NOTE(review): if several hosts are listed, only the last
            # one's IP is kept — presumably a single-host message; verify.
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host, self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))
Exemplo n.º 12
0
    def invoke(self):
        """
        Entry point for lava dispatch, after the arguments have been parsed.

        Requires root. Resets the logging lava-tool configured, then either
        defers config loading (pipeline jobs) or loads it immediately,
        optionally renames the process, parses the job file and hands it to
        the selected job runner. Calls exit() on configuration errors,
        missing target, or after a multi-node NodeDispatcher run completes.
        """

        if os.geteuid() != 0:
            logging.error("You need to be root to run lava-dispatch.")
            exit(1)

        # Out-of-band messages go to the fd passed by the caller, or stderr.
        if self.args.oob_fd:
            oob_file = os.fdopen(self.args.oob_fd, 'w')
        else:
            oob_file = sys.stderr

        # config the python logging
        # FIXME: move to lava-tool
        # XXX: this is horrible, but: undo the logging setup lava-tool has
        # done.
        del logging.root.handlers[:]
        del logging.root.filters[:]
        if is_pipeline_job(self.args.job_file):
            # Branch point for the pipeline code - currently reliant on a simple filename
            # extension match.
            self.config = None  # external config is loaded on-demand by the pipeline
            # pipeline *always* logs at debug level, so do not set from config.

            # Pipeline always log as YAML so change the base logger.
            # Every calls to logging.getLogger will now return a YAMLLogger.
            logging.setLoggerClass(YAMLLogger)
        else:
            FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
            DATEFMT = '%Y-%m-%d %I:%M:%S %p'
            logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
            try:
                self.config = get_config()
            except CommandError as e:
                # Report config errors to output.txt when possible so the
                # failure is captured with the job output.
                if self.args.output_dir:
                    reporter = os.path.join(self.args.output_dir, "output.txt")
                    with open(reporter, 'a') as f:
                        f.write("Configuration error: %s\n" % e)
                else:
                    print(e)
                exit(1)
            # Set the logging level
            logging.root.setLevel(self.config.logging_level)

        # Set process id if job-id was passed to dispatcher
        if self.args.job_id:
            try:
                from setproctitle import setproctitle
            except ImportError:
                # setproctitle is optional; continue without renaming.
                logging.warning(
                    ("Unable to import 'setproctitle', "
                     "process name cannot be changed"))
            else:
                setproctitle("lava-dispatch [job: %s]" % self.args.job_id)

        # Load the job file
        job_runner, job_data = self.parse_job_file(self.args.job_file, oob_file)

        if self.args.output_dir and not os.path.isdir(self.args.output_dir):
            os.makedirs(self.args.output_dir)

        # detect multinode and start a NodeDispatcher to work with the LAVA Coordinator.
        if not self.args.validate:
            if 'target_group' in job_data:
                node = NodeDispatcher(job_data, oob_file, self.args.output_dir)
                node.run()
                # the NodeDispatcher has started and closed.
                exit(0)
        if self.args.target is None:
            if 'target' not in job_data:
                logging.error("The job file does not specify a target device. "
                              "You must specify one using the --target option.")
                exit(1)
        else:
            job_data['target'] = self.args.target

        job_runner(job_data, oob_file, self.config, self.args.output_dir,
                   self.args.validate)
Exemplo n.º 13
0
    def invoke(self):
        """
        Entry point for lava dispatch, after the arguments have been parsed.

        Requires root. Resets the logging lava-tool configured, then either
        defers config loading (pipeline jobs) or loads it immediately,
        optionally renames the process, parses the job file and hands it to
        the selected job runner. Calls exit() on configuration errors,
        missing target, or after a multi-node NodeDispatcher run completes.
        """

        if os.geteuid() != 0:
            logging.error("You need to be root to run lava-dispatch.")
            exit(1)

        # Out-of-band messages go to the fd passed by the caller, or stderr.
        if self.args.oob_fd:
            oob_file = os.fdopen(self.args.oob_fd, 'w')
        else:
            oob_file = sys.stderr

        # config the python logging
        # FIXME: move to lava-tool
        # XXX: this is horrible, but: undo the logging setup lava-tool has
        # done.
        del logging.root.handlers[:]
        del logging.root.filters[:]
        if is_pipeline_job(self.args.job_file):
            # Branch point for the pipeline code - currently reliant on a simple filename
            # extension match.
            self.config = None  # external config is loaded on-demand by the pipeline
            # pipeline *always* logs at debug level, so do not set from config.

            # Pipeline always log as YAML so change the base logger.
            # Every calls to logging.getLogger will now return a YAMLLogger.
            logging.setLoggerClass(YAMLLogger)
        else:
            FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
            DATEFMT = '%Y-%m-%d %I:%M:%S %p'
            logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
            try:
                self.config = get_config()
            except CommandError as e:
                if self.args.output_dir:
                    reporter = os.path.join(self.args.output_dir, "output.txt")
                    with open(reporter, 'a') as f:
                        f.write("Configuration error: %s\n" % e)
                else:
                    # Fix: original used the Python 2 statement 'print e',
                    # a SyntaxError under Python 3 and inconsistent with
                    # the print() calls used elsewhere in this file.
                    print(e)
                exit(1)
            # Set the logging level
            logging.root.setLevel(self.config.logging_level)

        # Set process id if job-id was passed to dispatcher
        if self.args.job_id:
            try:
                from setproctitle import setproctitle
            except ImportError:
                # Fix: log message read "Unable to set import"; reworded
                # to match the equivalent warning elsewhere in this file.
                logging.warning(
                    ("Unable to import 'setproctitle', "
                     "process name cannot be changed"))
            else:
                setproctitle("lava-dispatch [job: %s]" % self.args.job_id)

        # Load the job file
        job_runner, job_data = self.parse_job_file(self.args.job_file, oob_file)

        if self.args.output_dir and not os.path.isdir(self.args.output_dir):
            os.makedirs(self.args.output_dir)

        # detect multinode and start a NodeDispatcher to work with the LAVA Coordinator.
        if not self.args.validate:
            if 'target_group' in job_data:
                node = NodeDispatcher(job_data, oob_file, self.args.output_dir)
                node.run()
                # the NodeDispatcher has started and closed.
                exit(0)
        if self.args.target is None:
            if 'target' not in job_data:
                logging.error("The job file does not specify a target device. "
                              "You must specify one using the --target option.")
                exit(1)
        else:
            job_data['target'] = self.args.target

        job_runner(job_data, oob_file, self.config, self.args.output_dir,
                   self.args.validate)
Exemplo n.º 14
0
    def invoke(self):
        """
        Entry point for lava dispatch, after the arguments have been parsed.

        Resets the logging lava-tool configured, loads the dispatcher
        config, optionally renames the process, parses the job file and
        hands it to the selected job runner. Calls exit() on configuration
        errors, missing target, or after a multi-node NodeDispatcher run.
        """

        # Out-of-band messages go to the fd passed by the caller, or stderr.
        if self.args.oob_fd:
            oob_file = os.fdopen(self.args.oob_fd, 'w')
        else:
            oob_file = sys.stderr

        # config the python logging
        # FIXME: move to lava-tool
        # XXX: this is horrible, but: undo the logging setup lava-tool has
        # done.
        del logging.root.handlers[:]
        del logging.root.filters[:]
        FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
        DATEFMT = '%Y-%m-%d %I:%M:%S %p'
        logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
        try:
            self.config = get_config()
        except CommandError as e:
            # Report config errors to output.txt when possible so the
            # failure is captured with the job output.
            if self.args.output_dir:
                reporter = os.path.join(self.args.output_dir, "output.txt")
                with open(reporter, 'a') as f:
                    f.write("Configuration error: %s\n" % e)
            else:
                print(e)
            exit(1)
        logging.root.setLevel(self.config.logging_level)

        # Set process id if job-id was passed to dispatcher
        if self.args.job_id:
            try:
                from setproctitle import getproctitle, setproctitle
            except ImportError:
                # Fix: log message read "Unable to set import"; reworded
                # to match the equivalent warning elsewhere in this file.
                logging.warning(
                    ("Unable to import 'setproctitle', "
                     "process name cannot be changed"))
            else:
                setproctitle("%s [job: %s]" % (
                    getproctitle(), self.args.job_id))

        # Load the job file
        job_runner, job_data = self.parse_job_file(self.args.job_file, oob_file)

        # detect multinode and start a NodeDispatcher to work with the LAVA Coordinator.
        if not self.args.validate:
            if 'target_group' in job_data:
                node = NodeDispatcher(job_data, oob_file, self.args.output_dir)
                node.run()
                # the NodeDispatcher has started and closed.
                exit(0)
        if self.args.target is None:
            if 'target' not in job_data:
                logging.error("The job file does not specify a target device. "
                              "You must specify one using the --target option.")
                exit(1)
        else:
            job_data['target'] = self.args.target
        if self.args.output_dir and not os.path.isdir(self.args.output_dir):
            os.makedirs(self.args.output_dir)

        job_runner(job_data, oob_file, self.config, self.args.output_dir, self.args.validate)
Exemplo n.º 15
0
 def test_server_ip(self):
     """get_config() must expose LAVA_SERVER_IP as lava_server_ip."""
     expected = "99.99.99.99"
     create_config('lava-dispatcher.conf', {'LAVA_SERVER_IP': expected})
     server_config = get_config()
     self.assertEqual(expected, server_config.lava_server_ip)
    def invoke(self):
        """
        Entry point for lava dispatch, after the arguments have been parsed.

        Resets the logging lava-tool configured, loads the dispatcher
        config, optionally renames the process, parses the job file and
        hands it to the selected job runner. Calls exit() on configuration
        errors, missing target, or after a multi-node NodeDispatcher run.
        """

        # Out-of-band messages go to the fd passed by the caller, or stderr.
        if self.args.oob_fd:
            oob_file = os.fdopen(self.args.oob_fd, 'w')
        else:
            oob_file = sys.stderr

        # config the python logging
        # FIXME: move to lava-tool
        # XXX: this is horrible, but: undo the logging setup lava-tool has
        # done.
        del logging.root.handlers[:]
        del logging.root.filters[:]
        FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
        DATEFMT = '%Y-%m-%d %I:%M:%S %p'
        logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
        try:
            self.config = get_config()
        except CommandError as e:
            # Report config errors to output.txt when possible so the
            # failure is captured with the job output.
            if self.args.output_dir:
                reporter = os.path.join(self.args.output_dir, "output.txt")
                with open(reporter, 'a') as f:
                    f.write("Configuration error: %s\n" % e)
            else:
                print(e)
            exit(1)
        logging.root.setLevel(self.config.logging_level)

        # Set process id if job-id was passed to dispatcher
        if self.args.job_id:
            try:
                from setproctitle import getproctitle, setproctitle
            except ImportError:
                # Fix: log message read "Unable to set import"; reworded
                # to match the equivalent warning elsewhere in this file.
                logging.warning(("Unable to import 'setproctitle', "
                                 "process name cannot be changed"))
            else:
                setproctitle("%s [job: %s]" %
                             (getproctitle(), self.args.job_id))

        # Load the job file
        job_runner, job_data = self.parse_job_file(self.args.job_file,
                                                   oob_file)

        # detect multinode and start a NodeDispatcher to work with the LAVA Coordinator.
        if not self.args.validate:
            if 'target_group' in job_data:
                node = NodeDispatcher(job_data, oob_file, self.args.output_dir)
                node.run()
                # the NodeDispatcher has started and closed.
                exit(0)
        if self.args.target is None:
            if 'target' not in job_data:
                logging.error(
                    "The job file does not specify a target device. "
                    "You must specify one using the --target option.")
                exit(1)
        else:
            job_data['target'] = self.args.target
        if self.args.output_dir and not os.path.isdir(self.args.output_dir):
            os.makedirs(self.args.output_dir)

        job_runner(job_data, oob_file, self.config, self.args.output_dir,
                   self.args.validate)