示例#1
0
 def test_job_create_singlenode(self):
     """
     Build a single-node LavaTestJob inside the dispatcher and check
     its JSON and run-time metadata, so that problems surface before
     a job like this is submitted for real.
     """
     dispatcher_factory = DispatcherFactory()
     qemu_target = dispatcher_factory.create_qemu_target(self.name, {})
     self.assertEqual(qemu_target.config.hostname, self.name)
     job_json = dispatcher_factory.singlenode_jobdata()
     self.assertNotEqual(job_json, None)
     job_dict = json.loads(job_json)
     self.assertEqual(job_dict['health_check'], False)
     validate_job_data(job_dict)
     # a single-node job must carry no MultiNode or VM markers
     for multinode_key in ('target_group', 'is_vmhost'):
         self.assertNotIn(multinode_key, job_dict)
     test_job = LavaTestJob(job_json, sys.stderr, get_config(), None)
     self.assertEqual(test_job.target, qemu_target.config.hostname)
     self.assertIsNotNone(get_all_cmds())
     # FIXME: would be useful to not have the metadata population only accessible via LavaTestJob.run()
     test_job.run()
     metadata = test_job.context.test_data.metadata
     self.assertEqual(metadata['target'], self.name)
     self.assertEqual(metadata['target.hostname'], self.name)
     # none of the MultiNode/VM metadata keys may leak into a single-node run
     for multinode_key in ('is_vmhost', 'host_ip', 'target_group', 'vm_group'):
         self.assertNotIn(multinode_key, metadata)
示例#2
0
def run_legacy_job(job_data, oob_file, config, output_dir, validate):
    """
    Run (or merely validate) a legacy dispatcher job.

    :param job_data: job definition as a Python dict
    :param oob_file: stream for out-of-band output
    :param config: dispatcher configuration object
    :param output_dir: directory for job output files
    :param validate: when True, only validate the job data; do not run
    """
    # dispatching drives real devices, so root is mandatory
    if os.getuid() != 0:
        logging.error("lava dispatch has to be run as root")
        exit(1)

    job = LavaTestJob(json.dumps(job_data), oob_file, config, output_dir)

    # FIXME Return status
    if not validate:
        job.run()
        return
    try:
        validate_job_data(job.job_data)
    except ValidationError as e:
        print(e)
示例#3
0
def run_legacy_job(job_data, oob_file, config, output_dir, validate):
    """
    Run (or only validate) a legacy dispatcher job.

    :param job_data: job definition as a Python dict
    :param oob_file: stream for out-of-band output
    :param config: dispatcher configuration object
    :param output_dir: directory for job output files
    :param validate: when True, only validate the job data; do not run
    """
    # dispatching drives real devices, so root is mandatory
    if os.getuid() != 0:
        logging.error("lava dispatch has to be run as root")
        exit(1)

    json_job_data = json.dumps(job_data)
    job = LavaTestJob(json_job_data, oob_file, config, output_dir)

    # FIXME Return status
    if validate:
        try:
            validate_job_data(job.job_data)
        except ValidationError as e:
            # fixed: 'print e' is Python 2 statement syntax (a SyntaxError
            # under Python 3); the rest of this code uses print() calls.
            print(e)
    else:
        job.run()
示例#4
0
 def run_tests(self, json_jobdata, group_data):
     """
     Create and start the LavaTestJob for this node once the MultiNode
     group has formed; bail out early if the coordinator nacked the
     group registration.
     """
     # a 'nack' means the rest of the group never registered in time
     if group_data.get('response') == 'nack':
         logging.error("Unable to initiliase a Multi-Node group - timed out waiting for other devices.")
         return
     dispatcher_config = get_config()
     # the job definition may override the configured logging level
     level = json_jobdata.get("logging_level", dispatcher_config.logging_level)
     logging.root.setLevel(level)
     if 'target' not in json_jobdata:
         logging.error("The job file does not specify a target device.")
         exit(1)
     if self.output_dir and not os.path.isdir(self.output_dir):
         os.makedirs(self.output_dir)
     node_job = LavaTestJob(json.dumps(json_jobdata), self.oob_file,
                            dispatcher_config, self.output_dir)
     # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
     node_job.run(self, group_data, vm_host_ip=self.vm_host_ip)
示例#5
0
    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job,
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # the job definition may override the configured logging level
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'",
                     self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # blocks here until the coordinator reports the whole group registered
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s",
                     self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # record the host IP so run_tests can hand it to the job
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host,
                             self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))
 def run_tests(self, json_jobdata, group_data):
     """
     Create and start the LavaTestJob for this node once the MultiNode
     group has formed; aborts if the coordinator nacked the group.
     """
     if 'response' in group_data and group_data['response'] == 'nack':
         logging.error(
             "Unable to initiliase a Multi-Node group - timed out waiting for other devices."
         )
         return
     config = get_config()
     # the job definition may override the configured logging level
     if 'logging_level' in json_jobdata:
         logging.root.setLevel(json_jobdata["logging_level"])
     else:
         logging.root.setLevel(config.logging_level)
     if 'target' not in json_jobdata:
         logging.error("The job file does not specify a target device.")
         exit(1)
     jobdata = json.dumps(json_jobdata)
     if self.output_dir and not os.path.isdir(self.output_dir):
         os.makedirs(self.output_dir)
     job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
     # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
     job.run(self, group_data, vm_host_ip=self.vm_host_ip)
示例#7
0
    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job,
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # the job definition may override the configured logging level
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'", self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # blocks here until the coordinator reports the whole group registered
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s", self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # record the host IP so run_tests can hand it to the job
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host, self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))
示例#8
0
 def test_job_create_multinode(self):
     """
     Build one node's share of a MultiNode LavaTestJob inside the
     dispatcher, without talking to the lava-coordinator, so that
     problems are caught before the job starts to run.
     The job produced here is a single member of the group and
     cannot be expected to run as-is.
     """
     dispatcher_factory = DispatcherFactory()
     group_size = len(self.device_names)
     for device_name in self.device_names:
         qemu_target = dispatcher_factory.create_qemu_target(device_name, {})
         job_json = dispatcher_factory.multinode_jobdata(group_size)
         self.assertEqual(qemu_target.config.hostname, device_name)
         self.assertNotEqual(job_json, None)
         job_dict = json.loads(job_json)
         self.assertEqual(job_dict['health_check'], False)
         validate_job_data(job_dict)
         # MultiNode jobs carry a target_group but no vmhost marker
         self.assertIn('target_group', job_dict)
         self.assertNotIn('is_vmhost', job_dict)
         node_job = LavaTestJob(job_json, sys.stderr, get_config(), None)
         self.assertEqual(node_job.target, qemu_target.config.hostname)
示例#9
0
class NodeDispatcher(object):
    """
    Client-side dispatcher for one node of a MultiNode job.

    Registers the node with the lava-coordinator group, relays the
    lava_* messaging API (sync/wait/send/bundle aggregation) through
    the Poller, and starts the LavaTestJob once the whole group has
    registered.
    """

    group_name = ''  # target_group identifier shared by the whole group
    client_name = ''  # this node's name (the job 'target')
    group_size = 0  # number of nodes expected in the group
    target = ''  # device designation for this node
    role = ''  # optional role of this node within the group
    poller = None  # Poller used for all coordinator traffic
    oob_file = sys.stderr  # stream for out-of-band output
    output_dir = None  # directory for job output files
    base_msg = None  # fields common to every coordinator message
    json_data = None  # the (modified) job definition for this node
    vm_host_ip = None  # IP of the VM host, filled in for dynamic VMs
    is_dynamic_vm = False  # True when this node is a temporary VM device

    def __init__(self, json_data, oob_file=sys.stderr, output_dir=None):
        """
        Parse the modified JSON to identify the group name,
        requested port for the group - node comms
        and get the designation for this node in the group.

        :param json_data: modified job definition (dict) for this node
        :param oob_file: stream used for out-of-band output
        :param output_dir: directory for job output files
        :raises ValueError: if mandatory MultiNode fields are missing
        """
        settings = readSettings("/etc/lava-coordinator/lava-coordinator.conf")
        self.json_data = json_data
        # FIXME: do this with a schema once the API settles
        if 'target_group' not in json_data:
            raise ValueError(
                "Invalid JSON to work with the MultiNode Coordinator: no target_group."
            )
        self.group_name = json_data['target_group']
        if 'group_size' not in json_data:
            raise ValueError(
                "Invalid JSON to work with the Coordinator: no group_size")
        self.group_size = json_data["group_size"]
        if 'target' not in json_data:
            raise ValueError(
                "Invalid JSON for a child node: no target designation.")
        self.target = json_data['target']
        if 'timeout' not in json_data:
            raise ValueError("Invalid JSON - no default timeout specified.")
        if "sub_id" not in json_data:
            logging.info(
                "Error in JSON - no sub_id specified. Results cannot be aggregated."
            )
            json_data['sub_id'] = None
        if 'port' in json_data:
            # lava-coordinator provides a conffile for the port and blocksize.
            logging.debug(
                "Port is no longer supported in the incoming JSON. Using %d",
                settings["port"])
        if 'role' in json_data:
            self.role = json_data['role']
        # look for a vm temporary device - vm_host is managed in boot_linaro_image.
        if 'is_vmhost' in json_data and not json_data['is_vmhost']:
            self.is_dynamic_vm = True
        # hostname of the server for the connection.
        if 'hostname' in json_data:
            # lava-coordinator provides a conffile for the group_hostname
            logging.debug(
                "Coordinator hostname is no longer supported in the incoming JSON. Using %s",
                settings['coordinator_hostname'])
        self.base_msg = {
            "port": settings['port'],
            "blocksize": settings['blocksize'],
            "poll_delay": settings["poll_delay"],
            "timeout": json_data['timeout'],
            "host": settings['coordinator_hostname'],
            "client_name": json_data['target'],
            "group_name": json_data['target_group'],
            # hostname here is the node hostname, not the server.
            "hostname": gethostname(),
            "role": self.role,
        }
        self.client_name = json_data['target']
        self.poller = Poller(json.dumps(self.base_msg))
        self.oob_file = oob_file
        self.output_dir = output_dir
        self.job = None

    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job,
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # the job definition may override the configured logging level
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'",
                     self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # blocks here until the coordinator reports the whole group registered
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s",
                     self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # record the host IP so run_tests can hand it to the job
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host,
                             self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))

    def __call__(self, args):
        """ Makes the NodeDispatcher callable so that the test shell can send messages just using the
        NodeDispatcher object.
        This function blocks until the specified API call returns. Some API calls may involve a
        substantial period of polling.
        :param args: JSON string of the arguments of the API call to make
        :return: A Python object containing the reply dict from the API call
        """
        try:
            return self._select(json.loads(args))
        except KeyError:
            logging.warning("Unable to handle request for: %s", args)

    def _select(self, json_data):
        """ Determines which API call has been requested, makes the call, blocks and returns the reply.
        :param json_data: Python object of the API call
        :return: Python object containing the reply dict.
        """
        reply_str = ''
        if not json_data:
            logging.debug("Empty args")
            return
        if 'request' not in json_data:
            logging.debug("Bad call")
            return
        if json_data["request"] == "aggregate":
            # no message processing here, just the bundles.
            return self._aggregation(json_data)
        # all remaining requests must carry a messageID (KeyError is
        # caught by __call__ and logged as an unhandled request)
        messageID = json_data['messageID']
        if json_data['request'] == "lava_sync":
            logging.info("requesting lava_sync '%s'", messageID)
            reply_str = self.request_sync(messageID)
        elif json_data['request'] == 'lava_wait':
            logging.info("requesting lava_wait '%s'", messageID)
            reply_str = self.request_wait(messageID)
        elif json_data['request'] == 'lava_wait_all':
            if 'role' in json_data and json_data['role'] is not None:
                reply_str = self.request_wait_all(messageID, json_data['role'])
                logging.info("requesting lava_wait_all '%s' '%s'", messageID,
                             json_data['role'])
            else:
                logging.info("requesting lava_wait_all '%s'", messageID)
                reply_str = self.request_wait_all(messageID)
        elif json_data['request'] == "lava_send":
            logging.info("requesting lava_send %s", messageID)
            reply_str = self.request_send(messageID, json_data['message'])
        reply = json.loads(str(reply_str))
        if 'message' in reply:
            return reply['message']
        else:
            return reply['response']

    def _aggregation(self, json_data):
        """ Internal call to send the bundle message to the coordinator so that the node
        with sub_id zero will get the complete bundle and everyone else a blank bundle.
        :param json_data: Arbitrary data from the job which will form the result bundle
        """
        if json_data["bundle"] is None:
            logging.info("Notifying LAVA Coordinator of job completion")
        else:
            logging.info("Passing results bundle to LAVA Coordinator.")
        reply_str = self._send(json_data)
        reply = json.loads(str(reply_str))
        if 'message' in reply:
            return reply['message']
        else:
            return reply['response']

    def _send(self, msg):
        """ Internal call to perform the API call via the Poller.
        :param msg: The call-specific message to be wrapped in the base_msg primitive.
        :return: Python object of the reply dict.
        """
        # deepcopy so that per-call fields never leak back into base_msg
        new_msg = copy.deepcopy(self.base_msg)
        new_msg.update(msg)
        if 'bundle' in new_msg:
            # bundles can be very large; do not log the payload
            logging.debug("sending result bundle")
        else:
            logging.debug("sending Message %s", json.dumps(new_msg))
        return self.poller.poll(json.dumps(new_msg))

    def request_wait_all(self, messageID, role=None):
        """
        Asks the Coordinator to send back a particular messageID
        and blocks until that messageID is available for all nodes in
        this group or all nodes with the specified role in this group.
        """
        # FIXME: if this node has not called request_send for the
        # messageID used for a wait_all, the node should log a warning
        # of a broken test definition.
        if role:
            return self._send({
                "request": "lava_wait_all",
                "messageID": messageID,
                "waitrole": role
            })
        else:
            return self._send({
                "request": "lava_wait_all",
                "messageID": messageID
            })

    def request_wait(self, messageID):
        """
        Asks the Coordinator to send back a particular messageID
        and blocks until that messageID is available for this node
        """
        # use self.target as the node ID
        wait_msg = {
            "request": "lava_wait",
            "messageID": messageID,
            "nodeID": self.target
        }
        return self._send(wait_msg)

    def request_send(self, messageID, message):
        """
        Sends a message to the group via the Coordinator. The
        message is guaranteed to be available to all members of the
        group. The message is only picked up when a client in the group
        calls lava_wait or lava_wait_all.
        The message needs to be formatted JSON, not a simple string.
        { "messageID": "string", "message": { "key": "value"} }
        The message can consist of just the messageID:
        { "messageID": "string" }
        """
        send_msg = {
            "request": "lava_send",
            "messageID": messageID,
            "message": message
        }
        return self._send(send_msg)

    def request_sync(self, msg):
        """
        Creates and send a message requesting lava_sync
        """
        sync_msg = {"request": "lava_sync", "messageID": msg}
        return self._send(sync_msg)

    def run_tests(self, json_jobdata, group_data):
        """
        Start the LavaTestJob created in run() for this node; aborts
        if the coordinator nacked the group registration.
        """
        if 'response' in group_data and group_data['response'] == 'nack':
            logging.error(
                "Unable to initiliase a Multi-Node group - timed out waiting for other devices."
            )
            return
        if 'target' not in json_jobdata:
            logging.error("The job file does not specify a target device.")
            exit(1)
        # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
        self.job.run(self, group_data, vm_host_ip=self.vm_host_ip)
示例#10
0
class NodeDispatcher(object):
    """
    Client-side dispatcher for one node of a MultiNode job.

    Registers the node with the lava-coordinator group, relays the
    lava_* messaging API (sync/wait/send/bundle aggregation) through
    the Poller, and starts the LavaTestJob once the whole group has
    registered.
    """

    group_name = ''  # target_group identifier shared by the whole group
    client_name = ''  # this node's name (the job 'target')
    group_size = 0  # number of nodes expected in the group
    target = ''  # device designation for this node
    role = ''  # optional role of this node within the group
    poller = None  # Poller used for all coordinator traffic
    oob_file = sys.stderr  # stream for out-of-band output
    output_dir = None  # directory for job output files
    base_msg = None  # fields common to every coordinator message
    json_data = None  # the (modified) job definition for this node
    vm_host_ip = None  # IP of the VM host, filled in for dynamic VMs
    is_dynamic_vm = False  # True when this node is a temporary VM device

    def __init__(self, json_data, oob_file=sys.stderr, output_dir=None):
        """
        Parse the modified JSON to identify the group name,
        requested port for the group - node comms
        and get the designation for this node in the group.

        :param json_data: modified job definition (dict) for this node
        :param oob_file: stream used for out-of-band output
        :param output_dir: directory for job output files
        :raises ValueError: if mandatory MultiNode fields are missing
        """
        settings = readSettings("/etc/lava-coordinator/lava-coordinator.conf")
        self.json_data = json_data
        # FIXME: do this with a schema once the API settles
        if 'target_group' not in json_data:
            raise ValueError("Invalid JSON to work with the MultiNode Coordinator: no target_group.")
        self.group_name = json_data['target_group']
        if 'group_size' not in json_data:
            raise ValueError("Invalid JSON to work with the Coordinator: no group_size")
        self.group_size = json_data["group_size"]
        if 'target' not in json_data:
            raise ValueError("Invalid JSON for a child node: no target designation.")
        self.target = json_data['target']
        if 'timeout' not in json_data:
            raise ValueError("Invalid JSON - no default timeout specified.")
        if "sub_id" not in json_data:
            logging.info("Error in JSON - no sub_id specified. Results cannot be aggregated.")
            json_data['sub_id'] = None
        if 'port' in json_data:
            # lava-coordinator provides a conffile for the port and blocksize.
            logging.debug("Port is no longer supported in the incoming JSON. Using %d", settings["port"])
        if 'role' in json_data:
            self.role = json_data['role']
        # look for a vm temporary device - vm_host is managed in boot_linaro_image.
        if 'is_vmhost' in json_data and not json_data['is_vmhost']:
            self.is_dynamic_vm = True
        # hostname of the server for the connection.
        if 'hostname' in json_data:
            # lava-coordinator provides a conffile for the group_hostname
            logging.debug("Coordinator hostname is no longer supported in the incoming JSON. Using %s",
                          settings['coordinator_hostname'])
        self.base_msg = {"port": settings['port'],
                         "blocksize": settings['blocksize'],
                         "poll_delay": settings["poll_delay"],
                         "timeout": json_data['timeout'],
                         "host": settings['coordinator_hostname'],
                         "client_name": json_data['target'],
                         "group_name": json_data['target_group'],
                         # hostname here is the node hostname, not the server.
                         "hostname": gethostname(),
                         "role": self.role,
                         }
        self.client_name = json_data['target']
        self.poller = Poller(json.dumps(self.base_msg))
        self.oob_file = oob_file
        self.output_dir = output_dir
        self.job = None

    def run(self):
        """
        Initialises the node into the group, registering the group if necessary
        (via group_size) and *waiting* until the rest of the group nodes also
        register before starting the actual job,
        Temporary devices in a vm_group do not begin running tests until
        the host is ready.
        """
        jobdata = json.dumps(self.json_data)
        config = get_config()
        # the job definition may override the configured logging level
        if 'logging_level' in self.json_data:
            logging.root.setLevel(self.json_data["logging_level"])
        else:
            logging.root.setLevel(config.logging_level)
        # create the job so that logging is enabled, start the job later.
        self.job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
        init_msg = {"request": "group_data", "group_size": self.group_size}
        init_msg.update(self.base_msg)
        logging.info("Starting Multi-Node communications for group '%s'", self.group_name)
        logging.debug("init_msg %s", json.dumps(init_msg))
        # blocks here until the coordinator reports the whole group registered
        response = json.loads(self.poller.poll(json.dumps(init_msg)))
        logging.info("Starting the test run for %s in group %s", self.client_name, self.group_name)

        # if this is a temporary device, wait for lava_vm_start from host
        # before starting job
        if self.is_dynamic_vm:
            logging.info("Waiting for host IP address ...")
            host_info = self.request_wait("lava_vm_start")  # blocking call
            host_data = json.loads(host_info)["message"]
            logging.info("Host data: %r", host_data)
            # record the host IP so run_tests can hand it to the job
            for host in host_data:
                self.vm_host_ip = host_data[host]['host_ip']
                logging.info("Host %s has IP address %s", host, self.vm_host_ip)

        self.run_tests(self.json_data, response)
        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
        fin_msg = {"request": "clear_group", "group_size": self.group_size}
        fin_msg.update(self.base_msg)
        logging.debug("fin_msg %s", json.dumps(fin_msg))
        self.poller.poll(json.dumps(fin_msg))

    def __call__(self, args):
        """ Makes the NodeDispatcher callable so that the test shell can send messages just using the
        NodeDispatcher object.
        This function blocks until the specified API call returns. Some API calls may involve a
        substantial period of polling.
        :param args: JSON string of the arguments of the API call to make
        :return: A Python object containing the reply dict from the API call
        """
        try:
            return self._select(json.loads(args))
        except KeyError:
            logging.warning("Unable to handle request for: %s", args)

    def _select(self, json_data):
        """ Determines which API call has been requested, makes the call, blocks and returns the reply.
        :param json_data: Python object of the API call
        :return: Python object containing the reply dict.
        """
        reply_str = ''
        if not json_data:
            logging.debug("Empty args")
            return
        if 'request' not in json_data:
            logging.debug("Bad call")
            return
        if json_data["request"] == "aggregate":
            # no message processing here, just the bundles.
            return self._aggregation(json_data)
        # all remaining requests must carry a messageID (KeyError is
        # caught by __call__ and logged as an unhandled request)
        messageID = json_data['messageID']
        if json_data['request'] == "lava_sync":
            logging.info("requesting lava_sync '%s'", messageID)
            reply_str = self.request_sync(messageID)
        elif json_data['request'] == 'lava_wait':
            logging.info("requesting lava_wait '%s'", messageID)
            reply_str = self.request_wait(messageID)
        elif json_data['request'] == 'lava_wait_all':
            if 'role' in json_data and json_data['role'] is not None:
                reply_str = self.request_wait_all(messageID, json_data['role'])
                logging.info("requesting lava_wait_all '%s' '%s'", messageID, json_data['role'])
            else:
                logging.info("requesting lava_wait_all '%s'", messageID)
                reply_str = self.request_wait_all(messageID)
        elif json_data['request'] == "lava_send":
            logging.info("requesting lava_send %s", messageID)
            reply_str = self.request_send(messageID, json_data['message'])
        reply = json.loads(str(reply_str))
        if 'message' in reply:
            return reply['message']
        else:
            return reply['response']

    def _aggregation(self, json_data):
        """ Internal call to send the bundle message to the coordinator so that the node
        with sub_id zero will get the complete bundle and everyone else a blank bundle.
        :param json_data: Arbitrary data from the job which will form the result bundle
        """
        if json_data["bundle"] is None:
            logging.info("Notifying LAVA Coordinator of job completion")
        else:
            logging.info("Passing results bundle to LAVA Coordinator.")
        reply_str = self._send(json_data)
        reply = json.loads(str(reply_str))
        if 'message' in reply:
            return reply['message']
        else:
            return reply['response']

    def _send(self, msg):
        """ Internal call to perform the API call via the Poller.
        :param msg: The call-specific message to be wrapped in the base_msg primitive.
        :return: Python object of the reply dict.
        """
        # deepcopy so that per-call fields never leak back into base_msg
        new_msg = copy.deepcopy(self.base_msg)
        new_msg.update(msg)
        if 'bundle' in new_msg:
            # bundles can be very large; do not log the payload
            logging.debug("sending result bundle")
        else:
            logging.debug("sending Message %s", json.dumps(new_msg))
        return self.poller.poll(json.dumps(new_msg))

    def request_wait_all(self, messageID, role=None):
        """
        Asks the Coordinator to send back a particular messageID
        and blocks until that messageID is available for all nodes in
        this group or all nodes with the specified role in this group.
        """
        # FIXME: if this node has not called request_send for the
        # messageID used for a wait_all, the node should log a warning
        # of a broken test definition.
        if role:
            return self._send({"request": "lava_wait_all",
                               "messageID": messageID,
                               "waitrole": role})
        else:
            return self._send({"request": "lava_wait_all",
                              "messageID": messageID})

    def request_wait(self, messageID):
        """
        Asks the Coordinator to send back a particular messageID
        and blocks until that messageID is available for this node
        """
        # use self.target as the node ID
        wait_msg = {"request": "lava_wait",
                    "messageID": messageID,
                    "nodeID": self.target}
        return self._send(wait_msg)

    def request_send(self, messageID, message):
        """
        Sends a message to the group via the Coordinator. The
        message is guaranteed to be available to all members of the
        group. The message is only picked up when a client in the group
        calls lava_wait or lava_wait_all.
        The message needs to be formatted JSON, not a simple string.
        { "messageID": "string", "message": { "key": "value"} }
        The message can consist of just the messageID:
        { "messageID": "string" }
        """
        send_msg = {"request": "lava_send",
                    "messageID": messageID,
                    "message": message}
        return self._send(send_msg)

    def request_sync(self, msg):
        """
        Creates and send a message requesting lava_sync
        """
        sync_msg = {"request": "lava_sync", "messageID": msg}
        return self._send(sync_msg)

    def run_tests(self, json_jobdata, group_data):
        """
        Start the LavaTestJob created in run() for this node; aborts
        if the coordinator nacked the group registration.
        """
        if 'response' in group_data and group_data['response'] == 'nack':
            logging.error("Unable to initiliase a Multi-Node group - timed out waiting for other devices.")
            return
        if 'target' not in json_jobdata:
            logging.error("The job file does not specify a target device.")
            exit(1)
        # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
        self.job.run(self, group_data, vm_host_ip=self.vm_host_ip)