Example #1
    def _run_all_tasks(self, job='default', tasks=None, *args):
        '''
        Runs all the tasks in a job in parallel.
        '''
        targets = []
        # Checks if task names are inputted
        if tasks:
            # if tasks was passed as the string form of a Python list, convert it back to a list
            try:
                tasks = eval(tasks)
            except (NameError, SyntaxError):
                pass
            # checks if tasks is string
            if isinstance(tasks, str):
                tasks = tasks.split(',')
            # parses tasks and gets the keywords, args and kwargs
            for task in tasks:
                target_dict = {}
                target_dict['target'] = self._wrapped_f
                list_args = self._jobs[job].get(task, [])
                if not list_args:
                    raise TobyException("Task " + task + " is not defined")
                target_dict['args'] = []
                target_dict['args'].extend(list_args)
                target_dict['kwargs'] = {'job': job, 'task': task}
                targets.append(target_dict)
        # checks if a job name is inputted and parses tasks defined in it
        elif job and job != 'default':
            for task in self._jobs[job]:
                target_dict = {}
                target_dict['target'] = self._wrapped_f
                list_args = self._jobs[job].get(task)
                target_dict['args'] = []
                target_dict['args'].extend(list_args)
                target_dict['kwargs'] = {'job': job, 'task': task}
                targets.append(target_dict)
        # checks if keywords are passed directly to run in parallel
        elif args:
            targets = self._resolve_args('run', *args)
        else:
            raise TobyException("No arguments passed to run")
        # Creates threads, waits for thread execution to complete and returns results
        try:
            output = run_multiple(targets)
        except Exception as exp:
            raise TobyException("Unable to run one or more threads: %s" % exp)

        # Logs the output of the threads as a list
        t.log(output)

        # Raises Exception if any of the threads fail
        if False in output:
            raise TobyException("One or more tasks failed during execution")
        else:
            return output
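A minimal, self-contained sketch of the pattern this method relies on: each target dict carries a callable plus its args/kwargs, and the whole list is fanned out across threads. Toby's run_multiple is not reproduced here; the executor and the demo_task function below are illustrative stand-ins only.

from concurrent.futures import ThreadPoolExecutor


def run_targets(targets):
    """Run each {'target', 'args', 'kwargs'} dict in its own thread; keep result order."""
    with ThreadPoolExecutor(max_workers=len(targets)) as pool:
        futures = [
            pool.submit(entry['target'], *entry.get('args', []), **entry.get('kwargs', {}))
            for entry in targets
        ]
        return [future.result() for future in futures]


def demo_task(arg, job=None, task=None):
    # hypothetical task used only for this sketch
    return "%s ran (job=%s, task=%s)" % (arg, job, task)


targets = [
    {'target': demo_task, 'args': ['a1'], 'kwargs': {'job': 'default', 'task': 'one'}},
    {'target': demo_task, 'args': ['a2'], 'kwargs': {'job': 'default', 'task': 'two'}},
]
print(run_targets(targets))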
Example #2
    def reboot(self,
               wait=0,
               mode='shell',
               timeout=None,
               interval=20,
               all=False,
               device_type=None,
               system_nodes=None,
               command_args=None):
        """
        Reboot device
        :param wait:
            *OPTIONAL* Time to sleep before reconnecting, Default value is 0

        :param mode:
            *OPTIONAL* Mode in which reboot needs to be executed. Default is 'shell'. Also supports 'cli'. mode=cli is
            valid only for Junos devices.

        :param timeout:
            *OPTIONAL* Time to reboot and connect to device. Default is set based on the device model.

        :param interval:
            *OPTIONAL* Interval at which reconnection is attempted after the reboot is issued. Default is 20 seconds

        :param all:
            *OPTIONAL* Valid only if the device runs Junos. When set to True, all JUNOS REs are rebooted
                 simultaneously. Default is False, in which case only the current RE is rebooted. If the OS is not
                 JUNOS, all=True raises an exception.

        :param device_type:
            *OPTIONAL* This option works only with 'text' channel.
            Value should be set to 'vmhost' to reboot the vmhost

        :return:
            True if the device is rebooted and reconnection is successful; otherwise an Exception is raised

        Example:
            device_object.reboot(all=True, timeout=200)
            device_object.reboot(all=True)

        """
        if mode.upper() == 'CLI' and self.current_node.current_controller.os.upper() != 'JUNOS':
            raise TobyException("Argument 'mode' can be set to CLI only if device is running Junos")

        if self.is_evo() and mode.upper() == 'CLI':
            # accept both boolean True and the string 'True'/'true'
            if str(all).lower() == 'true':
                evo_version = self.current_node.current_controller.get_version()
                evo_version_num = re.match(r'(\d+\.\d+).*', evo_version, re.I)
                evo_version_num = evo_version_num.group(1)
                if float(evo_version_num) < 19.4:
                    command = "request system shutdown reboot"
                else:
                    command = "request system reboot"
            else:
                current_controller_name = self.get_current_controller_name()
                command = "request node reboot %s" % current_controller_name
            status = self.current_node.current_controller.reboot(
                wait=wait, mode=mode, device_type=device_type, command_args=command_args,
                timeout=timeout, interval=interval, command=command)

            if status:
                response = self.reconnect(all=True, timeout=1200)
                if response:
                    self.set_current_controller(controller='master',
                                                system_node='current')
                    self.log(level='INFO', message='Reboot successful')
                    return True
                else:
                    return False
            else:
                return False

        else:
            if str(all).lower() == 'true':
                if self.current_node.current_controller.os.upper() != 'JUNOS':
                    raise TobyException("Argument 'all' can only be used to reboot Junos devices")
                list_of_dicts = []
                for node_name in self.nodes.keys():
                    for controller in self.nodes[node_name].controllers.values():
                        list_of_dicts.append({
                            'fname': controller.reboot,
                            'kwargs': {
                                'wait': wait,
                                'mode': mode,
                                'timeout': timeout,
                                'device_type': device_type,
                                'command_args': command_args
                            },
                            'interval': interval
                        })
                if False in run_multiple(list_of_dicts):
                    raise TobyException("Unable to reboot all REs of device.")
                else:
                    self.set_current_controller(controller='master',
                                                system_node='current')
                    return True
            elif self.current_node.current_controller.os.upper() == 'JUNOS':
                return self.current_node.current_controller.reboot(
                    wait=wait,
                    mode=mode,
                    timeout=timeout,
                    interval=interval,
                    device_type=device_type,
                    command_args=command_args)
            else:
                return self.current_node.current_controller.reboot(
                    wait=wait, timeout=timeout, interval=interval)
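The EVO branch above chooses its reboot command from the running version string. Below is a small, self-contained sketch of that gate with made-up version strings; the regex and the 19.4 cutoff mirror the code above, but the helper name is invented for this sketch.

import re


def pick_evo_reboot_command(evo_version):
    # versions below 19.4 use the older shutdown form, newer ones the plain reboot
    match = re.match(r'(\d+\.\d+).*', evo_version)
    if match and float(match.group(1)) < 19.4:
        return "request system shutdown reboot"
    return "request system reboot"


print(pick_evo_reboot_command("19.3R2-EVO"))   # request system shutdown reboot
print(pick_evo_reboot_command("20.1R1-EVO"))   # request system reboot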
Example #3
    def software_install(self, **kwargs):
        """
            Software install handle on device

            device_object.software_install(package =
            '/volume/openconfig/trunk/junos-openconfig-x86-32-0.0.0I20161227_1103_rbu-builder.tgz',
            progress = True)
        """
        release = kwargs.pop('release', None)
        issu = kwargs.get('issu', False)
        nssu = kwargs.get('nssu', False)
        reboot = kwargs.get('reboot', True)
        # run_multiple's internal_call flag is the inverse of the 'parallel' option
        internal_call = not kwargs.get("parallel", None)
        controllers_all = kwargs.pop('controllers_all', True)
        kwargs['all_re'] = False
        result_list = []
        if issu is True or nssu is True:
            status = self.current_node.current_controller.software_install(
                **kwargs)
            t.log(level="INFO", message="software_install status: %s" % status)
            if status:
                self.reconnect(all=True, timeout=1200)
            else:
                raise TobyException('ISSU/NSSU failed.', host_obj=self)
        else:
            if controllers_all:
                for node_name in self.nodes:
                    for controller_name in self.nodes[node_name].controllers.keys():
                        result_list.append({
                            "target": self.nodes[node_name].controllers[controller_name].software_install,
                            "delay": 4,
                            "kwargs": kwargs
                        })
                results = run_multiple(targets=result_list,
                                       internal_call=internal_call)
                if results.count(True) == len(result_list):
                    t.log(level="INFO",
                          message="software_install status: %s" % result_list)
                    self.set_current_controller(controller='master',
                                                system_node='current')
                else:
                    raise TobyException('Software Install failed.',
                                        host_obj=self)
            else:
                status = self.current_node.current_controller.software_install(
                    **kwargs)
                if status:
                    if self.vc:
                        self.reconnect(all=True, timeout=1200)
                    t.log(level="INFO",
                          message="software_install status: %s" % status)
                    self.set_current_controller(controller='master',
                                                system_node='current')
                else:
                    raise TobyException('Software Install failed.',
                                        host_obj=self)
        # version check
        if release is not None:
            message = "ISSU/NSSU" if (issu or nssu) else "Junos"
            if check_version(device=self,
                             version=release,
                             operator='ge',
                             all=controllers_all):
                t.log(level="INFO",
                      message="%s Version check passed " % message)
            else:
                t.log(level="ERROR", message="Version check Failed")
                raise TobyException('Version check Failed.', host_obj=self)
        return True
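When controllers_all is set, success is decided by comparing the count of True results to the number of targets. The stand-in below sketches that fan-out and aggregation; fake_software_install is a placeholder, not Toby's software_install, and run_multiple itself is not reproduced.

from concurrent.futures import ThreadPoolExecutor


def fake_software_install(**kwargs):
    # placeholder for controller.software_install(**kwargs)
    return True


result_list = [
    {"target": fake_software_install, "delay": 4, "kwargs": {"package": "junos.tgz"}},
    {"target": fake_software_install, "delay": 4, "kwargs": {"package": "junos.tgz"}},
]

with ThreadPoolExecutor() as pool:
    # 'delay' mirrors the original target dicts; this stand-in executor ignores it
    futures = [pool.submit(entry["target"], **entry["kwargs"]) for entry in result_list]
    results = [future.result() for future in futures]

# mirrors: results.count(True) == len(result_list)
print("install passed:", results.count(True) == len(result_list))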
Example #4
def dtcp_concurrent_trigger_test(**kwargs):
    """

    :param kwargs:
    device_id:                  router device id e.g."r0"
    max_trigger_count:          maximum trigger count, by default is 100, router support maximum 1024
    minimum_rx_percentage:                minimum percentage for received traffic
    maximum_rx_percentage:                maximum percentage for received traffic
    :return:
    """
    t.log("start dtcp concurrent trigger test")
    max_count = int(kwargs.get('max_trigger_count', 100))
    trigger_type = kwargs.get('trigger_type', 'interface_id')
    check_traffic = kwargs.get('verify_traffic', True)
    delay = kwargs.get('delay', 10)
    kwargs['mode'] = 'summary'
    cst.prepare_subscriber_traffic(**kwargs)
    t.log("trying to remove existing LI triggers")
    result = dtcp_list_li_trigger(**kwargs, noraise=True)
    if isinstance(result, list):
        dtcp_delete_li_trigger(criteria_id=result, **kwargs)
    response = get_dtcp_li_candidates(**kwargs)
    if len(response[trigger_type]) < max_count:
        max_count = len(response[trigger_type])
    users = response[trigger_type]
    group_a_count = round(len(users) / 2)
    group_a = random.sample(users, group_a_count)
    t.log("adding the dtcp trigger group first")
    dtcp_add_li_trigger(trigger_list=group_a, **kwargs)
    result = dtcp_list_li_trigger(**kwargs)

    # throwaway objects used as mutable attribute containers for the two trigger groups
    obj1 = lambda: None
    obj2 = lambda: None
    obj1.member = group_a
    obj1.role = 'delete'
    obj1.criteria = result
    obj1.active = True
    group_b = [subs for subs in users if subs not in group_a]
    obj2.member = group_b
    obj2.role = 'add'
    obj2.active = False
    obj2.criteria = []
    for iteration in range(1, int(kwargs.get('iteration', 2)) + 1):
        t.log("start iteration #{} for parallel dtcp test".format(iteration))
        for subscriber in [obj1, obj2]:
            if subscriber.role == 'delete' and subscriber.active:
                dict2 = {
                    'target': dtcp_delete_li_trigger,
                    'delay': delay,
                    'kwargs': {
                        'criteria_id': subscriber.criteria,
                        **kwargs
                    }
                }
            elif subscriber.role == 'add' and not subscriber.active:
                dict1 = {
                    'target': dtcp_add_li_trigger,
                    'delay': delay,
                    'kwargs': {
                        'trigger_list': subscriber.member,
                        'trigger_type': trigger_type,
                        **kwargs
                    }
                }
        try:
            result = run_multiple([dict1, dict2])
        except Exception as exp:
            raise Exception("the parallel run of dtcp failed: %s" % exp)

        if check_traffic:
            t.log("verify traffic in iteration {}".format(iteration))
            duration = kwargs.get('duration', 60)
            if 'duration' in kwargs:
                cst.start_traffic(**kwargs)
            else:
                cst.start_traffic(duration=duration, **kwargs)
            time.sleep(int(duration))
            maximum_percent = kwargs.get('maximum_rx_percentage', 100.5)
            minimum_percent = kwargs.get('minimum_rx_percentage', 99)
            t.log("expecting received traffic will be {} - {} percent in LI".
                  format(minimum_percent, maximum_percent))
            cst.verify_traffic(**kwargs)

        for subscriber in [obj1, obj2]:
            if subscriber.role == 'add' and result[0]:
                subscriber.role = 'delete'
                subscriber.active = True
                subscriber.criteria = result[0]
            else:
                subscriber.role = 'add'
                subscriber.active = False
                subscriber.criteria = []
        t.log(
            "after iteration #{}, current group1 role is {}, group2 role is {}"
            .format(iteration, obj1.role, obj2.role))
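The two lambda objects above serve only as ad-hoc attribute containers for the role-swapping groups. The sketch below shows the same bookkeeping, simplified (criteria handling omitted), with types.SimpleNamespace, which is the idiomatic way to build such throwaway namespaces; the member lists are made up.

from types import SimpleNamespace

group_a = SimpleNamespace(member=['sub1', 'sub2'], role='delete', active=True, criteria=['c1'])
group_b = SimpleNamespace(member=['sub3', 'sub4'], role='add', active=False, criteria=[])

for group in (group_a, group_b):
    if group.role == 'add':
        group.role, group.active = 'delete', True
    else:
        group.role, group.active, group.criteria = 'add', False, []

print(group_a.role, group_b.role)   # add delete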
Example #5
def reboot_device_in_parallel(device_list, **kwargs):
    """Reboot list of devices in parallel

    Supports both JunOS and Linux hosts. For HA setups, all nodes will be rebooted.

    **Pay attention**: except for the option "device_list", all other options are applied to every device. This means
    that if 'device_list' includes both JunOS and Linux devices and 'mode=cli' is set, you will get a RuntimeError
    because Linux does not support that argument.

    :param LIST|TUPLE device_list:
        **REQUIRED** A list of devices to reboot in parallel.

    :param INT|STR wait:
        *OPTIONAL* Wait time to re-connect all devices

    :param STR mode:
        *OPTIONAL* Reboot mode applied to all devices, one of 'shell' or 'cli'. default: 'cli'

            cli - 'request system reboot'
            shell - 'reboot'

    :param INT|STR timeout:
        *OPTIONAL* Timeout to reboot and reconnect device. Default: 480 (sec)

    :param INT|STR interval:
        *OPTIONAL* Re-connect check interval. default: 20 (sec)

    :param STR device_type:
        *OPTIONAL* This option works only for 'cli' mode. Value should be set to 'vmhost' to reboot the vmhost

    :return:
        Return True if all devices rebooted successfully, otherwise return False
    """
    if not isinstance(device_list, (list, tuple)):
        raise ValueError(
            "option 'device_list' must be a list or tuple, but got '{}'".
            format(type(device_list)))

    func_name = TOOL.get_current_function_name()
    device_list[0].log(message=TOOL.print_title(func_name), level="INFO")

    # only forward options the caller actually supplied, so the invoked method's defaults are preserved
    options = {}
    for keyword in ("wait", "mode", "timeout", "interval", "device_type"):
        if keyword in kwargs:
            options[keyword] = kwargs[keyword]

    all_devices = []
    for device in device_list:
        if "node0" in dir(device):
            all_devices.append(device.node0)
            all_devices.append(device.node1)
        else:
            all_devices.append(device)

    all_device_name_list = []
    for device in all_devices:
        if "name" in dir(device):
            all_device_name_list.append(device.name)
        else:
            all_device_name_list.append(
                device.current_node.current_controller.name)

    list_of_dicts = []
    for device in all_devices:
        list_of_dicts.append({
            "fname": device.reboot,
            "kwargs": options,
        })
    reboot_result_list = run_multiple(list_of_dicts)
    return_value = len(all_devices) == reboot_result_list.count(True)

    msg = [
        "{} return value: {}".format(func_name, return_value),
        "Device List:",
    ]
    for hostname, result in zip(all_device_name_list, reboot_result_list):
        msg.append("\t{}: {}".format(hostname, result))

    device_list[0].log(message="\n".join(msg), level="INFO")
    return return_value
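A small, self-contained sketch of the summary report built at the end of this function: device names are zipped with the per-device reboot results, and the overall value is True only when every reboot returned True. The names and results here are made up for illustration.

all_device_name_list = ['r0-node0', 'r0-node1', 'linux-host1']
reboot_result_list = [True, True, False]

return_value = len(all_device_name_list) == reboot_result_list.count(True)

msg = ["reboot_device_in_parallel return value: {}".format(return_value), "Device List:"]
for hostname, result in zip(all_device_name_list, reboot_result_list):
    msg.append("\t{}: {}".format(hostname, result))

print("\n".join(msg))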
Example #6
    def l3_ha_upgrade_init(self, device_1, device_2, **kwargs):
        """Init upgrade environment on L3 HA setup

        1.  checking only 1 image in ~regress/Regression_image and ~regress/HIGHER_image folder
        2.  checking current image version whether same as ~regress/Regression_image folder

        :param OBJECT device_1 and device_2:
            **REQUIRED** 2 L3 HA device handlers.

        :param STR regression_image_folder:
            *OPTIONAL* regression_image_folder. Default: /var/home/regress/Regression_image

        :param STR higher_image_folder:
            *OPTIONAL* higher_image_folder. Default: /var/home/regress/HIGHER_image

        :param BOOL no_copy:
            *OPTIONAL* software install with 'no-copy' option. default: True

        :param BOOL no_validate:
            *OPTIONAL* software install with 'no-validate' option. default: True

        :param INT reboot_timeout:
            *OPTIONAL* The device reboots and reconnects after software installation; this option indicates how many
                       secs to allow for the reboot to finish. This method retries the reconnect every 20 secs until
                       reboot_timeout is reached. default: 600

        :param STR|LIST|TUPLE except_component:
            *OPTIONAL* Skip specific device hardware components such as "SLOT 0 PIC 0", "SLOT 1", etc. For more detail
                       see jnpr.toby.security.chassis.chassis.waiting_for_pic_online

        :param INT check_counter:
            *OPTIONAL* number of times to check that all FPCs are online. default: 20

        :param INT check_interval:
            *OPTIONAL* interval between checks that all FPCs are online. default: 60

        :return:
            Return True if all initialization succeeds, or raise a RuntimeError exception if:

                1.  the Regression_image or HIGHER_image folder has more than 1 image, or no image
                2.  the upgrade to the Regression_image fails
        """
        func_name = self.tool.get_current_function_name()
        device_1.log(message=func_name, level="INFO")

        options = {}
        options["regression_image_folder"] = kwargs.pop(
            "regression_image_folder", self.default["regression_image_folder"])
        options["higher_image_folder"] = kwargs.pop(
            "higher_image_folder", self.default["higher_image_folder"])
        options["no_copy"] = kwargs.pop("no_copy", True)
        options["no_validate"] = kwargs.pop("no_validate", True)
        options["reboot_timeout"] = int(
            kwargs.pop("reboot_timeout", self.default["reboot_timeout"]))
        options["except_component"] = kwargs.pop("except_component", ())
        options["check_counter"] = int(kwargs.pop("check_counter", 20))
        options["check_interval"] = float(kwargs.pop("check_interval", 60))

        names = {
            "device_1": device_1.get_host_name(),
            "device_2": device_2.get_host_name(),
        }
        device_handlers = {
            "device_1": device_1,
            "device_2": device_2,
        }

        # get regression image and higher image's path
        images = {}
        for keyword in device_handlers:
            images[keyword] = self.get_image(
                device=device_handlers[keyword],
                regression_image_folder=options["regression_image_folder"],
                higher_image_folder=options["higher_image_folder"],
                force_get=True,
            )

        if images["device_1"] is False or images["device_2"] is False:
            raise RuntimeError("No proper image found in '{}' and '{}'".format(
                options["regression_image_folder"],
                options["higher_image_folder"]))

        if images["device_1"]["regression_image_filename"] != images[
                "device_2"]["regression_image_filename"]:
            raise RuntimeError(
                "regression image filename are different between device_1 and device_2:\n\tdevice_1: {}\n\tdevice_2: {}"
                .format(images["device_1"]["regression_image_filename"],
                        images["device_2"]["regression_image_filename"]))

        thread_list = []
        for keyword in device_handlers:
            version_info = device_handlers[keyword].get_version_info(
                force_get=True)
            if version_info is False:  # pragma: no cover
                raise RuntimeError(
                    "Get device version info from '{}' failed.".format(
                        keyword))

            if self.issu.compare_version_string(
                    version_info["version"],
                    images[keyword]["regression_image_filename"]) is False:
                device_handlers[keyword].log(
                    message="'{}' version differs from '{}', will upgrade '{}'".format(
                        version_info["version"],
                        images[keyword]["regression_image"],
                        names[keyword]),
                    level="INFO")

                thread_list.append({
                    "fname": device_handlers[keyword].software_install,
                    "kwargs": {
                        "package":
                        images[keyword]["regression_image_filename"],
                        "remote_path": options["regression_image_folder"],
                        "no_copy": options["no_copy"],
                        "validate": not options[
                            "no_validate"],  # user given no_validate which need reverse here
                        "reboot": True,
                        "timeout": options["reboot_timeout"],
                    }
                })

            else:
                device_1.log(
                    message="'{}' version matches '{}', skipping upgrade on '{}'".format(
                        version_info["version"],
                        images[keyword]["regression_image"],
                        names[keyword]),
                    level="INFO")

        # If no upgrade needed, stop immediately
        if not thread_list:
            device_1.log(message="{} return value: True".format(func_name))
            return True

        try:
            run_multiple(thread_list)
        except (RunMultipleTimeoutException,
                RunMultipleException):  # pragma: no cover
            pass

        device_1.log(message="upgrade 2 devices done", level="INFO")

        # make sure all FPCs are online, then check that the node versions match the regression image
        thread_list = []
        for keyword in device_handlers:
            device_handlers[keyword].log(
                message="waiting for '{}' all FPC online...".format(keyword),
                level="INFO")
            thread_list.append({
                "fname": self.chassis.waiting_for_pic_online,
                "kwargs": {
                    "device": device_handlers[keyword],
                    "except_component": options["except_component"],
                    "check_counter": options["check_counter"],
                    "check_interval": options["check_interval"],
                }
            })

        try:
            run_multiple(thread_list)
        except (RunMultipleTimeoutException,
                RunMultipleException):  # pragma: no cover
            pass

        device_1.log(message="waiting all FPC online for 2 devices finished",
                     level="INFO")
        device_1.log(
            message=
            "checking 2 nodes have same software version and match regression image...",
            level="INFO")
        version_info = {
            "device_1": device_1.get_version_info(force_get=True),
            "device_2": device_2.get_version_info(force_get=True),
        }

        if version_info["device_1"]["version"] != version_info["device_2"][
                "version"]:  # pragma: no cover
            raise RuntimeError(
                "2 devices have different version:\n\tdevice_1: {}\n\tdevice_2: {}"
                .format(version_info["device_1"]["version"],
                        version_info["device_2"]["version"]))

        if self.issu.compare_version_string(
                version_info["device_1"]["version"],
                images["device_1"]["regression_image_filename"]) is False:
            raise RuntimeError(
                "device version '{}' differs from regression_image '{}'".format(
                    version_info["device_1"]["version"],
                    images["device_1"]["regression_image_filename"]))  # pragma: no cover

        device_1.log(
            message="both devices' version '{}' matches regression image '{}'".format(
                version_info["device_1"]["version"],
                images["device_1"]["regression_image_filename"]),
            level="INFO",
        )

        device_1.log(message="{} return value: True".format(func_name))
        return True
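compare_version_string's implementation is not shown above; the sketch below is a hypothetical illustration of the kind of check it performs, i.e. whether the running version string matches the version token in the regression image filename. The function name, regex, and sample values are assumptions, not Toby code.

import re


def version_matches_image(running_version, image_filename):
    # hypothetical: pull a Junos-style version token (e.g. 20.1R1.11) out of the filename
    token = re.search(r'\d+\.\d+[A-Z]\d+(?:\.\d+)?', image_filename)
    return bool(token) and token.group(0) == running_version


print(version_matches_image("20.1R1.11", "junos-install-srx-20.1R1.11.tgz"))  # True
print(version_matches_image("19.4R3.5", "junos-install-srx-20.1R1.11.tgz"))   # False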