Example #1
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with tqdm(
                total=len(hosts),
                desc="progress",
        ) as progress:
            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update()
                    if worker_result.failed:
                        tqdm.write(f"{worker_result.host.name}: failure")
                    else:
                        tqdm.write(f"{worker_result.host.name}: success")

        return result
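
The runner above only covers the execution side. To use a custom runner class like this with Nornir 3.x it must also be registered and then selected by name; below is a minimal sketch of that wiring, assuming Nornir 3.x. The plugin name "serial_demo" and the DemoRunner class are hypothetical stand-ins, while RunnersPluginRegister and InitNornir are Nornir's own API.

from typing import List

from nornir import InitNornir
from nornir.core.inventory import Host
from nornir.core.plugins.runners import RunnersPluginRegister
from nornir.core.task import AggregatedResult, Task


class DemoRunner:
    """Hypothetical runner: runs the task host by host, same interface as above."""

    def __init__(self, num_workers: int = 1) -> None:
        self.num_workers = num_workers

    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        for host in hosts:
            result[host.name] = task.copy().start(host)
        return result


# register the runner under a name, then select it when initializing Nornir
RunnersPluginRegister.register("serial_demo", DemoRunner)
nr = InitNornir(
    runner={"plugin": "serial_demo", "options": {"num_workers": 1}},
    # inventory configuration omitted for brevity
)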
Example #2
def general_result():
    task_name = "napalm_ping"
    confirmation_result = create_result(result_content="All pings executed")
    timeouted = create_result(
        TIMEOUT_MESSAGE,
        host="R3",
        destination=IP_6,
        failed=True,
        exception=ConnectionException(f"Cannot connect to {IP_6}"),
    )
    general_result = AggregatedResult(task_name)
    general_result["R1"] = create_multi_result(
        results=[
            confirmation_result,
            ping_r1_1.create_nornir_result(),
            ping_r1_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    general_result["R2"] = create_multi_result(
        results=[confirmation_result,
                 ping_r2.create_nornir_result()],
        task_name=task_name,
    )
    general_result["R3"] = create_multi_result(
        results=[
            confirmation_result,
            ping_r3.create_nornir_result(), timeouted
        ],
        task_name=task_name,
    )
    return general_result
Example #3
def print_result(
    result: Result,
    host: Optional[str] = None,
    nr_vars: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
) -> None:
    updated_agg_result = AggregatedResult(result.name)
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for r in multi_result:
            if isinstance(r.result, str) and r.result.startswith("Task skipped"):
                continue
            updated_multi_result.append(r)
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result

    if not updated_agg_result:
        return

    LOCK.acquire()
    try:
        _print_result(updated_agg_result, host, nr_vars, failed,
                      severity_level)
    finally:
        LOCK.release()
Example #4
def test_nornsible_print_task_no_results():
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="Task skipped", failed=False, changed=False)
    )
    output = print_result(test_result)
    assert output is None
Example #5
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [create_result(nornir_raw_result_r1, task_name)], task_name)
    result["R2"] = create_multi_result(
        [create_result(nornir_raw_result_r2, task_name)], task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #6
def test_nornsible_print_task_results(capfd):
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="stuff happening!", failed=False, changed=False)
    )
    print_result(test_result)
    std_out, std_err = capfd.readouterr()
    assert "stuff happening" in std_out
def general_result(timeouted_multiresult):
    task_name = "netmiko_send_command"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [create_result(raw_nornir_result_r1, task_name)], task_name)
    result["R2"] = create_multi_result(
        [create_result(raw_nornir_result_r2, task_name)], task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #8
def general_result(timeouted_multiresult):
    task_name = "napalm_get_facts"
    result = AggregatedResult(task_name)
    result["S1"] = create_multi_result(
        [config_s1.create_nornir_result()],
        task_name,
    )
    result["S3"] = timeouted_multiresult
    return result
Example #9
def general_result(timeouted_multiresult):
    task_name = "napalm_get_facts"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [users_r1_1.create_nornir_result(),
         users_r1_2.create_nornir_result()],
        task_name,
    )
    result["R2"] = create_multi_result([users_r2.create_nornir_result()],
                                       task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #10
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        futures = []
        with ThreadPoolExecutor(self.num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host)
                futures.append(future)

        for future in futures:
            worker_result = future.result()
            result[worker_result.host.name] = worker_result
        return result
Example #11
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        connectors_q = queue.Queue()
        work_q = queue.Queue()
        result = AggregatedResult(task.name)
        # enqueue hosts in connectors queue
        for host in hosts:
            connectors_q.put(
                (task.copy(), host, {"connection_retry": 0, "task_retry": 0}, result)
            )
        # start connector threads
        connector_threads = []
        for i in range(self.num_connectors):
            t = threading.Thread(target=self.connector, args=(connectors_q, work_q))
            t.start()
            connector_threads.append(t)
        # start worker threads
        worker_threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self.worker, args=(connectors_q, work_q))
            t.start()
            worker_threads.append(t)
        # wait until all hosts have completed the task or the timeout is reached
        start_time = time.time()
        while True:
            with LOCK:
                hosts_no_result = [h.name for h in hosts if h.name not in result]
            if hosts_no_result == []:
                break
            if time.time() - start_time > self.task_timeout:
                log.error(
                    "RetryRunner task '{}', '{}' seconds wait timeout reached, "
                    "hosts that did not return results '{}'".format(
                        task.name, self.task_timeout, hosts_no_result
                    )
                )
                break
            time.sleep(0.1)
        # block until both queues are empty
        connectors_q.join()
        work_q.join()
        # stop connector threads
        for i in range(self.num_connectors):
            connectors_q.put(None)
        for t in connector_threads:
            t.join()
        # stop worker threads
        for i in range(self.num_workers):
            work_q.put(None)
        for t in worker_threads:
            t.join()
        # drop references to the queues and threads
        del connectors_q, work_q, connector_threads, worker_threads
        return result
Example #12
    def _run_parallel(self, task: Task, hosts, num_workers, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.name)

        pool = Pool(processes=num_workers)
        result_pool = [
            pool.apply_async(task.copy().start, args=(h, self)) for h in hosts
        ]
        pool.close()
        pool.join()

        for rp in result_pool:
            r = rp.get()
            result[r.host.name] = r
        return result
Example #13
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        results=[
            bgp_r1_1.create_nornir_result(),
            bgp_r1_2.create_nornir_result()
        ],
        task_name=task_name,
    )
    result["R2"] = create_multi_result(results=[bgp_r2.create_nornir_result()],
                                       task_name=task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #14
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:

        result = AggregatedResult(task.name)
        greenlets = []

        pool = Pool(self.num_workers)
        for host in hosts:
            greenlet = pool.spawn(task.copy().start, host)
            greenlets.append(greenlet)
        pool.join()

        for greenlet in greenlets:
            worker_result = greenlet.get()
            result[worker_result.host.name] = worker_result

        return result
Example #15
    def _run_parallel(
        self,
        task: Task,
        hosts: List["Host"],
        num_workers: int,
        **kwargs: Dict[str, Any],
    ) -> AggregatedResult:
        agg_result = AggregatedResult(kwargs.get("name") or task.name)
        futures = []
        with ThreadPoolExecutor(num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host, self)
                futures.append(future)

        for future in futures:
            worker_result = future.result()
            agg_result[worker_result.host.name] = worker_result
        return agg_result
Example #16
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [
            interfaces_r1_1.create_nornir_result(),
            interfaces_r1_2.create_nornir_result(),
        ],
        task_name,
    )
    result["R2"] = create_multi_result(
        [
            interfaces_r2_1.create_nornir_result(),
            interfaces_r2_2.create_nornir_result(),
        ],
        task_name,
    )
    result["R3"] = timeouted_multiresult
    return result
Example #17
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)
        with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
            futures = {
                pool.submit(task.copy().start, host): host
                for host in hosts
            }
            for future in as_completed(futures):
                worker_result = future.result()
                result[worker_result.host.name] = worker_result
                if worker_result.failed:
                    print(f'{worker_result.host.name} - fail')
                else:
                    print(f'{worker_result.host.name} - success')

        return result
Example #18
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        self.work_q = queue.Queue()
        result = AggregatedResult(task.name)
        # enqueue hosts in work queue
        for host in hosts:
            self.work_q.put((task, host, result))
        # start threads
        threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self.worker, args=(), daemon=True)
            t.start()
            threads.append(t)
        # block until all tasks are done
        self.work_q.join()
        # stop workers
        for i in range(self.num_workers):
            self.work_q.put(None)
        for t in threads:
            t.join()
        return result
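
The worker method is not part of this excerpt. Given the queue protocol that run() relies on, with (task, host, result) tuples as work items, None as the shutdown sentinel, and task_done() feeding work_q.join(), a plausible (hypothetical) shape for it would be:

    def worker(self):
        while True:
            item = self.work_q.get()
            if item is None:  # shutdown sentinel enqueued by run()
                self.work_q.task_done()
                break
            task, host, result = item
            # copy the shared task before running it, as the other runners do
            result[host.name] = task.copy().start(host)
            self.work_q.task_done()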
Example #19
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with Progress(
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.completed:>3.0f}/{task.total}",
        ) as progress:

            num_hosts = len(hosts)
            total = progress.add_task("[cyan]Completed...", total=num_hosts)
            successful = progress.add_task("[green]Successful...",
                                           total=num_hosts)
            changed = progress.add_task("[orange3]Changed...", total=num_hosts)
            error = progress.add_task("[red]Failed...", total=num_hosts)

            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update(total, advance=1)
                    if worker_result.failed:
                        progress.update(error, advance=1)
                        progress.print(
                            f"[red]{worker_result.host.name}: failure")
                    else:
                        progress.update(successful, advance=1)
                        progress.print(
                            f"[green]{worker_result.host.name}: success")
                    if worker_result.changed:
                        progress.update(changed, advance=1)

        return result
Example #20
def general_result():
    task_name = "netmiko_run_iperf"
    confirmation_result = create_result(result_content="iperf executed for host")
    general_result = AggregatedResult(task_name)
    general_result["L1"] = create_multi_result(
        results=[
            confirmation_result,
            iperf_l1_1.create_nornir_result(),
            iperf_l1_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    general_result["L2"] = create_multi_result(
        results=[
            confirmation_result,
            iperf_l2_1.create_nornir_result(),
            iperf_l2_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    return general_result
Example #21
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """

        # first we create the root object with all the device groups in it
        self.root = sort_hosts(hosts)

        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        # when sending the tasks to the pool we will store the futures here
        futures = []

        with ThreadPoolExecutor(self.num_workers) as pool:
            while self.root.pending():
                # for as long as we have pending objects

                # we execute the task over a batch of devices and store
                # the futures
                for host in self.root.batch():
                    future = pool.submit(task.copy().start, host)
                    futures.append(future)

                # we process the futures
                while futures:
                    future = futures.pop(0)
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    if worker_result.failed:
                        self.root.fail(worker_result.host,
                                       worker_result[-1].exception)
                    else:
                        self.root.complete(worker_result.host)
                time.sleep(1)

        return result
Example #22
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        for host in hosts:
            result[host.name] = task.copy().start(host)
        return result
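
Whatever the execution strategy, every run() variant here returns the same structure: AggregatedResult is a dict subclass keyed by host name, with a few convenience properties on top. A short usage sketch, assuming Nornir 3.x (the task and host names are made up):

from nornir.core.task import AggregatedResult, MultiResult, Result

agg = AggregatedResult("demo_task")
agg["R1"] = MultiResult("demo_task")
agg["R1"].append(Result(host="localhost", result="ok", failed=False, changed=False))

print(list(agg.keys()))  # ['R1'] -- plain dict access per host
print(agg.failed)        # False -- True if any host's results failed
print(agg.failed_hosts)  # {} -- host name -> MultiResult, failed hosts only
agg.raise_on_error()     # raises NornirExecutionError if agg.failed is True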
Example #23
    def _run_serial(self, task, hosts, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.__name__)
        for host in hosts:
            result[host.name] = Task(task, **kwargs).start(host, self)
        return result
Example #24
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        connectors_q = queue.Queue()
        work_q = queue.Queue()
        stop_event = threading.Event()
        task_timeout_event = threading.Event()
        task_timeout_timer = threading.Timer(self.task_timeout, task_timeout_event.set)
        result = AggregatedResult(task.name)
        connector_threads = []
        worker_threads = []
        # enqueue hosts in connectors queue
        for host in hosts:
            connectors_q.put(
                (task.copy(), host, {"connection_retry": 0, "task_retry": 0}, result)
            )
        # start connector threads
        for i in range(self.num_connectors):
            t = threading.Thread(
                target=connector,
                args=(
                    stop_event,
                    connectors_q,
                    work_q,
                    self.connect_backoff,
                    self.connect_splay,
                    self.connect_retry,
                    self.jumphosts_connections,
                ),
            )
            t.start()
            connector_threads.append(t)
        # start worker threads
        for i in range(self.num_workers):
            t = threading.Thread(
                target=worker,
                args=(
                    stop_event,
                    connectors_q,
                    work_q,
                    self.task_backoff,
                    self.task_splay,
                    self.task_retry,
                    self.connect_retry,
                    self.reconnect_on_fail,
                ),
            )
            t.start()
            worker_threads.append(t)
        # wait until all hosts have completed the task or the timeout is reached
        hosts_no_result = [h.name for h in hosts]  # guard against an instant timeout
        task_timeout_timer.start()
        while not task_timeout_event.is_set():
            with LOCK:
                hosts_no_result = [h.name for h in hosts if h.name not in result]
            if hosts_no_result == []:
                task_timeout_timer.cancel()
                break
            time.sleep(0.1)
        else:
            # the loop ended without a break, i.e. task_timeout was reached
            log.warning(
                "RetryRunner task '{}', '{}'s task_timeout reached, "
                "hosts no results '{}'".format(
                    task.name, self.task_timeout, hosts_no_result
                )
            )
        # block until both queues are empty
        connectors_q.join()
        work_q.join()
        # signal connector and worker threads to stop, then reap them
        stop_event.set()
        while connector_threads:
            connector_threads.pop().join()
        while worker_threads:
            worker_threads.pop().join()
        return result
Example #25
def print_table_result(
    result: ScrapliResult,
    failed: bool = False,
    severity_level: int = logging.INFO,
    parser: str = "textfsm",
    to_dict: bool = True,
    fail_to_string: bool = False,
) -> None:
    """
    Prints the :obj:`nornir.core.task.Result` from a previous task to screen

    Arguments:
        result: from a previous task
        failed: if `True` assume the task failed
        severity_level: Print only errors with this severity level or higher
        parser: textfsm|genie -- parser to parse output with
        to_dict: output structured data in dict form instead --
        basically put k:v instead of just
            lists of lists of values for textfsm output;
            ignored if parser == "genie"
        fail_to_string: fallback to printing unstructured output
        or have tasks skipped (because
            print_result won't print empty lists which scrapli
            returns if parsing fails)

    """
    updated_agg_result = AggregatedResult(result.name)
    console = Console()
    table = Table(
        "Hostname", "Version", "Platform", "Image ID", "Image Type",
        "Uptime", "System Image", "Compiled Date")
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for individual_result in multi_result:
            scrapli_responses = getattr(
                individual_result, "scrapli_response", None)
            if isinstance(scrapli_responses, Response):
                scrapli_responses = [scrapli_responses]
            if not scrapli_responses:
                updated_multi_result.append(individual_result)
                continue
            for scrapli_response in scrapli_responses:
                parser_method = getattr(
                    scrapli_response, f"{parser}_parse_output")
                updated_result = Result(
                    host=individual_result.host,
                    changed=individual_result.changed,
                    diff=individual_result.diff,
                    exception=individual_result.exception,
                    failed=individual_result.failed,
                    name=individual_result.name,
                    severity_level=individual_result.severity_level,
                    stderr=individual_result.stderr,
                    stdout=individual_result.stdout,
                )

                if parser == "textfsm":
                    structured_result = parser_method(to_dict=to_dict)
                else:
                    structured_result = parser_method()

                if not structured_result and fail_to_string:
                    updated_result.result = scrapli_response.result
                else:
                    updated_result.result = structured_result
                updated_multi_result.append(updated_result)
            try:
                version = structured_result['version']
                table.add_row(
                    f'[green]{version["hostname"]}[/green]',
                    f'[blue]{version["version"]}[/blue]',
                    f'[magenta]{version["platform"]}[/magenta]',
                    f'[cyan]{version["image_id"]}[/cyan]',
                    f'[orange1]{version["image_type"]}[/orange1]',
                    f'[bright_green]{version["uptime"]}[/bright_green]',
                    f'[magenta]{version["system_image"]}[/magenta]',
                    f'[yellow]{version["compiled_date"]}[/yellow]',
                )
            except (KeyError, TypeError):
                # textfsm output is a list, and genie output may lack a
                # "version" key; neither fits this fixed table layout
                print("This command is not supported in Table format!")
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result  # noqa

    LOCK.acquire()
    try:
        console.print(table)
    finally:
        LOCK.release()
Example #26
L        10.0.0.15/32 is directly connected, GigabitEthernet1"""

RAW_RESULT = "\n".join([IOSXE_SHOW_VERSION, IOSXE_SHOW_IP_ROUTE])

TEST_SCRAPLI_RESPONSE_ONE = Response(host="sea-ios-1",
                                     channel_input="show version",
                                     textfsm_platform="cisco_iosxe")
TEST_SCRAPLI_RESPONSE_ONE._record_response(result=IOSXE_SHOW_VERSION)
TEST_SCRAPLI_RESPONSE_TWO = Response(host="sea-ios-1",
                                     channel_input="show ip route",
                                     textfsm_platform="cisco_iosxe")
TEST_SCRAPLI_RESPONSE_TWO._record_response(result=IOSXE_SHOW_IP_ROUTE)
TEST_SCRAPLI_RESPONSE = [TEST_SCRAPLI_RESPONSE_ONE, TEST_SCRAPLI_RESPONSE_TWO]

TEST_HOST = Host(name="sea-ios-1")
TEST_AGG_RESULT = AggregatedResult("send_commands")
TEST_MULTI_RESULT = MultiResult("send_commands")
TEST_RESULT = Result(host=TEST_HOST, result=RAW_RESULT, name="send_commands")
setattr(TEST_RESULT, "scrapli_response", TEST_SCRAPLI_RESPONSE)

TEST_MULTI_RESULT.append(TEST_RESULT)

TEST_AGG_RESULT[TEST_HOST.name] = TEST_MULTI_RESULT


@pytest.mark.parametrize(
    "to_dict",
    [
        (
            True,
            "\x1b[1m\x1b[36msend_commands*******************************************************************\n\x1b[1m\x1b[34m* sea-ios-1 ** changed : False *************************************************\n\x1b[1m\x1b[32mvvvv send_commands ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO\n[ { 'config_register': '0x2102',\n    'hardware': ['CSR1000V'],\n    'hostname': 'csr1000v',\n    'mac': [],\n    'reload_reason': 'reload',\n    'rommon': 'IOS-XE',\n    'running_image': 'packages.conf',\n    'serial': ['9FKLJWM5EB0'],\n    'uptime': '2 hours, 43 minutes',\n    'version': '16.4.1'}]\n\x1b[1m\x1b[32m---- send_commands ** changed : False ------------------------------------------ INFO\n[ { 'distance': '',\n    'mask': '24',\n    'metric': '',\n    'network': '10.0.0.0',\n    'nexthop_if': 'GigabitEthernet1',\n    'nexthop_ip': '',\n    'protocol': 'C',\n    'type': '',\n    'uptime': ''},\n  { 'distance': '',\n    'mask': '32',\n    'metric': '',\n    'network': '10.0.0.15',\n    'nexthop_if': 'GigabitEthernet1',\n    'nexthop_ip': '',\n    'protocol': 'L',\n    'type': '',\n    'uptime': ''}]\n\x1b[1m\x1b[32m^^^^ END send_commands ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
Example #27
def print_structured_result(
    result: ScrapliResult,
    failed: bool = False,
    severity_level: int = logging.INFO,
    parser: str = "textfsm",
    to_dict: bool = True,
    fail_to_string: bool = False,
) -> None:
    """
    Prints the :obj:`nornir.core.task.Result` from a previous task to screen

    Arguments:
        result: from a previous task
        failed: if `True` assume the task failed
        severity_level: Print only errors with this severity level or higher
        parser: textfsm|genie -- parser to parse output with
        to_dict: output structured data in dict form instead -- basically put k:v instead of just
            lists of lists of values for textfsm output; ignored if parser == "genie"
        fail_to_string: fallback to printing unstructured output or have tasks skipped (because
            print_result won't print empty lists which scrapli returns if parsing fails)

    """
    updated_agg_result = AggregatedResult(result.name)
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for individual_result in multi_result:
            scrapli_responses = getattr(individual_result, "scrapli_response",
                                        None)
            if isinstance(scrapli_responses, Response):
                scrapli_responses = [scrapli_responses]
            if not scrapli_responses:
                updated_multi_result.append(individual_result)
                continue
            for scrapli_response in scrapli_responses:
                parser_method = getattr(scrapli_response,
                                        f"{parser}_parse_output")
                updated_result = Result(
                    host=individual_result.host,
                    changed=individual_result.changed,
                    diff=individual_result.diff,
                    exception=individual_result.exception,
                    failed=individual_result.failed,
                    name=individual_result.name,
                    severity_level=individual_result.severity_level,
                    stderr=individual_result.stderr,
                    stdout=individual_result.stdout,
                )

                if parser == "textfsm":
                    structured_result = parser_method(to_dict=to_dict)
                else:
                    structured_result = parser_method()

                if not structured_result and fail_to_string:
                    updated_result.result = scrapli_response.result
                else:
                    updated_result.result = structured_result
                updated_multi_result.append(updated_result)
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result  # noqa

    LOCK.acquire()
    try:
        _print_result(result=updated_agg_result,
                      attrs=None,
                      failed=failed,
                      severity_level=severity_level)
    finally:
        LOCK.release()
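
For context, print_structured_result ships with nornir_scrapli and is normally called on the AggregatedResult returned by a scrapli task. A minimal usage sketch, assuming nornir_scrapli is installed and a config.yaml inventory exists:

from nornir import InitNornir
from nornir_scrapli.functions import print_structured_result
from nornir_scrapli.tasks import send_command

nr = InitNornir(config_file="config.yaml")
results = nr.run(task=send_command, command="show version")
print_structured_result(results, parser="textfsm", to_dict=True)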
Example #28
    def _run_serial(self, task: Task, hosts, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.name)
        for host in hosts:
            result[host.name] = task.copy().start(host, self)
        return result