Example #1
from nornir.core.task import AggregatedResult
from rich.box import MINIMAL_DOUBLE_HEAD
from rich.console import Console
from rich.table import Table
from rich.text import Text


def rich_table(results: AggregatedResult) -> None:
    console = Console()

    for hostname, host_result in results.items():
        table = Table(box=MINIMAL_DOUBLE_HEAD)
        table.add_column(hostname, justify="right", style="cyan", no_wrap=True)
        table.add_column("result")
        table.add_column("changed")

        for r in host_result:
            text = Text()
            if r.failed:
                text.append(f"{r.exception}", style="red")
            else:
                text.append(f"{r.result or ''}", style="green")

            changed = Text()
            if r.changed:
                color = "orange3"
            else:
                color = "green"
            changed.append(f"{r.changed}", style=color)

            table.add_row(r.name, text, changed)

        console.print(table)
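A minimal usage sketch for the function above, assuming a standard nornir setup (the config path and the netmiko_send_command plugin task are placeholders, not part of the original example):

from nornir import InitNornir
from nornir_netmiko import netmiko_send_command  # assumed plugin import

nr = InitNornir(config_file="config.yaml")  # hypothetical config path
results = nr.run(task=netmiko_send_command, command_string="show version")
rich_table(results)  # nr.run() returns an AggregatedResult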
Example #2
def print_result(
    result: Result,
    host: Optional[str] = None,
    nr_vars: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
) -> None:
    updated_agg_result = AggregatedResult(result.name)
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for r in multi_result:
            if isinstance(r.result, str) and r.result.startswith("Task skipped"):
                continue
            updated_multi_result.append(r)
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result

    if not updated_agg_result:
        return

    with LOCK:
        _print_result(updated_agg_result, host, nr_vars, failed, severity_level)
Example #3
def general_result():
    task_name = "napalm_ping"
    confirmation_result = create_result(result_content="All pings executed")
    timeouted = create_result(
        TIMEOUT_MESSAGE,
        host="R3",
        destination=IP_6,
        failed=True,
        exception=ConnectionException(f"Cannot connect to {IP_6}"),
    )
    general_result = AggregatedResult(task_name)
    general_result["R1"] = create_multi_result(
        results=[
            confirmation_result,
            ping_r1_1.create_nornir_result(),
            ping_r1_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    general_result["R2"] = create_multi_result(
        results=[confirmation_result,
                 ping_r2.create_nornir_result()],
        task_name=task_name,
    )
    general_result["R3"] = create_multi_result(
        results=[
            confirmation_result,
            ping_r3.create_nornir_result(), timeouted
        ],
        task_name=task_name,
    )
    return general_result
Example #4
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with tqdm(
                total=len(hosts),
                desc="progress",
        ) as progress:
            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update()
                    if worker_result.failed:
                        tqdm.write(f"{worker_result.host.name}: failure")
                    else:
                        tqdm.write(f"{worker_result.host.name}: success")

        return result
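A runner like the one above is typically hooked into nornir through the runner plugin registry; a sketch, assuming the class is named TqdmRunner (the snippet does not show the class name):

from nornir import InitNornir
from nornir.core.plugins.runners import RunnersPluginRegister

RunnersPluginRegister.register("tqdm_runner", TqdmRunner)  # hypothetical plugin name
nr = InitNornir(
    config_file="config.yaml",  # hypothetical config path
    runner={"plugin": "tqdm_runner", "options": {"num_workers": 20}},
)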
Example #5
def test_nornsible_print_task_no_results():
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="Task skipped", failed=False, changed=False)
    )
    output = print_result(test_result)
    assert output is None
Example #6
def general_result(timeouted_multiresult):
    task_name = "napalm_get_facts"
    result = AggregatedResult(task_name)
    result["S1"] = create_multi_result(
        [config_s1.create_nornir_result()],
        task_name,
    )
    result["S3"] = timeouted_multiresult
    return result
Example #7
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [create_result(nornir_raw_result_r1, task_name)], task_name)
    result["R2"] = create_multi_result(
        [create_result(nornir_raw_result_r2, task_name)], task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #8
def test_nornsible_print_task_results(capfd):
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="stuff happening!", failed=False, changed=False)
    )
    print_result(test_result)
    std_out, std_err = capfd.readouterr()
    assert "stuff happening" in std_out
Example #9
def general_result(timeouted_multiresult):
    task_name = "netmiko_send_command"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [create_result(raw_nornir_result_r1, task_name)], task_name)
    result["R2"] = create_multi_result(
        [create_result(raw_nornir_result_r2, task_name)], task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #10
    def task_completed(self, task: Task, result: AggregatedResult) -> None:

        # remove tasks with device's output
        if self.remove_tasks:
            for hostname, results in result.items():
                if len(results) >= self.len_tasks:
                    for i in range(0, self.len_tasks):
                        _ = results.pop(0)

        # remove non failed tasks if requested to do so
        if self.failed_only:
            for hostname, results in result.items():
                good_tests = []
                for index, task_result in enumerate(results):
                    if getattr(task_result, "success", None) is True:
                        good_tests.append(index)
                # pop starting from last index to preserve lower indexes
                for i in reversed(good_tests):
                    _ = results.pop(i)
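task_completed is one hook of nornir's Processor interface; a processor like this is attached with with_processors before running a task. A sketch, with the class name and constructor arguments assumed:

nr_with_report = nr.with_processors([TestsProcessor(remove_tasks=True, failed_only=True)])  # hypothetical class
nr_with_report.run(task=my_task)  # task_completed fires once the whole run finishes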
Example #11
    def task_completed(self, task: Task, result: AggregatedResult) -> None:
        """
        When the overall stockpile task finishes, do the following:
            1) Print finish time and calculate run time
            2) Initialize our Git repository
            3) Write a CSV report on this backup task
            4) Add all written files to this commit, and commit it
        :param task:
        :param result:
        :return:
        """

        # Process our results into a CSV and write it to the stockpile output directory.

        task_end_time = datetime.datetime.utcnow()
        print(f"Backup Task End Time: {task_end_time.isoformat()}")
        print(
            f"Backup Task Elapsed Time: {task_end_time - self.task_start_time}"
        )

        # Plumb up Git repository
        repo = self.git_initialize(
            stockpile_directory=task.params["stockpile_directory"])
        author = Actor(name="Stockpiler", email="*****@*****.**")

        csv_out = pathlib.Path(
            f"{task.params['stockpile_directory']}/results.csv")
        print(f"Putting results into a CSV at {csv_out}")
        with csv_out.open(mode="w") as output_file:
            fieldnames = [
                i for i in next(result[x] for x in result)[0].result.keys()
                if i not in ["device_config"]
            ]

            writer = csv.DictWriter(output_file, fieldnames=fieldnames)

            writer.writeheader()
            for host in result.keys():
                # Don't try to write this if it's not a dict.
                if not isinstance(result[host][0].result, dict):
                    continue
                writer.writerow({
                    k: v
                    for (k, v) in result[host][0].result.items()
                    if k not in ["device_config"]
                })

        # Git Commit the changed/stockpiled files
        repo.git.add(
            all=True
        )  # Should be changed to explicitly add all filenames from the results... but that's harder
        repo.index.commit(
            message=f"Stockpile Built at {datetime.datetime.utcnow().isoformat()}",
            author=author,
        )
Example #12
def calculate_result(dc_runner: DCAwareRunner,
                     results: AggregatedResult) -> Dict[str, List[str]]:
    report: Dict[str, List[str]] = {
        "failed": [],
        "skipped": [],
        "completed": [h for h, r in results.items() if not r.failed],
    }
    for _, failed, skipped, _ in dc_runner.report():
        report["failed"].extend([h.name for h in failed])
        report["skipped"].extend([h.name for h in skipped])

    return report
Example #13
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        futures = []
        with ThreadPoolExecutor(self.num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host)
                futures.append(future)

        # exiting the with-block above waits for all submitted futures to
        # finish, so every future here is already completed
        for future in futures:
            worker_result = future.result()
            result[worker_result.host.name] = worker_result
        return result
Example #14
def general_result(timeouted_multiresult):
    task_name = "napalm_get_facts"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [users_r1_1.create_nornir_result(),
         users_r1_2.create_nornir_result()],
        task_name,
    )
    result["R2"] = create_multi_result([users_r2.create_nornir_result()],
                                       task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #15
 def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
     connectors_q = queue.Queue()
     work_q = queue.Queue()
     result = AggregatedResult(task.name)
     # enqueue hosts in connectors queue
     for host in hosts:
         connectors_q.put(
             (task.copy(), host, {"connection_retry": 0, "task_retry": 0}, result)
         )
     # start connectors threads
     connector_threads = []
     for i in range(self.num_connectors):
         t = threading.Thread(target=self.connector, args=(connectors_q, work_q))
         t.start()
         connector_threads.append(t)
     # start worker threads
     worker_threads = []
     for i in range(self.num_workers):
         t = threading.Thread(target=self.worker, args=(connectors_q, work_q))
         t.start()
         worker_threads.append(t)
     # wait until all hosts completed task or timeout reached
     start_time = time.time()
     while True:
         with LOCK:
             hosts_no_result = [h.name for h in hosts if h.name not in result]
         if hosts_no_result == []:
             break
         if time.time() - start_time > self.task_timeout:
             log.error("RetryRunner task '{}', '{}' seconds wait timeout reached, hosts that did not return results '{}'".format(
                     task.name, self.task_timeout, hosts_no_result
                 )
             )
             break
         time.sleep(0.1)
     # block until all queues empty
     connectors_q.join()
     work_q.join()
     # stop connector threads
     for i in range(self.num_connectors):
         connectors_q.put(None)
     for t in connector_threads:
         t.join()
     # stop worker threads
     for i in range(self.num_workers):
         work_q.put(None)
     for t in worker_threads:
         t.join()
     # delete queues and threads
     del connectors_q, work_q, connector_threads, worker_threads
     return result
Example #16
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        results=[
            bgp_r1_1.create_nornir_result(),
            bgp_r1_2.create_nornir_result()
        ],
        task_name=task_name,
    )
    result["R2"] = create_multi_result(results=[bgp_r2.create_nornir_result()],
                                       task_name=task_name)
    result["R3"] = timeouted_multiresult
    return result
Example #17
    def _run_parallel(self, task: Task, hosts, num_workers, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.name)

        pool = Pool(processes=num_workers)
        result_pool = [
            pool.apply_async(task.copy().start, args=(h, self)) for h in hosts
        ]
        pool.close()
        pool.join()

        for rp in result_pool:
            r = rp.get()
            result[r.host.name] = r
        return result
Example #18
    def _map_host_to_nutsresult(
            self, general_result: AggregatedResult) -> Dict[str, NutsResult]:
        """
        Maps a host's name to its corresponding result, which in turn is
        wrapped into a NutsResult.
        Used when a nornir tasks queries properties of a host.

        :param general_result: The raw result
                as provided by nornir's executed task
        :return: Host mapped to a NutsResult
        """
        return {
            host: self.nuts_result_wrapper(multiresult)
            for host, multiresult in general_result.items()
        }
Example #19
def nr_result_serialize(result: AggregatedResult):
    if not isinstance(result, AggregatedResult):
        raise ValueError("result must be of type AggregatedResult")

    hosts = {}
    for host, multires in result.items():
        hosts[host] = []
        for res in multires:
            hosts[host].append({
                'name': res.name,
                'result': res.result,
                'diff': res.diff,
                'failed': res.failed
            })
    return hosts
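Because the returned structure contains only plain dicts, lists, and scalars, it can be dumped straight to JSON; default=str is a hedge for non-serializable result payloads. A usage sketch (nr and my_task are placeholders):

import json

serialized = nr_result_serialize(nr.run(task=my_task))
print(json.dumps(serialized, indent=2, default=str))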
Example #20
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:

        result = AggregatedResult(task.name)
        greenlets = []

        pool = Pool(self.num_workers)
        for host in hosts:
            greenlet = pool.spawn(task.copy().start, host)
            greenlets.append(greenlet)
        pool.join()

        for greenlet in greenlets:
            worker_result = greenlet.get()
            result[worker_result.host.name] = worker_result

        return result
Example #21
def nr_result_serialize(result: AggregatedResult):
    if not isinstance(result, AggregatedResult):
        raise ValueError("result must be of type AggregatedResult")

    hosts = {}
    for host, multires in result.items():
        hosts[host] = {'failed': False, 'job_tasks': []}
        for res in multires:
            hosts[host]['job_tasks'].append({
                'task_name': res.name,
                'result': res.result,
                'diff': res.diff,
                'failed': res.failed
            })
            if res.failed:
                hosts[host]['failed'] = True
    return hosts
Example #22
    def _map_host_to_dest_to_nutsresult(
        self,
        general_result: AggregatedResult,
    ) -> Dict[str, Dict[str, NutsResult]]:
        """
        Maps a host's name to its corresponding destination and calls a helper function
        to further map that destination to a NutsResult.

        Used when a host-destination pair is tested.

        :param general_result: The raw result as provided by nornir's executed task
        :return: The host mapped to its corresponding destination
                  mapped to its NutsResult
        """
        return {
            host: self._map_dest_to_nutsresult(task_results)
            for host, task_results in general_result.items()
        }
Example #23
    def _run_parallel(
        self,
        task: Task,
        hosts: List["Host"],
        num_workers: int,
        **kwargs: Dict[str, Any],
    ) -> AggregatedResult:
        agg_result = AggregatedResult(kwargs.get("name") or task.name)
        futures = []
        with ThreadPoolExecutor(num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host, self)
                futures.append(future)

        for future in futures:
            worker_result = future.result()
            agg_result[worker_result.host.name] = worker_result
        return agg_result
Example #24
def general_result(timeouted_multiresult):
    task_name = "napalm_get"
    result = AggregatedResult(task_name)
    result["R1"] = create_multi_result(
        [
            interfaces_r1_1.create_nornir_result(),
            interfaces_r1_2.create_nornir_result(),
        ],
        task_name,
    )
    result["R2"] = create_multi_result(
        [
            interfaces_r2_1.create_nornir_result(),
            interfaces_r2_2.create_nornir_result(),
        ],
        task_name,
    )
    result["R3"] = timeouted_multiresult
    return result
Example #25
 def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
     self.work_q = queue.Queue()
     result = AggregatedResult(task.name)
     # enqueue hosts in work queue
     for host in hosts:
         self.work_q.put((task, host, result))
     # start threads
     threads = []
     for i in range(self.num_workers):
         t = threading.Thread(target=self.worker, args=(), daemon=True)
         t.start()
         threads.append(t)
     # block until all tasks are done
     self.work_q.join()
     # stop workers:
     for i in range(self.num_workers):
         self.work_q.put(None)
     for t in threads:
         t.join()
     return result
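The snippet above depends on a worker method that is not shown. A minimal sketch of what it must do to match the queue protocol (a None sentinel stops the thread, and task_done() lets work_q.join() return); this is an assumption, not the original implementation:

    def worker(self):
        while True:
            item = self.work_q.get()
            if item is None:  # sentinel from run(): stop this thread
                self.work_q.task_done()
                break
            task, host, result = item
            work_result = task.copy().start(host)  # same start pattern as the other runners
            result[host.name] = work_result  # each host writes its own key in the shared result
            self.work_q.task_done()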
Example #26
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)
        with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
            futures = {
                pool.submit(task.copy().start, host): host
                for host in hosts
            }
            for future in as_completed(futures):
                worker_result = future.result()
                result[worker_result.host.name] = worker_result
                if worker_result.failed:
                    print(f'{worker_result.host.name} - fail')
                else:
                    print(f'{worker_result.host.name} - success')

        return result
Example #27
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with Progress(
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.completed:>3.0f}/{task.total}",
        ) as progress:

            num_hosts = len(hosts)
            total = progress.add_task("[cyan]Completed...", total=num_hosts)
            successful = progress.add_task("[green]Successful...",
                                           total=num_hosts)
            changed = progress.add_task("[orange3]Changed...", total=num_hosts)
            error = progress.add_task("[red]Failed...", total=num_hosts)

            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update(total, advance=1)
                    if worker_result.failed:
                        progress.update(error, advance=1)
                        progress.print(
                            f"[red]{worker_result.host.name}: failure")
                    else:
                        progress.update(successful, advance=1)
                        progress.print(
                            f"[green]{worker_result.host.name}: success")
                    if worker_result.changed:
                        progress.update(changed, advance=1)

        return result
Example #28
def general_result():
    task_name = "netmiko_run_iperf"
    confirmation_result = create_result(result_content="iperf executed for host")
    general_result = AggregatedResult(task_name)
    general_result["L1"] = create_multi_result(
        results=[
            confirmation_result,
            iperf_l1_1.create_nornir_result(),
            iperf_l1_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    general_result["L2"] = create_multi_result(
        results=[
            confirmation_result,
            iperf_l2_1.create_nornir_result(),
            iperf_l2_2.create_nornir_result(),
        ],
        task_name=task_name,
    )
    return general_result
Example #29
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """

        # first we create the root object with all the device groups in it
        self.root = sort_hosts(hosts)

        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        # when sending the tasks to the pool we will store the futures here
        futures = []

        with ThreadPoolExecutor(self.num_workers) as pool:
            while self.root.pending():
                # for as long as we have pending objects

                # we execute the task over a batch of devices and store
                # the futures
                for host in self.root.batch():
                    future = pool.submit(task.copy().start, host)
                    futures.append(future)

                # we process the futures
                while futures:
                    future = futures.pop(0)
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    if worker_result.failed:
                        self.root.fail(worker_result.host,
                                       worker_result[-1].exception)
                    else:
                        self.root.complete(worker_result.host)
                time.sleep(1)

        return result
Example #30
def normalize_result(nornir_job_result: AggregatedResult) -> Tuple[Dict, Dict]:
    """
    get_host_data result parser.
    Returns LLDP and FACTS data dicts
    with hostname keys.
    """
    global_lldp_data = {}
    global_facts = {}
    for device, output in nornir_job_result.items():
        if output[0].failed:
            # Write default data to dicts if the task is failed.
            # Use host inventory object name as a key.
            global_lldp_data[device] = {}
            global_facts[device] = {
                'nr_role': nr.inventory.hosts[device].get('role', 'undefined'),
                'nr_ip': nr.inventory.hosts[device].get('hostname', 'n/a'),
            }
            continue
        # Use FQDN as unique ID for devices within the script.
        device_fqdn = output[1].result['facts']['fqdn']
        if not device_fqdn:
            # If FQDN is not set use hostname.
            # LLDP TLV follows the same logic.
            device_fqdn = output[1].result['facts']['hostname']
        if not device_fqdn:
            # Use host inventory object name as a key if
            # neither FQDN nor hostname are set
            device_fqdn = device
        global_facts[device_fqdn] = output[1].result['facts']
        global_facts[device_fqdn]['nr_role'] = nr.inventory.hosts[device].get(
            'role', 'undefined')
        global_facts[device_fqdn]['nr_ip'] = nr.inventory.hosts[device].get(
            'hostname', 'n/a')
        global_lldp_data[device_fqdn] = output[1].result[
            'lldp_neighbors_detail']
    return global_lldp_data, global_facts
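A hedged usage sketch: get_host_data is the task the docstring refers to (its definition is not part of the snippet), and nr is the module-level nornir object the function already relies on:

global_lldp_data, global_facts = normalize_result(nr.run(task=get_host_data))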