Example #1
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with tqdm(
                total=len(hosts),
                desc="progress",
        ) as progress:
            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update()
                    if worker_result.failed:
                        tqdm.write(f"{worker_result.host.name}: failure")
                    else:
                        tqdm.write(f"{worker_result.host.name}: success")

        return result
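A minimal usage sketch (not part of the example above), assuming the class containing this run() method is called ProgressRunner (a hypothetical name) and that Nornir 3.x is in use; a custom runner plugin can be registered and then selected by name:

from nornir import InitNornir
from nornir.core.plugins.runners import RunnersPluginRegister

# register the hypothetical ProgressRunner class under the name "progress"
RunnersPluginRegister.register("progress", ProgressRunner)

nr = InitNornir(
    config_file="config.yaml",  # hypothetical inventory/config file
    runner={"plugin": "progress", "options": {"num_workers": 20}},
)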
Example #2
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        futures = []
        # submit one copy of the task per host to the thread pool
        with ThreadPoolExecutor(self.num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host)
                futures.append(future)

        # leaving the context manager above waits for all futures to finish,
        # so results can simply be collected in submission order
        for future in futures:
            worker_result = future.result()
            result[worker_result.host.name] = worker_result
        return result
Example #3
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        connectors_q = queue.Queue()
        work_q = queue.Queue()
        result = AggregatedResult(task.name)
        # enqueue hosts into the connectors queue
        for host in hosts:
            connectors_q.put(
                (task.copy(), host, {"connection_retry": 0, "task_retry": 0}, result)
            )
        # start connector threads
        connector_threads = []
        for i in range(self.num_connectors):
            t = threading.Thread(target=self.connector, args=(connectors_q, work_q))
            t.start()
            connector_threads.append(t)
        # start worker threads
        worker_threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self.worker, args=(connectors_q, work_q))
            t.start()
            worker_threads.append(t)
        # wait until all hosts completed the task or the timeout is reached
        start_time = time.time()
        while True:
            with LOCK:
                hosts_no_result = [h.name for h in hosts if h.name not in result]
            if not hosts_no_result:
                break
            if time.time() - start_time > self.task_timeout:
                log.error(
                    "RetryRunner task '{}', '{}' seconds wait timeout reached, "
                    "hosts that did not return results '{}'".format(
                        task.name, self.task_timeout, hosts_no_result
                    )
                )
                break
            time.sleep(0.1)
        # block until all queues are empty
        connectors_q.join()
        work_q.join()
        # stop connector threads by sending one stop sentinel per thread
        for i in range(self.num_connectors):
            connectors_q.put(None)
        for t in connector_threads:
            t.join()
        # stop worker threads the same way
        for i in range(self.num_workers):
            work_q.put(None)
        for t in worker_threads:
            t.join()
        # delete queues and threads
        del connectors_q, work_q, connector_threads, worker_threads
        return result
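self.connector and self.worker are not shown in this example. A minimal sketch of the queue-consumer pattern they follow, assuming the (task, host, retry-counters, result) work items enqueued above and treating None as the stop sentinel (the real nornir_salt implementations additionally handle retries and backoff):

    def worker(self, connectors_q, work_q):
        while True:
            item = work_q.get()
            if item is None:  # stop sentinel enqueued by run()
                work_q.task_done()
                break
            task, host, counters, result = item
            worker_result = task.start(host)
            with LOCK:  # same lock that guards result reads in run()
                result[host.name] = worker_result
            work_q.task_done()  # needed for work_q.join() to unblock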
Example #4
    def _run_parallel(self, task: Task, hosts, num_workers, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.name)

        pool = Pool(processes=num_workers)
        result_pool = [
            pool.apply_async(task.copy().start, args=(h, self)) for h in hosts
        ]
        pool.close()
        pool.join()

        for rp in result_pool:
            r = rp.get()
            result[r.host.name] = r
        return result
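Pool here is multiprocessing.Pool rather than a thread pool, so each task runs in a separate process and the submitted task and host objects must be picklable; the snippet assumes an import along these lines:

from multiprocessing import Pool

Note that AsyncResult.get() re-raises any exception raised in the worker process.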
Example #5
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:

        result = AggregatedResult(task.name)
        greenlets = []

        pool = Pool(self.num_workers)
        for host in hosts:
            greenlet = pool.spawn(task.copy().start, host)
            greenlets.append(greenlet)
        pool.join()

        for greenlet in greenlets:
            worker_result = greenlet.get()
            result[worker_result.host.name] = worker_result

        return result
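Given the spawn()/join()/get() calls, Pool in this example is presumably gevent's cooperative greenlet pool (an assumption, since the imports are not shown):

from gevent.pool import Pool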
Example #6
    def _run_parallel(
        self,
        task: Task,
        hosts: List["Host"],
        num_workers: int,
        **kwargs: Any,
    ) -> AggregatedResult:
        agg_result = AggregatedResult(kwargs.get("name") or task.name)
        futures = []
        with ThreadPoolExecutor(num_workers) as pool:
            for host in hosts:
                future = pool.submit(task.copy().start, host, self)
                futures.append(future)

        for future in futures:
            worker_result = future.result()
            agg_result[worker_result.host.name] = worker_result
        return agg_result
Example #7
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)
        with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
            futures = {
                pool.submit(task.copy().start, host): host
                for host in hosts
            }
            for future in as_completed(futures):
                worker_result = future.result()
                result[worker_result.host.name] = worker_result
                if worker_result.failed:
                    print(f'{worker_result.host.name} - fail')
                else:
                    print(f'{worker_result.host.name} - success')

        return result
Example #8
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """
        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        with Progress(
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.completed:>3.0f}/{task.total}",
        ) as progress:

            num_hosts = len(hosts)
            total = progress.add_task("[cyan]Completed...", total=num_hosts)
            successful = progress.add_task("[green]Successful...",
                                           total=num_hosts)
            changed = progress.add_task("[orange3]Changed...", total=num_hosts)
            error = progress.add_task("[red]Failed...", total=num_hosts)

            with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
                futures = {
                    pool.submit(task.copy().start, host): host
                    for host in hosts
                }
                for future in as_completed(futures):
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    progress.update(total, advance=1)
                    if worker_result.failed:
                        progress.update(error, advance=1)
                        progress.print(
                            f"[red]{worker_result.host.name}: failure")
                    else:
                        progress.update(successful, advance=1)
                        progress.print(
                            f"[green]{worker_result.host.name}: success")
                    if worker_result.changed:
                        progress.update(changed, advance=1)

        return result
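Progress, BarColumn, and the column format strings in this example come from the rich library; the snippet assumes imports along these lines:

from rich.progress import Progress, BarColumn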
Example #9
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        """
        This is where the magic happens
        """

        # first we create the root object with all the device groups in it
        self.root = sort_hosts(hosts)

        # we instantiate the aggregated result
        result = AggregatedResult(task.name)

        # when sending the tasks to the pool we will store the futures here
        futures = []

        with ThreadPoolExecutor(self.num_workers) as pool:
            while self.root.pending():
                # for as long as we have pending objects

                # we execute the task over a batch of devices and store
                # the futures
                for host in self.root.batch():
                    future = pool.submit(task.copy().start, host)
                    futures.append(future)

                # we process the futures
                while futures:
                    future = futures.pop(0)
                    worker_result = future.result()
                    result[worker_result.host.name] = worker_result
                    if worker_result.failed:
                        self.root.fail(worker_result.host,
                                       worker_result[-1].exception)
                    else:
                        self.root.complete(worker_result.host)
                time.sleep(1)

        return result
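sort_hosts() and the root object it returns are not shown in this example. A minimal, hypothetical stand-in that satisfies the pending()/batch()/fail()/complete() interface the run() method above relies on (the real helper presumably encodes group ordering and failure propagation):

class _Root:
    """Hypothetical stand-in for the object returned by sort_hosts()."""

    def __init__(self, hosts):
        self._pending = list(hosts)  # hosts not yet submitted
        self._running = set()        # host names currently executing

    def pending(self):
        # True while any host still has to be submitted or finish
        return bool(self._pending or self._running)

    def batch(self):
        # hand out everything ready to run; the real helper batches by group
        batch, self._pending = self._pending, []
        self._running.update(h.name for h in batch)
        return batch

    def fail(self, host, exception):
        self._running.discard(host.name)

    def complete(self, host):
        self._running.discard(host.name)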
Example #10
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        result = AggregatedResult(task.name)
        # run the task over the hosts one at a time, in inventory order
        for host in hosts:
            result[host.name] = task.copy().start(host)
        return result
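For comparison, Nornir 3.x ships an equivalent serial runner out of the box, so the same behavior is available without writing a plugin:

from nornir import InitNornir

nr = InitNornir(
    config_file="config.yaml",  # hypothetical inventory/config file
    runner={"plugin": "serial"},
)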
Example #11
    def _run_serial(self, task: Task, hosts, **kwargs):
        result = AggregatedResult(kwargs.get("name") or task.name)
        # same serial pattern; this variant also passes the calling object
        # to task.start()
        for host in hosts:
            result[host.name] = task.copy().start(host, self)
        return result
Example #12
    def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
        connectors_q = queue.Queue()
        work_q = queue.Queue()
        stop_event = threading.Event()
        task_timeout_event = threading.Event()
        task_timeout_timer = threading.Timer(self.task_timeout,
                                             task_timeout_event.set)
        result = AggregatedResult(task.name)
        connector_threads = []
        worker_threads = []
        # enqueue hosts into the connectors queue
        for host in hosts:
            connectors_q.put((task.copy(), host, {
                "connection_retry": 0,
                "task_retry": 0
            }, result))
        # start connector threads
        for i in range(self.num_connectors):
            t = threading.Thread(
                target=connector,
                args=(
                    stop_event,
                    connectors_q,
                    work_q,
                    self.connect_backoff,
                    self.connect_splay,
                    self.connect_retry,
                    self.jumphosts_connections,
                ),
            )
            t.start()
            connector_threads.append(t)
        # start worker threads
        for i in range(self.num_workers):
            t = threading.Thread(
                target=worker,
                args=(
                    stop_event,
                    connectors_q,
                    work_q,
                    self.task_backoff,
                    self.task_splay,
                    self.task_retry,
                    self.connect_retry,
                    self.reconnect_on_fail,
                ),
            )
            t.start()
            worker_threads.append(t)
        # wait until all hosts completed the task or the timeout was reached
        task_timeout_timer.start()
        while not task_timeout_event.is_set():
            with LOCK:
                hosts_no_result = [
                    h.name for h in hosts if h.name not in result
                ]
            if not hosts_no_result:
                task_timeout_timer.cancel()
                break
            time.sleep(0.1)
        else:
            # the while/else branch runs only when the loop exited without
            # break, i.e. when the timeout event fired
            log.warning(
                "RetryRunner task '{}', '{}'s task_timeout reached, hosts no results '{}'"
                .format(task.name, self.task_timeout, hosts_no_result))
        # block until all queues are empty
        connectors_q.join()
        work_q.join()
        # signal shutdown, then join connector and worker threads
        stop_event.set()
        for t in connector_threads:
            t.join()
        for t in worker_threads:
            t.join()
        return result
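Unlike Example #3, which stops its threads with None sentinels, this version signals shutdown through stop_event. A minimal sketch of a consumer loop compatible with that logic, assuming the same work-item tuple as above (hypothetical; the real connector/worker functions also take the backoff/retry parameters shown):

def worker(stop_event, connectors_q, work_q, *params):
    while not stop_event.is_set():
        try:
            task, host, counters, result = work_q.get(timeout=0.1)
        except queue.Empty:
            continue  # nothing to do yet; re-check the stop flag
        worker_result = task.start(host)
        with LOCK:
            result[host.name] = worker_result
        work_q.task_done()  # lets work_q.join() in run() return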