import json
import sys

import directord
from directord import bootstrap
from directord import client
from directord import mixin
from directord import server
from directord import user
from directord import utils


def main():
    """Execute the main application.

    * Server|Client operations run within the foreground of the
      application.
    * Exec will submit a job to be run across the cluster.
    * Manage jobs will return information about the cluster and all
      executed jobs. This data will be returned in table format for
      easy consumption.
    """
    args, parser = _args()
    _mixin = mixin.Mixin(args=args)

    if args.mode == "server":
        server.Server(args=args).worker_run()
    elif args.mode == "client":
        client.Client(args=args).worker_run()
    elif args.mode in ["exec", "orchestrate"]:
        if args.mode == "exec":
            return_data = _mixin.run_exec()
        else:
            return_data = _mixin.run_orchestration()

        # Collect the job IDs returned by the submission.
        job_items = [i.decode() for i in return_data if i]

        if args.poll or args.stream or args.wait:
            failed = set()
            manage = user.Manage(args=args)
            run_indicator = args.wait and not args.debug
            with directord.Spinner(run=run_indicator) as indicator:
                for item in job_items:
                    state, status, stdout, stderr, info = manage.poll_job(
                        job_id=item
                    )
                    if state is False:
                        failed.add(item)

                    if args.stream:
                        # Print captured STDOUT/STDERR for every node that
                        # produced output.
                        for node in sorted(
                            set(
                                i
                                for v in [stdout, stderr, info]
                                for i in v.keys()
                            )
                        ):
                            for k, n, v in [
                                (node, name, d[node])
                                for name, d in [
                                    ("STDOUT", stdout),
                                    ("STDERR", stderr),
                                ]
                                if node in d
                            ]:
                                print("{} -- {}\n{}".format(k, n, v))

                    if run_indicator:
                        indicator.pipe_b.send(status)
                    else:
                        print(status)

            if failed:
                if args.check:
                    print("FAILED JOBS")
                    for item in failed:
                        print(item)
                raise SystemExit(1)
        else:
            for item in job_items:
                print(item)
    elif args.mode == "manage":
        manage_exec = user.Manage(args=args)
        data = manage_exec.run()
        try:
            data = json.loads(data)
        except Exception as e:
            print("No valid data found: {}".format(str(e)))
            return
        else:
            if not data:
                raise SystemExit("No data found")

        if args.export_jobs or args.export_nodes:
            export_file = utils.dump_yaml(
                file_path=(args.export_jobs or args.export_nodes),
                data=dict(data),
            )
            print("Exported data to [ {} ]".format(export_file))
            return

        computed_values = dict()
        headings = ["KEY", "VALUE"]
        tabulated_data = None
        if data and isinstance(data, dict):
            tabulated_data = _mixin.return_tabulated_info(data=data)
        elif data and isinstance(data, list):
            if args.job_info:
                item = dict(data).get(args.job_info)
                if not item:
                    print(
                        "Job information for ID:{} was not found".format(
                            args.job_info
                        )
                    )
                    return
                tabulated_data = _mixin.return_tabulated_info(data=item)
            else:
                if args.list_jobs:
                    restrict_headings = [
                        "PARENT_JOB_NAME",
                        "VERB",
                        "EXECUTION_TIME",
                        "PROCESSING",
                        "SUCCESS",
                        "FAILED",
                    ]
                else:
                    restrict_headings = [
                        "EXPIRY",
                        "VERSION",
                        "HOST_UPTIME",
                        "AGENT_UPTIME",
                        "MACHINE_ID",
                        "DRIVER",
                    ]
                (
                    tabulated_data,
                    headings,
                    computed_values,
                ) = _mixin.return_tabulated_data(
                    data=data, restrict_headings=restrict_headings
                )
        if tabulated_data:
            utils.print_tabulated_data(
                data=[i for i in tabulated_data if i], headers=headings
            )
            print("\nTotal Items: {}".format(len(tabulated_data)))
            for k, v in computed_values.items():
                if isinstance(v, float):
                    print("Total {}: {:.2f}".format(k, v))
                else:
                    print("Total {}: {}".format(k, v))
        else:
            return
    elif args.mode == "bootstrap":
        _bootstrap = bootstrap.Bootstrap(
            args.catalog,
            args.key_file,
            args.threads,
            args.debug,
        )
        _bootstrap.bootstrap_cluster()
    else:
        parser.print_help(sys.stderr)
        raise SystemExit("Mode is set to an unsupported value.")
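
# A minimal sketch (an assumption, not part of the original module) of the
# poll_job contract main() consumes above: a five-tuple of
# (state, status, stdout, stderr, info), where stdout, stderr, and info map
# node names to captured output. `args` and `job_id` are hypothetical
# stand-ins for a parsed argument namespace and a returned job ID.
def example_poll_once(args, job_id):
    manage = user.Manage(args=args)
    state, status, stdout, stderr, info = manage.poll_job(job_id=job_id)
    if state is False:  # the job failed on at least one node
        for node, output in stderr.items():
            print("{} -- STDERR\n{}".format(node, output))
    return status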
def test_spinner_context_msg(self):
    with directord.Spinner(run=True) as indicator:
        msg = indicator.indicator_msg(msg="test")
        self.assertTrue(indicator.run)
        self.assertIsNone(msg)
def test_spinner_class(self):
    indicator = directord.Spinner(run=True)
    self.assertEqual(indicator.run, True)
    indicator.__exit__()
def test_spinner_context(self):
    with directord.Spinner(run=True) as indicator:
        self.assertTrue(indicator.run)
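
# A minimal usage sketch (not from the test suite) combining the Spinner
# behaviors exercised above with the status-reporting pattern main() uses:
# run the indicator as a context manager and push status text through
# pipe_b. The status string here is a hypothetical placeholder.
def example_spinner_status():
    with directord.Spinner(run=True) as indicator:
        indicator.pipe_b.send("Job 1 of 1 complete")  # shown by the spinner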
def bootstrap_cluster(self, run_indicator=None):
    """Run a cluster wide bootstrap using a catalog file.

    Cluster bootstrap requires a catalog file to run. Catalogs are broken
    up into two sections, `directord_server` and `directord_clients`. All
    servers are processed serially and first. All clients are processed in
    parallel using a maximum of the threads argument.

    :param run_indicator: Enable | disable the run indicator
    :type run_indicator: Boolean
    :returns: Tuple
    """
    q = self.get_queue()
    catalog = dict()
    if not self.catalog:
        raise SystemExit("No catalog was defined.")

    for c in self.catalog:
        utils.merge_dict(base=catalog, new=yaml.safe_load(c))

    if run_indicator is None:
        # Disable the indicator in debug mode so log output stays readable.
        run_indicator = not self.debug

    with directord.Spinner(run=run_indicator, queue=q) as indicator:
        self.indicator = indicator
        directord_server = catalog.get("directord_server")
        if directord_server:
            self.log.debug("Loading server information")
            for s in self.bootstrap_catalog_entry(
                entry=directord_server, required_entries=["targets"]
            ):
                s["key_file"] = self.key_file
                catalog["directord_bootstrap"] = s
                self.bootstrap_run(job_def=s, catalog=catalog)

        directord_clients = catalog.get("directord_clients")
        if directord_clients:
            self.log.debug("Loading client information")
            for c in self.bootstrap_catalog_entry(entry=directord_clients):
                c["key_file"] = self.key_file
                q.put(c)

            threads = list()
            for _ in range(self.threads):
                threads.append(
                    (
                        self.thread(
                            target=self.bootstrap_q_processor,
                            args=(q, catalog),
                        ),
                        True,
                    )
                )
            self.run_threads(threads=threads)

    targets = set()
    while not self.return_queue.empty():
        try:
            targets.add(self.return_queue.get_nowait())
        except Exception:
            pass
    return tuple(sorted(targets))
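
# An illustrative catalog sketch for bootstrap_cluster, assuming only the
# structure the method itself relies on: a `directord_server` section and a
# `directord_clients` section, where server entries must provide `targets`
# (enforced via required_entries=["targets"]). The per-target fields shown
# here are hypothetical placeholders, not a documented schema.
EXAMPLE_CATALOG = """
directord_server:
  targets:
    - host: server0.example.com   # hypothetical field name
directord_clients:
  targets:
    - host: client0.example.com
    - host: client1.example.com
"""

# Each catalog file is parsed with yaml.safe_load and merged, as above.
example_catalog = yaml.safe_load(EXAMPLE_CATALOG)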