def monitor(run_once=False):
    """Render a live cluster dashboard in the terminal, backed by Redis.

    Polls ``Stat.get_all`` roughly once per second and redraws an 8-column
    status table (Host, Id, Status, Pool, TQ, RQ, RC, Up) until the user
    presses ``q``.

    :param run_once: when True, draw a single frame and return the stats
        list instead of looping (used by the test suite).
    :return: result of ``Stat.get_all(r=r)`` when ``run_once`` is True,
        otherwise ``None``.
    """
    term = Terminal()
    r = redis_client
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        val = None
        start_width = int(term.width / 8)
        while val not in (u'q', u'Q',):
            col_width = int(term.width / 8)
            # In case of resize
            if col_width != start_width:
                print(term.clear)
                start_width = col_width
            # header row
            print(term.move(0, 0) + term.black_on_green(term.center('Host', width=col_width - 1)))
            print(term.move(0, 1 * col_width) + term.black_on_green(term.center('Id', width=col_width - 1)))
            print(term.move(0, 2 * col_width) + term.black_on_green(term.center('Status', width=col_width - 1)))
            print(term.move(0, 3 * col_width) + term.black_on_green(term.center('Pool', width=col_width - 1)))
            print(term.move(0, 4 * col_width) + term.black_on_green(term.center('TQ', width=col_width - 1)))
            print(term.move(0, 5 * col_width) + term.black_on_green(term.center('RQ', width=col_width - 1)))
            print(term.move(0, 6 * col_width) + term.black_on_green(term.center('RC', width=col_width - 1)))
            print(term.move(0, 7 * col_width) + term.black_on_green(term.center('Up', width=col_width - 1)))
            i = 2
            stats = Stat.get_all(r=r)
            print(term.clear_eos())
            for stat in stats:
                # color status
                if stat.status == Conf.WORKING:
                    status = term.green(Conf.WORKING)
                elif stat.status == Conf.STOPPED:
                    status = term.red(Conf.STOPPED)
                elif stat.status == Conf.IDLE:
                    status = Conf.IDLE
                else:
                    status = term.yellow(stat.status)
                # color q's
                # FIX: term.center expects text — always stringify the queue
                # sizes instead of passing raw ints through when they are 0.
                tasks = str(stat.task_q_size)
                if stat.task_q_size > 0:
                    tasks = term.cyan(tasks)
                results = str(stat.done_q_size)
                if stat.done_q_size > 0:
                    results = term.cyan(results)
                # color workers
                workers = str(len(stat.workers))
                if len(stat.workers) < Conf.WORKERS:
                    workers = term.yellow(workers)
                # format uptime as H:MM:SS
                uptime = (timezone.now() - stat.tob).total_seconds()
                hours, remainder = divmod(uptime, 3600)
                minutes, seconds = divmod(remainder, 60)
                uptime = '%d:%02d:%02d' % (hours, minutes, seconds)
                # print to the terminal
                # FIX: cluster_id and reincarnations stringified for term.center,
                # matching the later revisions of this function.
                print(term.move(i, 0) + term.center(stat.host[:col_width - 1], width=col_width - 1))
                print(term.move(i, 1 * col_width) + term.center(str(stat.cluster_id), width=col_width - 1))
                print(term.move(i, 2 * col_width) + term.center(status, width=col_width - 1))
                print(term.move(i, 3 * col_width) + term.center(workers, width=col_width - 1))
                print(term.move(i, 4 * col_width) + term.center(tasks, width=col_width - 1))
                print(term.move(i, 5 * col_width) + term.center(results, width=col_width - 1))
                print(term.move(i, 6 * col_width) + term.center(str(stat.reincarnations), width=col_width - 1))
                print(term.move(i, 7 * col_width) + term.center(uptime, width=col_width - 1))
                i += 1
            # for testing
            if run_once:
                return Stat.get_all(r=r)
            print(term.move(i + 2, 0) + term.center('[Press q to quit]'))
            val = term.inkey(timeout=1)
def monitor(run_once=False, broker=None):
    """Render a live cluster dashboard in the terminal, backed by a broker.

    Polls ``Stat.get_all`` roughly once per second and redraws an 8-column
    status table plus a bottom summary bar until the user presses ``q``.

    :param run_once: when True, draw a single frame and return the stats
        list instead of looping (used by the test suite).
    :param broker: broker instance to query; defaults to ``get_broker()``.
    :return: result of ``Stat.get_all(broker=broker)`` when ``run_once`` is
        True, otherwise ``None``.
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    # fail fast if the broker is unreachable
    broker.ping()
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        val = None
        start_width = int(term.width / 8)
        while val not in (
                "q",
                "Q",
        ):
            col_width = int(term.width / 8)
            # In case of resize
            if col_width != start_width:
                print(term.clear())
                start_width = col_width
            # header row
            print(
                term.move(0, 0) +
                term.black_on_green(term.center(_("Host"), width=col_width - 1)))
            print(
                term.move(0, 1 * col_width) +
                term.black_on_green(term.center(_("Id"), width=col_width - 1)))
            print(
                term.move(0, 2 * col_width) + term.black_on_green(
                    term.center(_("State"), width=col_width - 1)))
            print(
                term.move(0, 3 * col_width) +
                term.black_on_green(term.center(_("Pool"), width=col_width - 1)))
            print(
                term.move(0, 4 * col_width) +
                term.black_on_green(term.center(_("TQ"), width=col_width - 1)))
            print(
                term.move(0, 5 * col_width) +
                term.black_on_green(term.center(_("RQ"), width=col_width - 1)))
            print(
                term.move(0, 6 * col_width) +
                term.black_on_green(term.center(_("RC"), width=col_width - 1)))
            print(
                term.move(0, 7 * col_width) +
                term.black_on_green(term.center(_("Up"), width=col_width - 1)))
            i = 2
            stats = Stat.get_all(broker=broker)
            print(term.clear_eos())
            for stat in stats:
                status = stat.status
                # color status
                if stat.status == Conf.WORKING:
                    status = term.green(str(Conf.WORKING))
                elif stat.status == Conf.STOPPING:
                    status = term.yellow(str(Conf.STOPPING))
                elif stat.status == Conf.STOPPED:
                    status = term.red(str(Conf.STOPPED))
                elif stat.status == Conf.IDLE:
                    status = str(Conf.IDLE)
                # color q's
                tasks = str(stat.task_q_size)
                if stat.task_q_size > 0:
                    tasks = term.cyan(str(stat.task_q_size))
                    # green when the queue is exactly at its configured limit
                    if Conf.QUEUE_LIMIT and stat.task_q_size == Conf.QUEUE_LIMIT:
                        tasks = term.green(str(stat.task_q_size))
                results = stat.done_q_size
                if results > 0:
                    results = term.cyan(str(results))
                # color workers — yellow when fewer than the configured pool size
                workers = len(stat.workers)
                if workers < Conf.WORKERS:
                    workers = term.yellow(str(workers))
                # format uptime as H:MM:SS
                uptime = (timezone.now() - stat.tob).total_seconds()
                hours, remainder = divmod(uptime, 3600)
                minutes, seconds = divmod(remainder, 60)
                uptime = "%d:%02d:%02d" % (hours, minutes, seconds)
                # print to the terminal
                print(
                    term.move(i, 0) +
                    term.center(stat.host[:col_width - 1], width=col_width - 1))
                print(
                    term.move(i, 1 * col_width) +
                    term.center(str(stat.cluster_id)[-8:], width=col_width - 1))
                print(
                    term.move(i, 2 * col_width) +
                    term.center(status, width=col_width - 1))
                print(
                    term.move(i, 3 * col_width) +
                    term.center(workers, width=col_width - 1))
                print(
                    term.move(i, 4 * col_width) +
                    term.center(tasks, width=col_width - 1))
                print(
                    term.move(i, 5 * col_width) +
                    term.center(results, width=col_width - 1))
                print(
                    term.move(i, 6 * col_width) +
                    term.center(stat.reincarnations, width=col_width - 1))
                print(
                    term.move(i, 7 * col_width) +
                    term.center(uptime, width=col_width - 1))
                i += 1
            # bottom bar
            i += 1
            queue_size = broker.queue_size()
            lock_size = broker.lock_size()
            # show "queued(locked)" when the broker reports held locks
            if lock_size:
                queue_size = f"{queue_size}({lock_size})"
            print(
                term.move(i, 0) + term.white_on_cyan(
                    term.center(broker.info(), width=col_width * 2)))
            print(
                term.move(i, 2 * col_width) +
                term.black_on_cyan(term.center(_("Queued"), width=col_width)))
            print(
                term.move(i, 3 * col_width) +
                term.white_on_cyan(term.center(queue_size, width=col_width)))
            print(
                term.move(i, 4 * col_width) +
                term.black_on_cyan(term.center(_("Success"), width=col_width)))
            print(
                term.move(i, 5 * col_width) + term.white_on_cyan(
                    term.center(models.Success.objects.count(), width=col_width)))
            print(
                term.move(i, 6 * col_width) +
                term.black_on_cyan(term.center(_("Failures"), width=col_width))
            )
            print(
                term.move(i, 7 * col_width) + term.white_on_cyan(
                    term.center(models.Failure.objects.count(), width=col_width)))
            # for testing
            if run_once:
                return Stat.get_all(broker=broker)
            print(term.move(i + 2, 0) + term.center(_("[Press q to quit]")))
            val = term.inkey(timeout=1)
def info(broker=None):
    """Print a one-shot statistics overview for all clusters.

    Shows cluster/worker/restart counts, queue size, success/failure totals,
    schedule count, task throughput over the last 24 hours and the average
    task execution time.

    :param broker: broker instance to query; defaults to ``get_broker()``.
    :return: ``True`` (so callers/management commands can signal success).
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    # fail fast if the broker is unreachable
    broker.ping()
    stat = Stat.get_all(broker=broker)
    # general stats
    clusters = len(stat)
    workers = 0
    reincarnations = 0
    for cluster in stat:
        workers += len(cluster.workers)
        reincarnations += cluster.reincarnations
    # calculate tasks pm and avg exec time
    tasks_per = 0
    per = _("day")
    exec_time = 0
    last_tasks = models.Success.objects.filter(stopped__gte=timezone.now() -
                                               timedelta(hours=24))
    tasks_per_day = last_tasks.count()
    if tasks_per_day > 0:
        # average execution time over the last 24 hours
        if connection.vendor != "sqlite":
            exec_time = last_tasks.aggregate(
                time_taken=Sum(F("stopped") - F("started")))
            exec_time = exec_time["time_taken"].total_seconds() / tasks_per_day
        else:
            # can't sum timedeltas on sqlite
            for t in last_tasks:
                exec_time += t.time_taken()
            exec_time = exec_time / tasks_per_day
        # tasks per second/minute/hour/day in the last 24 hours
        if tasks_per_day > 24 * 60 * 60:
            tasks_per = tasks_per_day / (24 * 60 * 60)
            per = _("second")
        elif tasks_per_day > 24 * 60:
            tasks_per = tasks_per_day / (24 * 60)
            per = _("minute")
        elif tasks_per_day > 24:
            tasks_per = tasks_per_day / 24
            per = _("hour")
        else:
            tasks_per = tasks_per_day
    # print to terminal
    print(term.clear_eos())
    col_width = int(term.width / 6)
    # NOTE(review): wrapping an f-string in _() translates the already-formatted
    # text, so these strings cannot be extracted for translation — confirm
    # whether that is intentional.
    print(
        term.black_on_green(
            term.center(
                _(f'-- {Conf.PREFIX.capitalize()} { ".".join(str(v) for v in VERSION)} on {broker.info()} --'
                  ))))
    print(
        term.cyan(_("Clusters")) + term.move_x(1 * col_width) +
        term.white(str(clusters)) + term.move_x(2 * col_width) +
        term.cyan(_("Workers")) + term.move_x(3 * col_width) +
        term.white(str(workers)) + term.move_x(4 * col_width) +
        term.cyan(_("Restarts")) + term.move_x(5 * col_width) +
        term.white(str(reincarnations)))
    print(
        term.cyan(_("Queued")) + term.move_x(1 * col_width) +
        term.white(str(broker.queue_size())) + term.move_x(2 * col_width) +
        term.cyan(_("Successes")) + term.move_x(3 * col_width) +
        term.white(str(models.Success.objects.count())) +
        term.move_x(4 * col_width) + term.cyan(_("Failures")) +
        term.move_x(5 * col_width) +
        term.white(str(models.Failure.objects.count())))
    print(
        term.cyan(_("Schedules")) + term.move_x(1 * col_width) +
        term.white(str(models.Schedule.objects.count())) +
        term.move_x(2 * col_width) + term.cyan(_(f"Tasks/{per}")) +
        term.move_x(3 * col_width) + term.white(f"{tasks_per:.2f}") +
        term.move_x(4 * col_width) + term.cyan(_("Avg time")) +
        term.move_x(5 * col_width) + term.white(f"{exec_time:.4f}"))
    return True
def memory(run_once=False, workers=False, broker=None):
    """Render a live memory-usage dashboard for the cluster processes.

    Uses ``psutil`` to show system memory availability plus per-process
    memory (sentinel, monitor, worker pool) for every cluster, refreshing
    about once per second until ``q`` is pressed.

    :param run_once: when True, draw a single frame and return the stats
        list instead of looping (used by the test suite).
    :param workers: when True, also render a per-worker memory breakdown.
    :param broker: broker instance to query; defaults to ``get_broker()``.
    :return: result of ``Stat.get_all(broker=broker)`` when ``run_once`` is
        True, otherwise ``None``.
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    # fail fast if the broker is unreachable
    broker.ping()
    # psutil is an optional dependency; bail out with a message if missing
    if not psutil:
        print(term.clear_eos())
        print(
            term.white_on_red(
                'Cannot start "qmemory" command. Missing "psutil" library.'))
        return
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        # track the lowest observed available-memory percentage and when it occurred
        MEMORY_AVAILABLE_LOWEST_PERCENTAGE = 100.0
        MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT = timezone.now()
        cols = 8
        val = None
        start_width = int(term.width / cols)
        while val not in ["q", "Q"]:
            col_width = int(term.width / cols)
            # In case of resize
            if col_width != start_width:
                print(term.clear())
                start_width = col_width
            # sentinel, monitor and workers memory usage
            print(
                term.move(0, 0 * col_width) +
                term.black_on_green(term.center(_("Host"), width=col_width - 1)))
            print(
                term.move(0, 1 * col_width) +
                term.black_on_green(term.center(_("Id"), width=col_width - 1)))
            print(
                term.move(0, 2 * col_width) + term.black_on_green(
                    term.center(_("Available (%)"), width=col_width - 1)))
            print(
                term.move(0, 3 * col_width) + term.black_on_green(
                    term.center(_("Available (MB)"), width=col_width - 1)))
            print(
                term.move(0, 4 * col_width) + term.black_on_green(
                    term.center(_("Total (MB)"), width=col_width - 1)))
            print(
                term.move(0, 5 * col_width) + term.black_on_green(
                    term.center(_("Sentinel (MB)"), width=col_width - 1)))
            print(
                term.move(0, 6 * col_width) + term.black_on_green(
                    term.center(_("Monitor (MB)"), width=col_width - 1)))
            print(
                term.move(0, 7 * col_width) + term.black_on_green(
                    term.center(_("Workers (MB)"), width=col_width - 1)))
            row = 2
            stats = Stat.get_all(broker=broker)
            print(term.clear_eos())
            for stat in stats:
                # memory available (%)
                memory_available_percentage = round(
                    psutil.virtual_memory().available * 100 /
                    psutil.virtual_memory().total,
                    2,
                )
                # memory available (MB)
                memory_available = round(
                    psutil.virtual_memory().available / 1024**2, 2)
                # update the low-water mark
                if memory_available_percentage < MEMORY_AVAILABLE_LOWEST_PERCENTAGE:
                    MEMORY_AVAILABLE_LOWEST_PERCENTAGE = memory_available_percentage
                    MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT = timezone.now()
                print(
                    term.move(row, 0 * col_width) +
                    term.center(stat.host[:col_width - 1], width=col_width - 1))
                print(
                    term.move(row, 1 * col_width) +
                    term.center(str(stat.cluster_id)[-8:], width=col_width - 1))
                print(
                    term.move(row, 2 * col_width) + term.center(
                        memory_available_percentage, width=col_width - 1))
                print(
                    term.move(row, 3 * col_width) +
                    term.center(memory_available, width=col_width - 1))
                print(
                    term.move(row, 4 * col_width) + term.center(
                        round(psutil.virtual_memory().total / 1024**2, 2),
                        width=col_width - 1,
                    ))
                print(
                    term.move(row, 5 * col_width) + term.center(
                        get_process_mb(stat.sentinel), width=col_width - 1))
                # older Stat payloads may lack a monitor pid; getattr guards that
                print(
                    term.move(row, 6 * col_width) + term.center(
                        get_process_mb(getattr(stat, "monitor", None)),
                        width=col_width - 1,
                    ))
                # sum worker memory; get_process_mb returns a string on failure,
                # which is counted as zero here
                workers_mb = 0
                for worker_pid in stat.workers:
                    result = get_process_mb(worker_pid)
                    if isinstance(result, str):
                        result = 0
                    workers_mb += result
                print(
                    term.move(row, 7 * col_width) +
                    term.center(workers_mb or "NO_PROCESSES_FOUND",
                                width=col_width - 1))
                row += 1
            # each worker's memory usage
            if workers:
                row += 2
                # re-derive column width for the per-worker table
                col_width = int(term.width / (1 + Conf.WORKERS))
                print(
                    term.move(row, 0 * col_width) + term.black_on_cyan(
                        term.center(_("Id"), width=col_width - 1)))
                for worker_num in range(Conf.WORKERS):
                    print(
                        term.move(row, (worker_num + 1) * col_width) +
                        term.black_on_cyan(
                            term.center(
                                "Worker #{} (MB)".format(worker_num + 1),
                                width=col_width - 1,
                            )))
                row += 2
                for stat in stats:
                    print(
                        term.move(row, 0 * col_width) + term.center(
                            str(stat.cluster_id)[-8:], width=col_width - 1))
                    for idx, worker_pid in enumerate(stat.workers):
                        mb_used = get_process_mb(worker_pid)
                        print(
                            term.move(row, (idx + 1) * col_width) +
                            term.center(mb_used, width=col_width - 1))
                    row += 1
                row += 1
            # low-water mark footer
            # NOTE(review): timestamp is formatted with a hard-coded "+00:00"
            # suffix — presumably timezone.now() is UTC here; confirm.
            print(
                term.move(row, 0) + _("Available lowest (%): {} ({})").format(
                    str(MEMORY_AVAILABLE_LOWEST_PERCENTAGE),
                    MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT.strftime(
                        "%Y-%m-%d %H:%M:%S+00:00"),
                ))
            # for testing
            if run_once:
                return Stat.get_all(broker=broker)
            print(term.move(row + 2, 0) + term.center("[Press q to quit]"))
            val = term.inkey(timeout=1)
def info(broker=None):
    """Print a one-shot statistics overview for all clusters (legacy revision).

    Shows cluster/worker/restart counts, queue size, success/failure totals,
    schedule count, task throughput over the last 24 hours and the average
    task execution time.

    :param broker: broker instance to query; defaults to ``get_broker()``.
    :return: ``True`` (so callers/management commands can signal success).
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    # fail fast if the broker is unreachable
    broker.ping()
    stat = Stat.get_all(broker=broker)
    # general stats
    clusters = len(stat)
    workers = 0
    reincarnations = 0
    for cluster in stat:
        workers += len(cluster.workers)
        reincarnations += cluster.reincarnations
    # calculate tasks pm and avg exec time
    tasks_per = 0
    per = _('day')
    exec_time = 0
    last_tasks = models.Success.objects.filter(stopped__gte=timezone.now() -
                                               timedelta(hours=24))
    tasks_per_day = last_tasks.count()
    if tasks_per_day > 0:
        # average execution time over the last 24 hours
        if not connection.vendor == 'sqlite':
            exec_time = last_tasks.aggregate(time_taken=Sum(F('stopped') -
                                                            F('started')))
            exec_time = exec_time['time_taken'].total_seconds() / tasks_per_day
        else:
            # can't sum timedeltas on sqlite
            for t in last_tasks:
                exec_time += t.time_taken()
            exec_time = exec_time / tasks_per_day
        # tasks per second/minute/hour/day in the last 24 hours
        if tasks_per_day > 24 * 60 * 60:
            tasks_per = tasks_per_day / (24 * 60 * 60)
            per = _('second')
        elif tasks_per_day > 24 * 60:
            tasks_per = tasks_per_day / (24 * 60)
            per = _('minute')
        elif tasks_per_day > 24:
            tasks_per = tasks_per_day / 24
            per = _('hour')
        else:
            tasks_per = tasks_per_day
    # print to terminal
    print(term.clear_eos())
    col_width = int(term.width / 6)
    print(term.black_on_green(
        term.center(
            _('-- {} {} on {} --').format(Conf.PREFIX.capitalize(),
                                          '.'.join(str(v) for v in VERSION),
                                          broker.info()))))
    print(term.cyan(_('Clusters')) +
          term.move_x(1 * col_width) +
          term.white(str(clusters)) +
          term.move_x(2 * col_width) +
          term.cyan(_('Workers')) +
          term.move_x(3 * col_width) +
          term.white(str(workers)) +
          term.move_x(4 * col_width) +
          term.cyan(_('Restarts')) +
          term.move_x(5 * col_width) +
          term.white(str(reincarnations))
          )
    print(term.cyan(_('Queued')) +
          term.move_x(1 * col_width) +
          term.white(str(broker.queue_size())) +
          term.move_x(2 * col_width) +
          term.cyan(_('Successes')) +
          term.move_x(3 * col_width) +
          term.white(str(models.Success.objects.count())) +
          term.move_x(4 * col_width) +
          term.cyan(_('Failures')) +
          term.move_x(5 * col_width) +
          term.white(str(models.Failure.objects.count()))
          )
    print(term.cyan(_('Schedules')) +
          term.move_x(1 * col_width) +
          term.white(str(models.Schedule.objects.count())) +
          term.move_x(2 * col_width) +
          term.cyan(_('Tasks/{}'.format(per))) +
          term.move_x(3 * col_width) +
          term.white('{0:.2f}'.format(tasks_per)) +
          term.move_x(4 * col_width) +
          term.cyan(_('Avg time')) +
          term.move_x(5 * col_width) +
          term.white('{0:.4f}'.format(exec_time))
          )
    return True
def monitor(run_once=False, broker=None):
    """Render a live cluster dashboard in the terminal, backed by a broker.

    Polls ``Stat.get_all`` roughly once per second and redraws an 8-column
    status table plus a bottom summary bar until the user presses ``q``.

    :param run_once: when True, draw a single frame and return the stats
        list instead of looping (used by the test suite).
    :param broker: broker instance to query; defaults to ``get_broker()``.
    :return: result of ``Stat.get_all(broker=broker)`` when ``run_once`` is
        True, otherwise ``None``.
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    # fail fast if the broker is unreachable
    broker.ping()
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        val = None
        start_width = int(term.width / 8)
        while val not in (u'q', u'Q',):
            col_width = int(term.width / 8)
            # In case of resize
            if col_width != start_width:
                print(term.clear())
                start_width = col_width
            # header row
            print(term.move(0, 0) + term.black_on_green(term.center(_('Host'), width=col_width - 1)))
            print(term.move(0, 1 * col_width) + term.black_on_green(term.center(_('Id'), width=col_width - 1)))
            print(term.move(0, 2 * col_width) + term.black_on_green(term.center(_('State'), width=col_width - 1)))
            print(term.move(0, 3 * col_width) + term.black_on_green(term.center(_('Pool'), width=col_width - 1)))
            print(term.move(0, 4 * col_width) + term.black_on_green(term.center(_('TQ'), width=col_width - 1)))
            print(term.move(0, 5 * col_width) + term.black_on_green(term.center(_('RQ'), width=col_width - 1)))
            print(term.move(0, 6 * col_width) + term.black_on_green(term.center(_('RC'), width=col_width - 1)))
            print(term.move(0, 7 * col_width) + term.black_on_green(term.center(_('Up'), width=col_width - 1)))
            i = 2
            stats = Stat.get_all(broker=broker)
            print(term.clear_eos())
            for stat in stats:
                status = stat.status
                # color status
                if stat.status == Conf.WORKING:
                    status = term.green(str(Conf.WORKING))
                elif stat.status == Conf.STOPPING:
                    status = term.yellow(str(Conf.STOPPING))
                elif stat.status == Conf.STOPPED:
                    status = term.red(str(Conf.STOPPED))
                elif stat.status == Conf.IDLE:
                    status = str(Conf.IDLE)
                # color q's
                tasks = str(stat.task_q_size)
                if stat.task_q_size > 0:
                    tasks = term.cyan(str(stat.task_q_size))
                    # green when the queue is exactly at its configured limit
                    if Conf.QUEUE_LIMIT and stat.task_q_size == Conf.QUEUE_LIMIT:
                        tasks = term.green(str(stat.task_q_size))
                # FIX: always stringify — term.center expects text, and
                # results/workers stayed ints on the 0 / full-pool paths.
                results = str(stat.done_q_size)
                if stat.done_q_size > 0:
                    results = term.cyan(results)
                # color workers — yellow when fewer than the configured pool size
                workers = str(len(stat.workers))
                if len(stat.workers) < Conf.WORKERS:
                    workers = term.yellow(workers)
                # format uptime as H:MM:SS
                uptime = (timezone.now() - stat.tob).total_seconds()
                hours, remainder = divmod(uptime, 3600)
                minutes, seconds = divmod(remainder, 60)
                uptime = '%d:%02d:%02d' % (hours, minutes, seconds)
                # print to the terminal
                # FIX: cluster_id and reincarnations stringified for term.center,
                # matching the newer revision of this function.
                print(term.move(i, 0) + term.center(stat.host[:col_width - 1], width=col_width - 1))
                print(term.move(i, 1 * col_width) + term.center(str(stat.cluster_id), width=col_width - 1))
                print(term.move(i, 2 * col_width) + term.center(status, width=col_width - 1))
                print(term.move(i, 3 * col_width) + term.center(workers, width=col_width - 1))
                print(term.move(i, 4 * col_width) + term.center(tasks, width=col_width - 1))
                print(term.move(i, 5 * col_width) + term.center(results, width=col_width - 1))
                print(term.move(i, 6 * col_width) + term.center(str(stat.reincarnations), width=col_width - 1))
                print(term.move(i, 7 * col_width) + term.center(uptime, width=col_width - 1))
                i += 1
            # bottom bar
            i += 1
            queue_size = broker.queue_size()
            lock_size = broker.lock_size()
            # show "queued(locked)" when the broker reports held locks
            if lock_size:
                queue_size = '{}({})'.format(queue_size, lock_size)
            print(term.move(i, 0) + term.white_on_cyan(term.center(broker.info(), width=col_width * 2)))
            print(term.move(i, 2 * col_width) + term.black_on_cyan(term.center(_('Queued'), width=col_width)))
            print(term.move(i, 3 * col_width) + term.white_on_cyan(term.center(queue_size, width=col_width)))
            print(term.move(i, 4 * col_width) + term.black_on_cyan(term.center(_('Success'), width=col_width)))
            # FIX: counts stringified for term.center
            print(term.move(i, 5 * col_width) + term.white_on_cyan(
                term.center(str(models.Success.objects.count()), width=col_width)))
            print(term.move(i, 6 * col_width) + term.black_on_cyan(term.center(_('Failures'), width=col_width)))
            print(term.move(i, 7 * col_width) + term.white_on_cyan(
                term.center(str(models.Failure.objects.count()), width=col_width)))
            # for testing
            if run_once:
                return Stat.get_all(broker=broker)
            print(term.move(i + 2, 0) + term.center(_('[Press q to quit]')))
            val = term.inkey(timeout=1)
# NOTE(review): fragment of a CLI entry point — the opening `if` branch that
# this `elif` continues is outside the visible chunk.
elif args.host:
    # a single host passed directly on the command line
    hosts.append(args.host)
# build one FDB job per valid host, sharing a single Terminal
queue = []
t = Terminal()
print(t.clear)
for host in hosts:
    # only accept values that parse as a URL with a network location
    if urlparse(host).netloc:
        if args.base_dir:
            # NOTE(review): appends bare `base_dir`, not `args.base_dir` —
            # NameError unless a module-level `base_dir` exists; confirm.
            host+=base_dir
        fdb = FDB(
            host = host,
            wordlist=args.wordlist,
            extensions= args.extensions,
            limit=args.limit,
            verbosity=args.verbosity,
            output_directory=args.output_directory,
            terminal=t,
        )
        queue.append(fdb)
    else:
        print_warning("Malformed host {host} line".format(host=host))
# run all queued jobs under the controller
fdbc = FDBController(terminal=t)
fdbc.run(queue)
# park the cursor below the per-thread progress rows for the final banner
with t.location(0,args.threads+5):
    print(t.center(t.black_on_green("All FDBs completed.")))
# wait for a keypress before clearing the screen and exiting
with t.cbreak():
    t.inkey()
print(t.clear)
def monitor(run_once=False):
    """Render a live cluster dashboard in the terminal, backed by Redis.

    Verifies the Redis connection, then polls ``Stat.get_all`` roughly once
    per second and redraws an 8-column status table until ``q`` is pressed.

    :param run_once: when True, draw a single frame and return the stats
        list instead of looping (used by the test suite).
    :return: result of ``Stat.get_all(r=r)`` when ``run_once`` is True,
        otherwise ``None``.
    :raises Exception: re-raises whatever ``redis_client.ping()`` raised when
        the Redis server is unreachable.
    """
    term = Terminal()
    r = redis_client
    # fail fast (and loudly) if Redis is unreachable
    try:
        redis_client.ping()
    except Exception as e:
        print(term.red("Can not connect to Redis server."))
        logger.exception(e)
        raise e
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        val = None
        start_width = int(term.width / 8)
        while val not in (u"q", u"Q"):
            col_width = int(term.width / 8)
            # In case of resize
            if col_width != start_width:
                print(term.clear)
                start_width = col_width
            # header row
            print(term.move(0, 0) + term.black_on_green(term.center(_("Host"), width=col_width - 1)))
            print(term.move(0, 1 * col_width) + term.black_on_green(term.center(_("Id"), width=col_width - 1)))
            print(term.move(0, 2 * col_width) + term.black_on_green(term.center(_("State"), width=col_width - 1)))
            print(term.move(0, 3 * col_width) + term.black_on_green(term.center(_("Pool"), width=col_width - 1)))
            print(term.move(0, 4 * col_width) + term.black_on_green(term.center(_("TQ"), width=col_width - 1)))
            print(term.move(0, 5 * col_width) + term.black_on_green(term.center(_("RQ"), width=col_width - 1)))
            print(term.move(0, 6 * col_width) + term.black_on_green(term.center(_("RC"), width=col_width - 1)))
            print(term.move(0, 7 * col_width) + term.black_on_green(term.center(_("Up"), width=col_width - 1)))
            i = 2
            stats = Stat.get_all(r=r)
            print(term.clear_eos())
            for stat in stats:
                status = stat.status
                # color status
                if stat.status == Conf.WORKING:
                    status = term.green(str(Conf.WORKING))
                elif stat.status == Conf.STOPPING:
                    status = term.yellow(str(Conf.STOPPING))
                elif stat.status == Conf.STOPPED:
                    status = term.red(str(Conf.STOPPED))
                elif stat.status == Conf.IDLE:
                    status = str(Conf.IDLE)
                # color q's
                tasks = stat.task_q_size
                if tasks > 0:
                    tasks = term.cyan(str(tasks))
                results = stat.done_q_size
                if results > 0:
                    results = term.cyan(str(results))
                # color workers — yellow when fewer than the configured pool size
                workers = len(stat.workers)
                if workers < Conf.WORKERS:
                    workers = term.yellow(str(workers))
                # format uptime as H:MM:SS
                uptime = (timezone.now() - stat.tob).total_seconds()
                hours, remainder = divmod(uptime,
                                          3600)
                minutes, seconds = divmod(remainder, 60)
                uptime = "%d:%02d:%02d" % (hours, minutes, seconds)
                # print to the terminal
                # NOTE(review): cluster_id/reincarnations (and 0-valued queue
                # sizes) are passed to term.center unconverted; later revisions
                # wrap them in str() — confirm blessed accepts non-str here.
                print(term.move(i, 0) + term.center(stat.host[: col_width - 1], width=col_width - 1))
                print(term.move(i, 1 * col_width) + term.center(stat.cluster_id, width=col_width - 1))
                print(term.move(i, 2 * col_width) + term.center(status, width=col_width - 1))
                print(term.move(i, 3 * col_width) + term.center(workers, width=col_width - 1))
                print(term.move(i, 4 * col_width) + term.center(tasks, width=col_width - 1))
                print(term.move(i, 5 * col_width) + term.center(results, width=col_width - 1))
                print(term.move(i, 6 * col_width) + term.center(stat.reincarnations, width=col_width - 1))
                print(term.move(i, 7 * col_width) + term.center(uptime, width=col_width - 1))
                i += 1
            # for testing
            if run_once:
                return Stat.get_all(r=r)
            print(term.move(i + 2, 0) + term.center(_("[Press q to quit]")))
            val = term.inkey(timeout=1)
class IriTop: global HEADERES def __init__(self, args): """ This instantiates the Terminal class from blessed. Terminal type can be forced via TERM environment variable. Terminal types supported by the system can normally be found via 'find /usr/share/terminfo -type f -printf "%f\n"' As an example setting to vt200 ensures no color output. """ self.term = Terminal() self.prev = {} self.poll_delay = args.poll_delay self.blink_delay = args.blink_delay """ The commands sent in the query to the node """ self.commands = [{'command': 'getNeighbors'}, {'command': 'getNodeInfo'}] self.txkeys = [{'keyshort': 'ad', 'sortkey': '1', 'header': 'Neighbor Address', 'key': 'neighborAddress', 'col': 0, 'sortcolumn': 'address'}, {'keyshort': 'at', 'sortkey': '2', 'header': 'All tx', 'key': 'numberOfAllTransactions', 'col': 3, 'sortcolumn': 'numberOfAllTransactions'}, {'keyshort': 'nt', 'sortkey': '3', 'header': 'New tx', 'key': 'numberOfNewTransactions', 'col': 4, 'sortcolumn': 'numberOfNewTransactions'}, {'keyshort': 'st', 'sortkey': '4', 'header': 'Sent tx', 'key': 'numberOfSentTransactions', 'col': 5, 'sortcolumn': 'numberOfSentTransactions'}, {'keyshort': 'rt', 'sortkey': '5', 'header': 'Random tx', 'key': 'numberOfRandomTransactionRequests', 'col': 6, 'sortcolumn': 'numberOfRandomTransactionRequests'}, {'keyshort': 'it', 'sortkey': '6', 'header': 'Invalid tx', 'key': 'numberOfInvalidTransactions', 'col': 7, 'sortcolumn': 'numberOfInvalidTransactions'}, {'keyshort': 'xt', 'sortkey': '7', 'header': 'Stale tx', 'key': 'numberOfStaleTransactions', 'col': 8, 'sortcolumn': 'numberOfStaleTransactions'}] self.randSeed = random.randint(0, 100000) self.baseline = dict() self.baselineStr = ['Off', 'On'] self.baselineToggle = 0 self.obscureAddrToggle = args.obscure_address self.width = 0 self.height = 0 self.oldheight = 0 self.oldwidth = 0 self.incommunicados = 0 self.localhost = self.set_local_node() self.duration_hist = list() self.duration = 0 self.duration_avg = 0 self.sortmode = False 
self.sortcolumn = None self.sortorderlist = ["", " "+u"\u25BC", " "+u"\u25B2"] self.sortorder = None self.mss_0 = "" self.prev_ms_start = 0 # Initiate column sort if args.sort: try: if args.sort < 0: self.sortorder = self.sortorderlist[1] else: self.sortorder = self.sortorderlist[2] args.sort = abs(args.sort) self.sortcolumn = self.txkeys[args.sort-1]['sortcolumn'] except IndexError: self.sortcolumn = self.txkeys[0]['sortcolumn'] # Set authentication header if required if args.username is not None: auth_str = '%s:%s' % (args.username, args.password) auth_token = base64.b64encode(auth_str.encode("utf-8")) HEADERS['Authorization'] = 'Basic %s' % auth_token.decode() @property def get_local_ips(self): return check_output(['/bin/hostname', '--all-ip-addresses'] ).rstrip().split() def set_local_node(self): local_ips = ['localhost', '127.0.0.1', '::1'] local_ips.extend(self.get_local_ips) if urlparse(NODE.lower()).hostname in local_ips: return True return False def run(self, stdscr): """ Clear the screen on start """ stdscr.clear() """ Counter for number of cycles """ cycles = 0 node = None print("IRITop connecting to node %s..." 
% self.showAddress(NODE)) with self.term.hidden_cursor(): val = "" tlast = 0 self.hist = {} while val.lower() != 'q': """ Exit if max cycles specified """ if int(MAX_CYCLES) != 0 and cycles >= int(MAX_CYCLES): break random.seed(self.randSeed) val = self.term.inkey(timeout=self.blink_delay) # Sort mode detection if val.lower() == 's': if self.sortmode is False: self.sortmode = True else: self.sortmode = False if self.sortmode: if self.sortorder is None: self.sortorder = self.sortorderlist[2] keylist = [] for k in self.txkeys: keylist.append(k['sortkey']) key = val.lower() if key in keylist: for k in self.txkeys: if key == k['sortkey']: # Toggle sort direction if self.sortcolumn == k['sortcolumn']: if self.sortorder == self.sortorderlist[2]: self.sortorder = self.sortorderlist[1] else: self.sortorder = self.sortorderlist[2] else: self.sortorder = self.sortorderlist[2] # Set sort column self.sortcolumn = k['sortcolumn'] self.sortmode = False self.oldheight, self.oldwidth = self.height, self.width self.height, self.width = self.term.height, self.term.width time_past = int(time.time()) - tlast time_remain = self.poll_delay - time_past if time_past > self.poll_delay: if node: self.prev_ms_start = node["milestoneStartIndex"] """ Query data from node, save duration """ startTime = int(round(time.time() * 1000)) results = [fetch_data(self.commands[i]) for i in range(len(self.commands))] endTime = int(round(time.time() * 1000)) self.logDuration(endTime - startTime) """ Increase iteration cycle """ cycles += 1 """ Process response data """ neighbors = None node = None for data, e in results: if e is not None: raise Exception("Error fetching data from node:" " %s\n" % e) if 'appName' in data.keys(): node = data elif 'neighbors' in data.keys(): neighbors = data['neighbors'] tlast = int(time.time()) for neighbor in neighbors: for txkey in self.txkeys[1:]: if txkey['key'] not in neighbor: neighbor[txkey['key']] = 0 neighbor[txkey['keyshort']] = 0 neighbor['%sDelta' % 
txkey['key']] = 0 # Keep history of tx tx_history = {} for neighbor in neighbors: for txkey in self.txkeys[1:]: self.historizer(txkey['keyshort'], txkey['key'], tx_history, neighbor) self.hist = tx_history if val.lower() == 'o': self.obscureAddrToggle = self.obscureAddrToggle ^ 1 if val.lower() == 'b': for neighbor in neighbors: for txkey in self.txkeys[1:]: self.baseline[self.getBaselineKey(neighbor, txkey['keyshort'])] = \ neighbor[txkey['key']] self.baselineToggle = self.baselineToggle ^ 1 if ((self.oldheight != self.height) or (self.oldwidth != self.width)): print(self.term.clear) print(self.term.move(0, 0) + self.term.black_on_cyan( "IRITop - Simple IOTA IRI Node Monitor (%s)" .ljust(self.width) % __VERSION__)) s = str(time_remain) if time_remain > 0 else 'fetch' print(self.term.move(0, self.width-6) + self.term.black_on_cyan(s.rjust(6))) for neighbor in neighbors: for txkey in self.txkeys[1:]: key = self.getBaselineKey(neighbor, txkey['keyshort']) if key not in self.baseline: self.baseline[key] = 0 self.show(1, 0, "App Name", node, "appName") self.show(2, 0, "App Version", node, "appVersion") s = self.term.cyan("Free: ") + \ str(node["jreFreeMemory"]//MB) + \ " Mb " + \ self.term.cyan("Max: ") + \ str(node["jreMaxMemory"]//MB) + \ " Mb " + \ self.term.cyan("Total: ") + \ str(node["jreTotalMemory"]//MB) + " Mb " self.show_string(1, 1, "JRE Memory", s) self.show_histogram(2, 1, "JRE Memory", node["jreTotalMemory"] - node["jreFreeMemory"], node["jreMaxMemory"], 0.8, span=2) ms_start = node["milestoneStartIndex"] delta_ms_start = self.prev_ms_start - ms_start self.mss_1 = self.mss_0 self.mss_0 = ("%s" % ms_start) + ("" if delta_ms_start == 0 else " (%d)" % delta_ms_start) self.show_string(3, 2, "", " "*16) self.show_string(3, 2, "Milestone Start", self.mss_0, prev=self.mss_1) self.show(4, 2, "Milestone Index", node, "latestMilestoneIndex") self.show(5, 2, "Milestone Solid", node, "latestSolidSubtangleMilestoneIndex") self.show(3, 0, "JRE Version", node, 
# NOTE(review): this region is a whitespace-mangled paste of IRITop class
# methods: the tail of a screen-refresh method (its `def` is outside this
# view, and the first statement above is cut mid-expression), followed by
# logDuration (tracks response-time history, averaged over ~5 minutes of
# polls), showAddress (optionally scrambles addresses), getBaselineKey,
# historizer (computes per-neighbor tx deltas), and show/show_string/
# show_histogram/show_neighbors/txString/show_neighbor (blessed-terminal
# rendering helpers). Code is left byte-identical: reformatting is unsafe
# here because statements, a backslash continuation, and a string literal
# are split across the original line boundaries.
"jreVersion") self.show(4, 1, "Tips", node, "tips") self.show(3, 1, "Tx To Request", node, "transactionsToRequest") self.show_string(6, 0, "Node Address", self.showAddress(NODE)) self.show_string(4, 0, "Baseline", self.baselineStr[self.baselineToggle]) self.show_string(5, 0, "Response Time", str(self.duration) + " ms " + self.term.cyan("Avg: ") + str(self.duration_avg) + " ms ") neighborCount = "%s" % node['neighbors'] if self.incommunicados > 0: neighborCount += self.term.red(" / %d " % self.incommunicados) else: neighborCount += " " self.show_string(6, 2, "Neighbors", neighborCount) if self.localhost: self.show_string(5, 1, "Load Average", getloadavg()) else: self.show_string(5, 1, "Load Average", 'N/A') self.show_neighbors(7, neighbors) def logDuration(self, duration): self.duration = duration self.duration_hist.append(duration) self.duration_avg = int(sum(self.duration_hist) / len(self.duration_hist)) # Limit history to the last 5 minutes of calls if len(self.duration_hist) > (60*5/self.poll_delay): del self.duration_hist[0] def showAddress(self, address): if self.obscureAddrToggle == 1: return scrambleAddress(address) return address def getBaselineKey(self, neighbor, subkey): return "%s:%s" % (neighbor['address'], subkey) def historizer(self, txtype, wsid, hd, n): nid = "%s-%s" % (n['address'], txtype) nidd = "%s-%sd" % (n['address'], txtype) c = n[wsid] try: p = self.hist[nid] hd[nid] = c if p > 0: hd[nidd] = c - p else: hd[nidd] = 0 except KeyError: hd[nid] = 0 hd[nidd] = 0 n["%sDelta" % wsid] = hd[nidd] def show(self, row, col, label, dictionary, value): x1 = (self.width // 3) * col x2 = x1 + 18 vs = self.term.bright_cyan(str(dictionary[value])) # Highlight if no neighbors if value == "neighbors" and dictionary[value] == 0: vs = self.term.red(str(dictionary[value])) # Highlight if latest milestone is out of sync with # the solid milestone if value == "latestSolidSubtangleMilestoneIndex": diff = dictionary["latestSolidSubtangleMilestoneIndex"] - \ 
dictionary["latestMilestoneIndex"] if diff < 0 and diff >= -2: vs = self.term.yellow( str(dictionary[value]) + "* ") elif diff < -2: vs = self.term.red( str(dictionary[value]) + " (!)") else: vs = str(dictionary[value]) print(self.term.move(row, x2) + " "*(len(vs)+4)) if value in self.prev and dictionary[value] != self.prev[value]: vs = self.term.on_blue(vs) print(self.term.move(row, x1) + self.term.cyan(label + ":")) print(self.term.move(row, x2) + self.term.bright_cyan(vs)) self.prev[value] = dictionary[value] def show_string(self, row, col, label, value, prev=""): x1 = (self.width // 3) * col x2 = x1 + 18 value = str(value) if prev != "" and value != prev: value = self.term.on_blue(value) print(self.term.move(row, x1) + self.term.cyan(label + ":")) print(self.term.move(row, x2) + self.term.bright_cyan(str(value) + " ")) def show_histogram(self, row, col, label, value, value_max, warning_limit=0.8, span=1): label_width = 18 col_width = ((self.width // 3) - label_width) + \ ((span - 1) * (self.width // 3)) x1 = (self.width // 3) * col x2 = x1 + label_width bw = col_width - 2 vm = bw v = int(value / value_max * bw) vl = int(warning_limit * vm) mG = v mY = 0 mR = 0 if v > vl: mR = v - vl mG = mG - mR mB = bw - (mR + mG) if value > (value_max * warning_limit): mY = mG mG = 0 print(self.term.move(row, x1) + self.term.cyan(label + ":")) print(self.term.move(row, x2) + self.term.white("[") + self.term.green("|" * mG) + self.term.yellow("|" * mY) + self.term.red("#" * mR) + self.term.bright_black("-" * mB) + self.term.white("]")) def show_neighbors(self, row, neighbors): global ITER cols = 9 height, width = self.term.height, self.term.width cw = width // cols cw1 = width - ((cols - 1) * cw) cwl = [0, ] for c in range(cols - 1): cwl.append(cw1 + (c * cw)) self.incommunicados = 0 revso = True if self.sortorder == self.sortorderlist[2] else False for k in self.txkeys: ch = k['header'] + (' [%s]' % k['sortkey'] if self.sortmode else (self.sortorderlist[1] if revso else 
# NOTE(review): continuation of show_neighbors (header row, neighbor
# sorting and rendering), then txString and show_neighbor below.
self.sortorderlist[2]) if self.sortcolumn == k['sortcolumn'] else '') ch += "" if k['keyshort'] != 'ad' else " "*(cw*4-len(ch)) print(self.term.move(row, cwl[k['col']]) + self.term.black_on_green(ch.rjust(cw))) row += 1 # Sort neighbors ordered_neighbors = [] if self.sortcolumn is None: self.sortorder = None ordered_neighbors = neighbors else: if self.sortorder is None: self.sortorder = self.sortorderlist[0] ordered_neighbors = sorted(neighbors, key=lambda k: k[self.sortcolumn], reverse=revso) # Show Neighbors for neighbor in ordered_neighbors: self.show_neighbor(row, neighbor, cwl, cw, height) row += 1 # Blank spare neighbor rows for blankrow in range(row, height - 2): print(self.term.move(blankrow, 0) + " " * width) print(self.term.move(height - 2, 0 * cw) + self.term.black_on_cyan( "Q to exit - " "B to reset tx to a zero baseline - " "O to obscure addresses - " "S# to sort column".ljust(width))) ITER += 1 def txString(self, neighbor, key, keydelta, keyshort, column_width): txcnt = neighbor[key] - (self.baseline[self.getBaselineKey(neighbor, keyshort)] * self.baselineToggle) return ("%d (%d)" % (txcnt, neighbor[keydelta])).rjust(column_width) def show_neighbor(self, row, neighbor, column_start_list, column_width, height): global ITER neighbor['addr'] = self.showAddress(neighbor['connectionType'] + "://" + neighbor['address']) # Create display string for txkey in self.txkeys[1:]: neighbor[txkey['keyshort']] = \ self.txString(neighbor, txkey['key'], '%sDelta' % txkey['key'], txkey['keyshort'], column_width) # Highlight neighbors that are incommunicado incommunicado = False if (neighbor['numberOfAllTransactionsDelta'] == 0 and ITER > (6 * self.poll_delay)): neighbor['addr'] = "(!) 
" + neighbor['addr'] incommunicado = True self.incommunicados += 1 # Pad/Trim neighbor address ncolw = 3 * (column_width + 1) if len(neighbor['addr']) < ncolw: # pad neighbor['addr'] = neighbor['addr'].ljust(ncolw, ' ') elif len(neighbor['addr']) > ncolw: # trim neighbor['addr'] = neighbor['addr'][0:ncolw] value_at = "neighbor-%s-at" % neighbor['address'] if (value_at in self.prev and neighbor['numberOfAllTransactions'] != self.prev[value_at]): neighbor['at'] = self.term.cyan(neighbor['at']) if neighbor['numberOfInvalidTransactions'] > 0: neighbor['it'] = \ self.term.red(str(neighbor['numberOfInvalidTransactions']) .rjust(column_width)) # Blink changed value for txkey in self.txkeys[1:]: neighborkey = "neighbor-%s-%s" % (neighbor['address'], txkey['keyshort']) if (neighborkey in self.prev and neighbor[txkey['key']] != self.prev[neighborkey]): neighbor[txkey['keyshort']] = \ self.term.cyan(neighbor[txkey['keyshort']]) # do not display any neighbors crossing the height of the terminal if row < height - 2: print(self.term.move(row, column_start_list[0]) + (self.term.white(neighbor['addr']) if not incommunicado else self.term.red(neighbor['addr']))) for txkey in self.txkeys[1:]: print(self.term.move(row, column_start_list[txkey['col']]) + self.term.green(neighbor[txkey['keyshort']])) # Store previous value for txkey in self.txkeys[1:]: neighborkey = "neighbor-%s-%s" % (neighbor['address'], txkey['keyshort']) self.prev[neighborkey] = neighbor[txkey['key']]
def info(r=redis_client):
    """Print a one-shot summary of cluster statistics to the terminal.

    Shows cluster/worker/restart counts, queue sizes, success/failure
    totals and task throughput over the last 24 hours.

    :param r: Redis connection used to query cluster stats and queue size.
    :return: True once the summary has been printed.
    """
    term = Terminal()
    ping_redis(r)
    stat = Stat.get_all(r)
    # general stats
    clusters = len(stat)
    workers = 0
    reincarnations = 0
    for cluster in stat:
        workers += len(cluster.workers)
        reincarnations += cluster.reincarnations
    # calculate tasks pm and avg exec time
    tasks_per = 0
    per = _('day')
    exec_time = 0
    last_tasks = models.Success.objects.filter(
        stopped__gte=timezone.now() - timedelta(hours=24))
    tasks_per_day = last_tasks.count()
    if tasks_per_day > 0:
        # average execution time over the last 24 hours
        if not connection.vendor == 'sqlite':
            exec_time = last_tasks.aggregate(
                time_taken=Sum(F('stopped') - F('started')))
            exec_time = exec_time['time_taken'].total_seconds() / tasks_per_day
        else:
            # can't sum timedeltas on sqlite
            for t in last_tasks:
                exec_time += t.time_taken()
            exec_time = exec_time / tasks_per_day
        # tasks per second/minute/hour/day in the last 24 hours
        if tasks_per_day > 24 * 60 * 60:
            tasks_per = tasks_per_day / (24 * 60 * 60)
            per = _('second')
        elif tasks_per_day > 24 * 60:
            tasks_per = tasks_per_day / (24 * 60)
            per = _('minute')
        elif tasks_per_day > 24:
            tasks_per = tasks_per_day / 24
            per = _('hour')
        else:
            tasks_per = tasks_per_day
    # print to terminal
    # BUG FIX: clear_eos() only *returns* the escape sequence; it must be
    # printed for the screen to actually be cleared.
    print(term.clear_eos())
    col_width = int(term.width / 6)
    print(
        term.black_on_green(
            term.center(_('-- {} summary --').format(Conf.PREFIX))))
    print(
        term.cyan(_('Clusters')) + term.move_x(1 * col_width) +
        term.white(str(clusters)) + term.move_x(2 * col_width) +
        term.cyan(_('Workers')) + term.move_x(3 * col_width) +
        term.white(str(workers)) + term.move_x(4 * col_width) +
        term.cyan(_('Restarts')) + term.move_x(5 * col_width) +
        term.white(str(reincarnations)))
    print(
        term.cyan(_('Queued')) + term.move_x(1 * col_width) +
        term.white(str(r.llen(Conf.Q_LIST))) + term.move_x(2 * col_width) +
        term.cyan(_('Successes')) + term.move_x(3 * col_width) +
        term.white(str(models.Success.objects.count())) +
        term.move_x(4 * col_width) +
        term.cyan(_('Failures')) + term.move_x(5 * col_width) +
        term.white(str(models.Failure.objects.count())))
    print(
        term.cyan(_('Schedules')) + term.move_x(1 * col_width) +
        term.white(str(models.Schedule.objects.count())) +
        term.move_x(2 * col_width) +
        term.cyan(_('Tasks/{}'.format(per))) + term.move_x(3 * col_width) +
        term.white('{0:.2f}'.format(tasks_per)) + term.move_x(4 * col_width) +
        term.cyan(_('Avg time')) + term.move_x(5 * col_width) +
        term.white('{0:.4f}'.format(exec_time)))
    return True
def monitor(run_once=False, r=redis_client):
    """Render a live, full-screen table of cluster statistics.

    Redraws roughly once per second until ``q``/``Q`` is pressed.

    :param run_once: when True, draw a single frame and return the stats
        (used by the test suite).
    :param r: Redis connection used to fetch cluster stats.
    :return: the stats list when run_once is True, otherwise None on quit.
    """
    term = Terminal()
    ping_redis(r)
    with term.fullscreen(), term.hidden_cursor(), term.cbreak():
        val = None
        start_width = int(term.width / 8)
        while val not in (
                u'q',
                u'Q',
        ):
            col_width = int(term.width / 8)
            # In case of resize
            if col_width != start_width:
                print(term.clear)
                start_width = col_width
            # header row (one column per field, left to right)
            headers = (_('Host'), _('Id'), _('State'), _('Pool'),
                       _('TQ'), _('RQ'), _('RC'), _('Up'))
            for idx, header in enumerate(headers):
                print(
                    term.move(0, idx * col_width) +
                    term.black_on_green(
                        term.center(header, width=col_width - 1)))
            i = 2
            stats = Stat.get_all(r=r)
            print(term.clear_eos())
            for stat in stats:
                # color status; unknown statuses are shown uncolored
                status = str(stat.status)
                if stat.status == Conf.WORKING:
                    status = term.green(str(Conf.WORKING))
                elif stat.status == Conf.STOPPING:
                    status = term.yellow(str(Conf.STOPPING))
                elif stat.status == Conf.STOPPED:
                    status = term.red(str(Conf.STOPPED))
                elif stat.status == Conf.IDLE:
                    status = str(Conf.IDLE)
                # color q's
                tasks = str(stat.task_q_size)
                if stat.task_q_size > 0:
                    tasks = term.cyan(str(stat.task_q_size))
                    if Conf.QUEUE_LIMIT and stat.task_q_size == Conf.QUEUE_LIMIT:
                        tasks = term.green(str(stat.task_q_size))
                # CONSISTENCY FIX: always pass a string to term.center()
                # (the original left `results`/`workers` as ints when
                # uncolored, relying on blessed's implicit coercion)
                results = str(stat.done_q_size)
                if stat.done_q_size > 0:
                    results = term.cyan(str(stat.done_q_size))
                # color workers (yellow when below the configured pool size)
                workers = str(len(stat.workers))
                if len(stat.workers) < Conf.WORKERS:
                    workers = term.yellow(str(len(stat.workers)))
                # format uptime as H:MM:SS
                uptime = (timezone.now() - stat.tob).total_seconds()
                hours, remainder = divmod(uptime, 3600)
                minutes, seconds = divmod(remainder, 60)
                uptime = '%d:%02d:%02d' % (hours, minutes, seconds)
                # print one row, same column order as the header
                cells = (stat.host[:col_width - 1], str(stat.cluster_id),
                         status, workers, tasks, results,
                         str(stat.reincarnations), uptime)
                for idx, cell in enumerate(cells):
                    print(
                        term.move(i, idx * col_width) +
                        term.center(cell, width=col_width - 1))
                i += 1
            # for testing
            if run_once:
                return Stat.get_all(r=r)
            print(term.move(i + 2, 0) + term.center(_('[Press q to quit]')))
            val = term.inkey(timeout=1)
def info(broker=None):
    """Print a one-shot summary of cluster and task statistics.

    Shows cluster/worker/restart counts, broker queue size, success and
    failure totals, schedule count and task throughput over the last
    24 hours.

    :param broker: broker instance to query; defaults to get_broker().
    :return: True once the summary has been printed.
    """
    if not broker:
        broker = get_broker()
    term = Terminal()
    broker.ping()
    stat = Stat.get_all(broker=broker)
    # aggregate cluster-wide counters
    clusters = len(stat)
    workers = sum(len(cluster.workers) for cluster in stat)
    reincarnations = sum(cluster.reincarnations for cluster in stat)
    # throughput and average execution time over the last 24 hours
    tasks_per = 0
    per = _('day')
    exec_time = 0
    last_tasks = models.Success.objects.filter(
        stopped__gte=timezone.now() - timedelta(hours=24))
    tasks_per_day = last_tasks.count()
    if tasks_per_day > 0:
        if connection.vendor == 'sqlite':
            # sqlite can't sum timedeltas in the database; do it in Python
            for task in last_tasks:
                exec_time += task.time_taken()
            exec_time = exec_time / tasks_per_day
        else:
            exec_time = last_tasks.aggregate(
                time_taken=Sum(F('stopped') - F('started'),
                               output_field=FloatField()))
            exec_time = exec_time['time_taken'] / tasks_per_day
        # pick the largest unit for which throughput exceeds one task
        for threshold, unit in ((24 * 60 * 60, _('second')),
                                (24 * 60, _('minute')),
                                (24, _('hour'))):
            if tasks_per_day > threshold:
                tasks_per = tasks_per_day / threshold
                per = unit
                break
        else:
            tasks_per = tasks_per_day
    # render the summary
    print(term.clear_eos())
    col_width = int(term.width / 6)
    heading = _('-- {} {} on {} --').format(
        Conf.PREFIX.capitalize(),
        '.'.join(str(v) for v in VERSION),
        broker.info())
    print(term.black_on_green(term.center(heading)))

    def _row(*cells):
        # one summary line: cells separated by absolute column moves
        parts = [cells[0]]
        for index, cell in enumerate(cells[1:], start=1):
            parts.append(term.move_x(index * col_width))
            parts.append(cell)
        print(''.join(parts))

    _row(term.cyan(_('Clusters')),
         term.white(str(clusters)),
         term.cyan(_('Workers')),
         term.white(str(workers)),
         term.cyan(_('Restarts')),
         term.white(str(reincarnations)))
    _row(term.cyan(_('Queued')),
         term.white(str(broker.queue_size())),
         term.cyan(_('Successes')),
         term.white(str(models.Success.objects.count())),
         term.cyan(_('Failures')),
         term.white(str(models.Failure.objects.count())))
    _row(term.cyan(_('Schedules')),
         term.white(str(models.Schedule.objects.count())),
         term.cyan(_('Tasks/{}'.format(per))),
         term.white('{0:.2f}'.format(tasks_per)),
         term.cyan(_('Avg time')),
         term.white('{0:.4f}'.format(exec_time)))
    return True
def info(r=redis_client):
    """Print a one-shot summary of cluster statistics to the terminal.

    Shows cluster/worker/restart counts, queue sizes, success/failure
    totals and task throughput over the last 24 hours.

    :param r: Redis connection used to query cluster stats and queue size.
    :return: True once the summary has been printed.
    """
    term = Terminal()
    ping_redis(r)
    stat = Stat.get_all(r)
    # general stats
    clusters = len(stat)
    workers = 0
    reincarnations = 0
    for cluster in stat:
        workers += len(cluster.workers)
        reincarnations += cluster.reincarnations
    # calculate tasks pm and avg exec time
    tasks_per = 0
    per = _("day")
    exec_time = 0
    last_tasks = models.Success.objects.filter(
        stopped__gte=timezone.now() - timedelta(hours=24))
    tasks_per_day = last_tasks.count()
    if tasks_per_day > 0:
        # average execution time over the last 24 hours
        if not connection.vendor == "sqlite":
            exec_time = last_tasks.aggregate(
                time_taken=Sum(F("stopped") - F("started")))
            exec_time = exec_time["time_taken"].total_seconds() / tasks_per_day
        else:
            # can't sum timedeltas on sqlite
            for t in last_tasks:
                exec_time += t.time_taken()
            exec_time = exec_time / tasks_per_day
        # tasks per second/minute/hour/day in the last 24 hours
        if tasks_per_day > 24 * 60 * 60:
            tasks_per = tasks_per_day / (24 * 60 * 60)
            per = _("second")
        elif tasks_per_day > 24 * 60:
            tasks_per = tasks_per_day / (24 * 60)
            per = _("minute")
        elif tasks_per_day > 24:
            tasks_per = tasks_per_day / 24
            per = _("hour")
        else:
            tasks_per = tasks_per_day
    # print to terminal
    # BUG FIX: clear_eos() only *returns* the escape sequence; it must be
    # printed for the screen to actually be cleared.
    print(term.clear_eos())
    col_width = int(term.width / 6)
    print(term.black_on_green(
        term.center(_("-- {} summary --").format(Conf.PREFIX))))
    print(
        term.cyan(_("Clusters"))
        + term.move_x(1 * col_width)
        + term.white(str(clusters))
        + term.move_x(2 * col_width)
        + term.cyan(_("Workers"))
        + term.move_x(3 * col_width)
        + term.white(str(workers))
        + term.move_x(4 * col_width)
        + term.cyan(_("Restarts"))
        + term.move_x(5 * col_width)
        + term.white(str(reincarnations))
    )
    print(
        term.cyan(_("Queued"))
        + term.move_x(1 * col_width)
        + term.white(str(r.llen(Conf.Q_LIST)))
        + term.move_x(2 * col_width)
        + term.cyan(_("Successes"))
        + term.move_x(3 * col_width)
        + term.white(str(models.Success.objects.count()))
        + term.move_x(4 * col_width)
        + term.cyan(_("Failures"))
        + term.move_x(5 * col_width)
        + term.white(str(models.Failure.objects.count()))
    )
    print(
        term.cyan(_("Schedules"))
        + term.move_x(1 * col_width)
        + term.white(str(models.Schedule.objects.count()))
        + term.move_x(2 * col_width)
        + term.cyan(_("Tasks/{}".format(per)))
        + term.move_x(3 * col_width)
        + term.white("{0:.2f}".format(tasks_per))
        + term.move_x(4 * col_width)
        + term.cyan(_("Avg time"))
        + term.move_x(5 * col_width)
        + term.white("{0:.4f}".format(exec_time))
    )
    return True