def internetFullStatusReport(timeoutLimit=5, whiptail=False, returnStatus=False):
    """
    Full check of all sites used by PiNet. Only needed on initial install.

    Each entry in ``sites`` is ``[display name, URL, importance, reachable?]``;
    the last field is filled in by testSiteConnection().
    """
    sites = []
    sites.append([_("Main Raspbian repository"), "http://archive.raspbian.org/raspbian.public.key", ("Critical"), False])
    sites.append([_("Raspberry Pi Foundation repository"), "http://archive.raspberrypi.org/debian/raspberrypi.gpg.key", ("Critical"), False])
    sites.append([_("Github"), "https://github.com", ("Critical"), False])
    sites.append([_("Bit.ly"), "http://bit.ly", ("Highly recommended"), False])
    sites.append([_("Bitbucket (Github mirror, not active yet)"), "https://bitbucket.org", ("Recommended"), False])
    sites.append([_("BlueJ"), "http://bluej.org", ("Recommended"), False])
    sites.append([_("PiNet metrics"), "https://secure.pinet.org.uk", ("Recommended"), False])
    for site in sites:
        site[3] = testSiteConnection(site[1])
    if returnStatus:
        return sites
    if whiptail:
        message = ""
        for site in sites:
            # BUG FIX: the original tested `sites[3]` (an entire row of the
            # sites list, which is always truthy) instead of this site's own
            # connectivity flag, so every site reported "Success".
            if site[3]:
                status = "Success"
            else:
                status = "Failed"
            message = message + status + " - " + site[2] + " - " + site[0] + " (" + site[1] + ")\n"
        # BUG FIX: both original comparisons checked the width (index 0),
        # making the `< 30` test redundant; it was presumably meant to check
        # the terminal height (index 1) -- TODO confirm against whiptail use.
        if (shutil.get_terminal_size()[0] < 105) or (shutil.get_terminal_size()[1] < 30):
            # Ask the terminal emulator to resize itself to 105x30.
            print("\x1b[8;30;105t")
            time.sleep(0.05)
        whiptailBox("msgbox", "Web filtering test results", message, True, height="14", width="100")
    else:
        for site in sites:
            print(str(site[2] + " - "))
def _print_frame(self, index):
    """Render jury-state frame *index* to the terminal.

    Truncates the frame to the terminal height, records it in
    ``self.prev_frame``, and warns when the terminal is too short to show
    the whole frame.
    """
    self.frame_number = index
    frame_text = '{color}Frame #{0:04d} of {1:d} :{nocolor}\n{2:s}\n'.format(
        self.frame_number + 1,
        self._jury_state_count(),
        self.painter_factory(self.game_controller.get_players())
            .ascii_paint(self.game_controller.jury_states[index]),
        color=Fore.YELLOW + Style.BRIGHT,
        nocolor=Fore.RESET + Style.NORMAL)
    # BUG FIX: the original called lock.acquire()/lock.release() manually,
    # so an exception while clearing/printing would leave the lock held
    # forever; the context manager releases it on any exit path.
    with self.lock:
        _clear()
        # Keep three rows free for the status/error area below the frame.
        height = get_terminal_size()[1] - 3
        frame_lines = frame_text.split('\n')
        lines = len(frame_lines)
        frame_text = '\n\r'.join(frame_lines[:height])
        print(frame_text)
    self.prev_frame = frame_text.split('\n')
    if lines > get_terminal_size()[1]:
        clear_string(height + 1)
        clear_string(height + 3)
        self._error('Increase the height of the terminal and necessarily '
                    'press Home to see the full information', end='')
        clear_string(height + 2)
def __init__(self, title='', label_text='', default='', padding=10, completer=None):
    """Modal single-line text-input dialog.

    The result is delivered through ``self.future``: the entered text on
    OK, ``None`` on Cancel.
    """
    self.future = Future()

    def on_text_accepted(buf):
        # Pressing enter in the field moves focus to the OK button.
        get_app().layout.focus(ok_button)
        buf.complete_state = None
        return True

    def on_ok():
        self.future.set_result(self.text_area.text)

    def on_cancel():
        self.future.set_result(None)

    self.text_area = TextArea(
        completer=completer,
        text=default,
        multiline=False,
        width=D(preferred=shutil.get_terminal_size()[0] - padding),
        accept_handler=on_text_accepted)

    ok_button = Button(text='OK', handler=on_ok)
    cancel_button = Button(text='Cancel', handler=on_cancel)

    self.dialog = Dialog(
        title=title,
        body=HSplit([Label(text=label_text), self.text_area]),
        buttons=[ok_button, cancel_button],
        width=D(preferred=shutil.get_terminal_size()[0] - 10),
        modal=True)
def test_os_environ_first(self):
    "Check if environment variables have precedence"
    # COLUMNS and LINES env vars must win over the OS-reported size.
    for var, attr, value in (('COLUMNS', 'columns', 777),
                             ('LINES', 'lines', 888)):
        with support.EnvironmentVarGuard() as env:
            env[var] = str(value)
            size = shutil.get_terminal_size()
            self.assertEqual(getattr(size, attr), value)
def sigwinch_handler(self, n, frame):
    """Handle SIGWINCH: recompute dimensions and rebuild the curses windows."""
    size = shutil.get_terminal_size()
    # Reserve margins for the surrounding chrome.
    self.window_height = size.lines - 10
    self.window_width = size.columns - 20
    curses.endwin()
    self.stdscr.clear()
    self.stdscr = curses.initscr()
    self.win = curses.newwin(5 + self.window_height, self.window_width, 2, 4)
def __init__(self, menu_items=None, title="", arrow="==>",
             footer="Space = toggle ─ Enter = accept ─ q = cancel",
             more="...", char_selected="[X]", char_empty="[ ]"):
    """Initialization.

    Parameters
    ----------
    menu_items : list, optional
        The data that will be used to create the menu items.
    title : str, optional
        A title to use on the menu.
    arrow : str, optional
        The character/s used to *point* the menu item that can be selected.
    footer : str, optional
        Informational text placed at the bottom of the menu.
    more : str, optional
        Character/s representing the availability of more menu items than
        the screen can display.
    char_selected : str, optional
        The character/s used to represent a selected menu item.
    char_empty : str, optional
        The character/s used to represent a non selected menu item.
    """
    # BUG FIX: the default was the mutable literal `[]`, shared between
    # every call; use None as the sentinel and create a fresh list here.
    if menu_items is None:
        menu_items = []
    self.title = title
    self.arrow = arrow
    self.footer = footer
    self.more = more
    self.char_selected = char_selected
    self.char_empty = char_empty
    self.all_menu_items = []
    self.win = None
    self.stdscr = None
    self.cursor = 0
    self.offset = 0
    self.selected = 0
    self.selcount = 0
    self.aborted = False
    # Fall back to a 80x24 terminal; keep 5 rows for title/footer chrome.
    self.window_height = (get_terminal_size((80, 24))[1] or 24) - 5
    self.window_width = (get_terminal_size((80, 24))[0] or 80)
    self.length = 0
    for item in menu_items:
        self.all_menu_items.append({
            "label": item,
            "selected": False
        })
    self.length = len(self.all_menu_items)
    self.curses_start()
    curses.wrapper(self.curses_loop)
    self.curses_stop()
def installSoftwareList(holdOffInstall=False):
    """
    Replacement for ExtraSoftware function in bash.
    Builds a list of possible software to install (using softwarePackage class) then displays the list using checkbox Whiptail menu.
    Checks what options the user has collected, then saves the packages list to file (using pickle). If holdOffInstall is False, then runs installSoftwareFromFile().
    """
    software = []
    software.append(softwarePackage("Libreoffice", _("A free office suite, similar to Microsoft office"), "script", ["apt-get purge -y openjdk-6-jre-headless openjdk-7-jre-headless ca-certificates-java", "apt-get install -y libreoffice gcj-4.7-jre gcj-jre gcj-jre-headless libgcj13-awt"]))
    software.append(softwarePackage("Arduino-IDE", _("Programming environment for Arduino microcontrollers"), "apt", ["arduino",]))
    software.append(softwarePackage("Scratch-gpio", _("A special version of scratch for GPIO work"), "scratchGPIO", ["",]))
    software.append(softwarePackage("Python-hardware", _("Python libraries for a number of additional addon boards"), "pip", ["pibrella skywriter unicornhat piglow pianohat explorerhat microstacknode twython"]))
    software.append(softwarePackage("Epoptes", _("Free and open source classroom management software"), "epoptes", ["",]))
    software.append(softwarePackage("BlueJ", _("A Java IDE for developing programs quickly and easily"), "script", ["rm -rf /tmp/bluej-314a.deb", "rm -rf /opt/ltsp/armhf/tmp/bluej-314a.deb", "wget http://bluej.org/download/files/bluej-314a.deb -O /tmp/bluej-314a.deb", "dpkg -i /tmp/bluej-314a.deb"]))
    software.append(softwarePackage("Custom-package", _("Allows you to enter the name of a package from Raspbian repository"), "customApt", ["",]))
    software.append(softwarePackage("Custom-python", _("Allows you to enter the name of a Python library from pip."), "customPip", ["",]))
    softwareList = [[i.name, i.description] for i in software]
    done = False
    # BUG FIX: both halves of the original check compared the *width*
    # (index 0) against 105 and 30, making the second test redundant; the
    # 30 was presumably meant for the height (index 1) -- TODO confirm.
    if (shutil.get_terminal_size()[0] < 105) or (shutil.get_terminal_size()[1] < 30):
        # Ask the terminal emulator to resize itself to 105x30.
        print("\x1b[8;30;105t")
        time.sleep(0.05)
    while not done:
        whiptailBox("msgbox", _("Additional Software"), _("In the next window you can select additional software you wish to install. Use space bar to select applications and hit enter when you are finished."), False)
        result = (whiptailCheckList(_("Extra Software Submenu"), _("Select any software you want to install. Use space bar to select then enter to continue."), softwareList))
        try:
            result = result.decode("utf-8")
        except AttributeError:
            # Whiptail was cancelled / returned no bytes object.
            return
    	# Whiptail quotes multi-word selections; strip the quotes.
        result = result.replace('"', '')
        if result != "Cancel":
            if result == "":
                yesno = whiptailBox("yesno", _("Are you sure?"), _("Are you sure you don't want to install any additional software?"), True)
                if yesno:
                    savePickled(software)
                    done = True
            else:
                resultList = result.split(" ")
                yesno = whiptailBox("yesno", _("Are you sure?"), _("Are you sure you want to install this software?") + " \n" + (result.replace(" ", "\n")), True, height=str(7 + len(result.split(" "))))
                if yesno:
                    for i in software:
                        if i.name in resultList:
                            i.customAptPip()
                    done = True
                    savePickled(software)
    if not holdOffInstall:
        installSoftwareFromFile()
def get_terminal_size():
    """
    Detect terminal size and return tuple = (width, height).

    Only to be used when running in a terminal. Note that the IPython
    notebook, IPython zmq frontends, or IDLE do not run in a terminal.
    """
    import platform

    # On Python 3 the standard library already does the right thing.
    if sys.version_info[0] >= 3:
        return shutil.get_terminal_size()

    system = platform.system()
    size = None
    if system == 'Windows':
        size = _get_terminal_size_windows()
        if size is None:
            # needed for window's python in cygwin's xterm!
            size = _get_terminal_size_tput()
    if system in ('Linux', 'Darwin') or system.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    if size is None:
        size = (80, 25)  # default value
    return size
def format_table(rows, width='AUTO', title=None):
    """Render *rows* (sequences of strings, first row = header) as an
    aligned text table, wrapping the widest column to fit *width* and
    inserting a dashed separator under the header.  An optional *title*
    is centered above the table."""
    if width == 'AUTO':
        try:
            from shutil import get_terminal_size
            width = get_terminal_size()[0] - 1
        except ImportError:
            width = 1000000
    rows = list(rows)
    col_widths = [max(len(cell) for cell in col) for col in zip(*rows)]
    gutters = 2 * (len(col_widths) - 1)
    if sum(col_widths) + gutters > width:
        # Shrink the widest column so the table fits the target width,
        # but never below the width of the next-widest column.
        widest = np.argmax(col_widths)
        col_widths[widest] = 0
        floor = max(col_widths)
        col_widths[widest] = max(floor, width - gutters - sum(col_widths))
    total_width = sum(col_widths) + gutters
    wrapped = []
    for row in rows:
        cells = [wrap(text, width=cw) for text, cw in zip(row, col_widths)]
        wrapped.extend(zip_longest(*cells, fillvalue=""))
    wrapped.insert(1, ['-' * cw for cw in col_widths])
    if title is not None:
        heading = '{:^{}}\n{:^{}}\n\n'.format(title, total_width,
                                              '=' * len(title), total_width)
    else:
        heading = ''
    body = '\n'.join(
        ' '.join('{:<{}}'.format(cell, cw) for cell, cw in zip(row, col_widths))
        for row in wrapped)
    return heading + body
def pretty_histogram(data):
    """Print a coloured horizontal histogram of *data* to the terminal.

    *data* is an iterable of (value, count) pairs.  Each row shows the
    value, the count, and a bar scaled so the tick grid fits the terminal
    width; the output is paged in terminal-height chunks with a scale
    line printed between pages.
    """
    # ANSI colour codes: axis, bar, count column, scale, ticks, value column.
    ca, cb, cc, cs, ct, cv = '37', '0', '93', '93', '37', '31'
    vals, counts = zip(*data)
    # Column widths sized to the widest count / value when stringified.
    clen, vlen = max(map(len, map(str, counts))), max(map(len, map(str, vals)))
    cols, rows = shutil.get_terminal_size()
    # Row template: value | count | bar (with colour escapes baked in).
    fmt = '\033[{cv}m{{:>{vlen}}}\033[{ca}m│\033[{cc}m{{:<{clen}}}\033[{ca}m│\033[{cb}m{{}}'.format(
        cv=cv, ca=ca, cc=cc, cb=cb, vlen=vlen, clen=clen)
    # This will break for one billion reviews or more in one bin
    tick_fmt = '\033[{cs}m{{:>9}}\033[{ct}m'.format(cs=cs, ct=ct)
    tickw = term_width(tick_fmt.format(0))+1  # +1 for pointer character added below
    # Remaining columns available for the bar itself.
    graphw = cols - term_width(fmt.format(0, 0, ''))
    ticks = int(graphw/tickw)
    grid = (' '*(tickw-1) + '│')*ticks
    # Pick a "sensible" tick step: smallest power-of-ten multiple that
    # keeps the largest bar within the grid.
    min_step = max(counts)/ticks
    foo = 10**int(math.log10(min_step))
    sensible_step = int((math.ceil(min_step/foo)) * foo)
    step = sensible_step/tickw
    # One rendered bar row per (value, count), overlaying the tick grid
    # beyond the end of the bar.
    graphlines = [
        fmt.format(val, count, bar(count, step) + '\033[{ct}m'.format(ct=ct) + grid[int(count/step)+1:])
        for val, count in data
    ]
    # Scale row joining the tick labels with the given corner character.
    scale = lambda c: fmt.format('x', '#', ((tick_fmt+c)*ticks).format(*((i+1)*sensible_step for i in range(ticks))))
    # chunksize = int(rows/2)
    chunksize = rows-2
    print(scale('┐'))
    # Page the rows, repeating the scale between pages.
    while graphlines:
        print('\n'.join(graphlines[:chunksize]))
        graphlines = graphlines[chunksize:]
        if graphlines:
            print(scale('┤'))
    print(scale('┘'))
def _size_36():
    """Return the terminal dimensions as a (rows, columns) tuple."""
    from shutil import get_terminal_size

    dim = get_terminal_size()
    # get_terminal_size normally returns an os.terminal_size named tuple;
    # the list branch is kept for parity with the original behaviour.
    return (dim[0], dim[1]) if isinstance(dim, list) else (dim.lines, dim.columns)
def _dlnotice(self):
    """Status-line loop for an active DCC download.

    Redraws a ``received/total [===>   ] name`` line once a second for as
    long as ``self.dcc`` is set (cleared elsewhere when the transfer
    ends).  NOTE(review): appears intended to run in its own thread given
    the sleep loop — confirm against the caller.
    """
    while self.dcc is not None:
        # Fraction complete, scaled onto a 30-character bar.
        pos = self.received_bytes/self.size
        pos = int(30*pos)
        posstr = (("="*pos)+">").ljust(30, " ")
        if self.warning:
            extra = ">> " + self.warning + " <<"
            # A "short" warning is displayed once and then cleared.
            if self._short_warning:
                self.warning = ""
        else:
            extra = repr(self.original_filename)
        # Make sure the status line fits the screen.
        term_size = shutil.get_terminal_size((80,20))
        if term_size.columns > 100:
            # Calculate speed meter.
            speed = " ---.-- "
            if self.received_bytes != 0:
                # Bytes since the previous redraw (~1s ago) = bytes/second.
                byte_delta = self.received_bytes - self._bar_received_bytes
                speed = " %8s/s"%humansize(byte_delta)
                self._bar_received_bytes = self.received_bytes
        else:
            # Narrow terminal: drop the speed meter entirely.
            speed = ""
        # Generate the new one.
        string = "".join(("\r%8s/%8s"%(
            humansize(self.received_bytes),
            humansize(self.size)
        ), speed, " [", posstr, "] ", extra, " "))
        self._write_status(string)
        time.sleep(1)
def __init__(self, message, totalCount = None, disp=True):
    """Terminal progress bar.

    Args:
        message: label displayed next to the bar.
        totalCount: expected total, or None when unknown.
        disp: when False — or on Windows, which lacks curses support —
            every public method is replaced with a no-op.
    """
    if not disp or sys.platform == 'win32':
        # Disable the whole object: every entry point becomes a no-op.
        self.update = self.empty
        self.curlUpdate = self.empty
        self.progress = self.empty
        self.progressBy = self.empty
        self.outputProgress = self.empty
        self.done = self.empty
        self.main = ''
        self.finished = 0
        return
    # windows system does not support ncurse
    import blessings
    self.term = blessings.Terminal(stream=sys.stderr)
    self.main = message
    self.main_start_time = time.time()
    self.message = self.main
    # get terminal width
    self.term_width = shutil.get_terminal_size((80, 20)).columns
    #
    # total count
    self.count = 0
    # total initial count
    self.init_count = self.count
    #
    self.finished = 0
    # Unique id used to register this bar in the shared progress index.
    self.uuid = uuid.uuid4().hex
    # Inter-process lock so concurrent processes don't corrupt the index.
    with fasteners.InterProcessLock('/tmp/sos_progress_'):
        with open('/tmp/sos_progress', 'a') as prog_index:
            prog_index.write('{}\n'.format(self.uuid))
    self.reset('', totalCount)
def table_print(self, title='', columns='', nextt=False):
    """Format output as a simple ASCII table sized to the terminal.

    *columns* is a sequence of rows (each a sequence of cell strings);
    *title* is an optional header row; *nextt* suppresses re-printing the
    header when continuing a previous table.
    """
    max_line = shutil.get_terminal_size((80, 20))[0] - 1

    def fit(text, limit):
        # Pad *text* to *limit* characters, or truncate when too long.
        if len(text) <= limit:
            return (text, " " * (limit - len(text)))
        return (text[:limit - 1], '')

    def flatten(pairs):
        return reduce(lambda acc, el: acc.extend(el) or acc, pairs, [])

    def col_widths(count):
        # Split the free space evenly; the first column absorbs the remainder.
        free = max_line - count - 1
        base = int(free / count)
        widths = [base for _ in range(count)]
        if free % count != 0:
            widths[0] += free % count
        return widths

    if not nextt:
        print("-" * max_line)
        if title:
            print("|%s%s|" % fit(title, max_line - 2))
        print("-" * max_line)
    if columns:
        widths = col_widths(len(columns[0]))
        for row in columns:
            print(("|%s%s" * len(columns[0]) + "|") % tuple(flatten(
                [fit(row[i], widths[i]) for i in range(len(row))])))
        print("-" * max_line)
def __init__(self, prefix='', maxValue=100, length=20, percent=True, padPrefix=None, file=sys.stdout):
    """Console progress bar.

    Args:
        prefix: label printed before the bar.
        maxValue: value representing 100% completion.
        length: desired bar length in characters (may be shrunk to fit).
        percent: whether to display a percentage read-out.
        padPrefix: when set, left-justify the prefix to this width.
        file: stream the bar is written to.
    """
    if padPrefix:
        self.prefix = ('{:<%s}' % padPrefix).format(prefix)
    else:
        self.prefix = prefix
    # Make sure it'll fit in the console.
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to ordinary exceptions.
    try:
        maxWidth = shutil.get_terminal_size()[0]
    except Exception:
        maxWidth = 80
    # Calculate length of current message.
    # +9 accounts for spacing, brackets, percentage, and one empty
    # space to prevent scrolling to the next line automatically.
    width = len(self.prefix) + length + 9
    if width > maxWidth:
        extra = width - maxWidth
        # Too long: shrink the bar first, but never below 20 columns.
        if length > 20:
            remove = length - max(20, length - extra)
            extra -= remove
            length -= remove
        if extra > 0:
            # Still too wide: truncate the prefix.
            remove = self.prefix[-extra:]
            self.prefix = self.prefix[:-extra]
            if remove != ' '*extra:
                # Had to remove some text; mark the cut with an ellipsis.
                self.prefix = self.prefix[:-3] + '...'
    self.end = maxValue
    self.length = length
    self.cur = 0
    self.outFile = file
    self.percent = percent
def calendar(collection, dates=None, firstweekday=0, locale=None,
             weeknumber=False, show_all_days=False, conf=None, hmethod='fg',
             default_color='', multiple='', color='', highlight_event_days=0,
             week=False, full=False, bold_for_light_color=True, **kwargs):
    """Print a vertical month calendar side by side with the agenda."""
    if dates is None:
        dates = [datetime.today()]
    term_width, _ = get_terminal_size()
    # Fixed-width month pane on the left; the agenda gets the remainder.
    lwidth = 25
    rwidth = term_width - lwidth - 4
    agenda_rows = get_agenda(
        collection, locale, dates=dates, width=rwidth,
        show_all_days=show_all_days, week=week, full=full,
        bold_for_light_color=bold_for_light_color, **kwargs)
    month_rows = calendar_display.vertical_month(
        firstweekday=firstweekday, weeknumber=weeknumber,
        collection=collection, hmethod=hmethod,
        default_color=default_color, multiple=multiple, color=color,
        highlight_event_days=highlight_event_days, locale=locale,
        bold_for_light_color=bold_for_light_color)
    echo('\n'.join(merge_columns(month_rows, agenda_rows)))
def triple_column_print(iterable):
    """Print *iterable*'s items in three aligned columns sized to the terminal."""
    columns_per_row = 3
    term_width = shutil.get_terminal_size().columns
    # Leave two spaces of margin on each side.
    col_width = (term_width - 4) // columns_per_row
    chunks = [iter(iterable)] * columns_per_row
    row_template = ' {0:<{width}}{1:^{width}}{2:>{width}} '
    for left, middle, right in itertools.zip_longest(*chunks, fillvalue=''):
        print(row_template.format(left, middle, right, width=col_width))
def test_echo_request_line():
    """_echo_request_line: replay marker, HTTP version echo, URL truncation."""
    buf = io.StringIO()
    d = dumper.Dumper(buf)
    with taddons.context(options=options.Options()) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)

        # A replayed request gets the "[replay]" marker.
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = True
        d._echo_request_line(f)
        assert "[replay]" in buf.getvalue()
        buf.truncate(0)

        # A normal request does not.
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = False
        d._echo_request_line(f)
        assert "[replay]" not in buf.getvalue()
        buf.truncate(0)

        # Unusual HTTP versions are echoed verbatim.
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.http_version = "nonstandard"
        d._echo_request_line(f)
        assert "nonstandard" in buf.getvalue()
        buf.truncate(0)

        # At flow_detail=0 overly long URLs are truncated to the terminal width.
        ctx.configure(d, flow_detail=0, showhost=True)
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        overflow = max(shutil.get_terminal_size()[0] - 25, 50)
        f.request.url = "http://address:22/" + ("x" * overflow) + "textToBeTruncated"
        d._echo_request_line(f)
        assert "textToBeTruncated" not in buf.getvalue()
        buf.truncate(0)
def __init__(self, network):
    """Set up UI state, the command dispatch table and mode renderers."""
    self.network = network
    self.quitting = False
    self.mode = self.Mode.list
    self.current_entity = None
    self.status = "Started PIMesh"
    self.titlebar = "PIMesh"
    self.cols, self.lines = shutil.get_terminal_size()
    self.used_lines = 0  # Used for vertical padding
    # Command word -> handler method.
    self.commands = {
        "duck": self.command_duck,
        "list": self.command_list,
        "view": self.command_view,
        "help": self.command_help,
        "add": self.command_add,
        "remove": self.command_remove,
        "quit": self.command_quit,
    }
    # UI mode -> renderer that draws its content.
    self.mode_content = {
        UI.Mode.list: self.list_print,
        UI.Mode.links: self.links_print,
        UI.Mode.help: self.help_print,
        UI.Mode.duck: self.duck_print,
    }
def getFile(url, destdir, filename=''):
    """download file from 'url' into 'destdir'

    Prefers wget when available; otherwise falls back to urllib with a
    simple console progress hook.  Returns True on success (or when the
    target file already exists), False when no url was given.
    """
    EmergeDebug.debug("getFile called. url: %s" % url, 1)
    if url == "":
        EmergeDebug.error("fetch: no url given")
        return False
    if UtilsCache.findApplication("wget"):
        return wgetFile(url, destdir, filename)
    if not filename:
        # Derive the file name from the URL path.
        _, _, path, _, _, _ = urllib.parse.urlparse(url)
        filename = os.path.basename(path)
    if os.path.exists(os.path.join(destdir, filename)):
        return True
    width, _ = shutil.get_terminal_size((80, 20))

    def dlProgress(count, blockSize, totalSize):
        # BUG FIX: the original divided by totalSize unconditionally, so a
        # server that sends no Content-Length (totalSize <= 0) crashed the
        # download with ZeroDivisionError; also clamp percent to 100 for
        # servers that under-report the size.
        if totalSize <= 0:
            return
        percent = min(int(count * blockSize * 100 / totalSize), 100)
        times = int((width - 20) / 100 * percent)
        sys.stdout.write("\r%s%3d%%" % ("#" * times, percent))
        sys.stdout.flush()

    urllib.request.urlretrieve(url, filename=os.path.join(destdir, filename),
                               reporthook=dlProgress if EmergeDebug.verbose() >= 0 else None)
    if EmergeDebug.verbose() >= 0:
        sys.stdout.write("\n")
        sys.stdout.flush()
    return True
def update_bar(self):
    """
    Update the progress bar without displaying it

    Returns
        bool: True if the progress bar changed and should be redrawn
    """
    dirty = False

    term_width, _ = get_terminal_size()
    # Drawing at the end of the row will create a newline on windows
    term_width -= 1
    if term_width != self.width:
        # The width of the terminal changed
        self.width = term_width
        dirty = True

    # Compute progress
    if self.bb:
        done = self.bb.current_users + self.bb.current_topics + self.bb.current_posts
        goal = self.bb.total_users + self.bb.total_topics + self.bb.total_posts
    else:
        done, goal = 0, 0
    if self.current != done or self.total != goal:
        self.current = done
        self.total = goal
        dirty = True

    if not dirty:
        # The progress bar did not change
        return False

    # Size of the actual progress bar: leave space for two brackets,
    # one space, and four characters for the percentage.
    bar_cells = self.width - 7
    if goal == 0:
        filled, empty, pct = 0, bar_cells, 0
    else:
        # Completed portion, capped at the bar size / 100%.
        filled = min(done * bar_cells // goal, bar_cells)
        empty = bar_cells - filled
        pct = min(done * 100 // goal, 100)

    rendered = "[{}{}] {:3}%".format("#" * filled, " " * empty, pct)
    if rendered != self.progressbar:
        self.progressbar = rendered
        return True
    return False
def session(self, router, sessionid):
    """Interactive remote-terminal pump for one router.

    Repeatedly PUTs buffered keyboard input to the remote csterm endpoint
    and writes returned terminal data to local output, re-sending the
    terminal dimensions whenever the local terminal is resized.  Loops
    until the API reports a failure, which is raised as an Exception.
    """
    rid = router['id']
    w_save, h_save = None, None
    res = 'remote/control/csterm/ecmcli-%s/' % sessionid
    # Start with a newline so the remote side repaints its prompt.
    in_data = '\n'
    poll_timeout = self.key_idle_timeout  # somewhat arbitrary
    while True:
        w, h = shutil.get_terminal_size()
        if (w, h) != (w_save, h_save):
            # First pass or terminal resized: send dimensions with the keys.
            out = self.api.put(res, {
                "w": w,
                "h": h,
                "k": in_data
            }, id=rid)[0]
            w_save, h_save = w, h
            data = out['data']['k'] if out['success'] else None
        else:
            # Size unchanged: use the keys-only endpoint.
            out = self.api.put('%sk' % res, in_data, id=rid)[0]
            data = out['data'] if out['success'] else None
        if out['success']:
            if data:
                self.full_write(self.raw_out, data.encode())
                poll_timeout = 0  # Quickly look for more data
            else:
                # No output pending: back off gradually to reduce polling.
                poll_timeout += 0.050
        else:
            raise Exception('%s (%s)' % (out['exception'], out['reason']))
        poll_timeout = min(self.poll_max_retry, poll_timeout)
        in_data = self.buffered_read(max_timeout=poll_timeout)
def pew():
    """Entry point: dispatch argv[1] to its *_cmd function, or list commands."""
    # Every module-level function named "<cmd>_cmd" is a subcommand.
    cmds = {name[:-4]: fn for name, fn in globals().items()
            if name.endswith("_cmd")}
    if sys.argv[1:]:
        try:
            command = cmds[sys.argv[1]]
            # Collapse "pew <cmd>" into a single argv[0] for the subcommand.
            sys.argv = ["-".join(sys.argv[:2])] + sys.argv[2:]
            update_args_dict()
            try:
                return command()
            except CalledProcessError as e:
                return e.returncode
        except KeyError:
            print("ERROR: command %s does not exist." % sys.argv[1],
                  file=sys.stderr)
    # No (or unknown) subcommand: print the help listing.
    longest = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print("Available commands:\n")
    for name, fn in cmds.items():
        if fn.__doc__:
            summary = fn.__doc__.splitlines()[0]
            print(
                textwrap.fill(
                    summary,
                    columns,
                    initial_indent=(" {0}: ".format(name)).ljust(longest),
                    subsequent_indent=longest * " ",
                )
            )
        else:
            print(" " + name)
def _get_terminal_size():
    """Return the terminal width in columns as an int."""
    if platform.system() == "Windows":
        #######################
        ## Copied from i-net ##
        #######################
        from ctypes import windll, create_string_buffer
        h = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            import struct
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            sizex = right - left + 1
            sizey = bottom - top + 1
        else:
            sizex, sizey = 80, 25
        return sizex
    else:
        import shutil
        # BUG FIX: the original shadowed the builtin `str` with the
        # terminal_size tuple and regex-parsed its repr to recover the
        # first number; read the columns field directly instead.
        return shutil.get_terminal_size().columns
def main():
    """Interactive task-list loop: render tasks, show completion, run commands."""
    # BUG FIX: the original declared `global header_message` *after*
    # `print(header_message)`, which is a SyntaxError in Python 3
    # ("name is used prior to global declaration"); declare it up front.
    global header_message
    parse_tasks_file()
    cls()
    try:
        while True:
            print(header_message)
            header_message = ''
            # Leave one column free so output never wraps onto a new line.
            width = shutil.get_terminal_size((80, 24))[0] - 1
            tasks.sort()
            for i, t in enumerate(tasks):
                print(format_task(t, i + 1, width=width), end="")
            # Completion summary: e.g. " 42%", or "###" when all done.
            n_all = len(tasks)
            n_complete = sum(1 for t in tasks if t.completed)
            percent = (n_complete / n_all * 100) if n_all > 0 else 0
            if percent == 100:
                percent = "###"
            else:
                percent = str(int(percent)) + "%"
            cmd = input('\n [%3s]' % percent)
            exec_cmd(cmd)
            cls()
    except KeyboardInterrupt:
        cls()
def vocabResults(word):
    """Look up *word* on vocabulary.com and print its definitions.

    Prints the word centred in the terminal, the "short" and "long"
    definition blurbs when present, and up to five example sentences from
    the corpus API.  Returns the original *word* — NOTE(review):
    handleVocabDidYouMean() may substitute a suggested spelling into the
    soup, but `word` (not `suggestedWord`) is returned; confirm intent.
    """
    url = 'https://www.vocabulary.com/dictionary/' + word
    # Spoof a browser user-agent; presumably the site blocks the default
    # requests UA — TODO confirm.
    response = requests.get(url, headers={"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0"})
    html = response.content
    vocab_soup = BeautifulSoup(html, "lxml")
    [vocab_soup, suggestedWord] = handleVocabDidYouMean(vocab_soup, word)
    columns = shutil.get_terminal_size().columns
    print(color.getMagenta(word.upper().center(columns)))
    # Short one-line definition.
    short = vocab_soup.find("p", {"class":"short"})
    if short != None:
        print(color.getRed(printAligned("SHORT ", short.text)))
    # Longer explanatory definition.
    lonng = vocab_soup.find("p", {"class":"long"})
    if lonng != None:
        print(color.getDarkCyan(printAligned("LONG ", lonng.text)))
    # Example sentences come from a separate JSON corpus endpoint.
    json_url = 'https://corpus.vocabulary.com/api/1.0/examples.json?query=' + str(word) + '&maxResults=24&startOffset=0&filter=0'
    json_url = json_url.replace(' ', '%20')
    html = urllib.request.urlopen(json_url).read()
    j = json.loads(html.decode('utf-8'))
    if j['result'] != None:
        l = j['result']['sentences']
        print("Examples from Vocabulary:".upper().center(columns))
        for s in l[:5]:
            print(color.getItalics(printAligned(" > ", s['sentence'])))
    return word
def agenda(collection, dates=None, show_all_days=False, full=False,
           week=False, bold_for_light_color=True, **kwargs):
    """Print the agenda for *dates*, sized to the full terminal width."""
    width, _ = get_terminal_size()
    rows = get_agenda(
        collection, dates=dates, width=width,
        show_all_days=show_all_days, full=full, week=week,
        bold_for_light_color=bold_for_light_color, **kwargs)
    echo('\n'.join(rows))
def __init__(self, text, width, options=None):
    """
    Create ProgressBar object

    :argument text Text of the ProgressBar
    :argument options Format of progress Bar; a fresh ProgressBarOptions()
        is created when omitted
    :type text str
    :type width int
    :type options ProgressBarOptions
    """
    # BUG FIX: the default was `options=ProgressBarOptions()`, a single
    # instance created at definition time and shared by every ProgressBar
    # constructed without an explicit options argument; build one per call.
    if options is None:
        options = ProgressBarOptions()
    self._text = text
    self._status_msg = ""
    self._width = width
    self._max = 0
    self._console_width = get_terminal_size(fallback=(80, 24))[0]
    self._value = None
    self._timer = _ProgressBarTiming()
    self._begin_line_character = '\r'
    self._options = options
    self._infinite_mode = None
    self._infinite_position = None
    self._infinite_width = 1
    self.stdout = sys.stdout
    self._status = ProgressBarStatus.stopped
def __init__(self):
    """Build the top-level meson argument parser and register every subcommand."""
    self.term_width = shutil.get_terminal_size().columns
    # Widen help output to the whole terminal and push option help text
    # towards the middle of the line.
    self.formater = lambda prog: argparse.HelpFormatter(prog, max_help_position=int(self.term_width / 2), width=self.term_width)
    # name -> (parser, run function); populated by add_command.
    self.commands = {}
    # Commands excluded from the help listing.
    self.hidden_commands = []
    self.parser = argparse.ArgumentParser(prog='meson', formatter_class=self.formater)
    self.subparsers = self.parser.add_subparsers(title='Commands', description='If no command is specified it defaults to setup command.')
    self.add_command('setup', msetup.add_arguments, msetup.run, help_msg='Configure the project')
    self.add_command('configure', mconf.add_arguments, mconf.run, help_msg='Change project options',)
    self.add_command('install', minstall.add_arguments, minstall.run, help_msg='Install the project')
    self.add_command('introspect', mintro.add_arguments, mintro.run, help_msg='Introspect project')
    self.add_command('init', minit.add_arguments, minit.run, help_msg='Create a new project')
    self.add_command('test', mtest.add_arguments, mtest.run, help_msg='Run tests')
    self.add_command('wrap', wraptool.add_arguments, wraptool.run, help_msg='Wrap tools')
    self.add_command('subprojects', msubprojects.add_arguments, msubprojects.run, help_msg='Manage subprojects')
    self.add_command('help', self.add_help_arguments, self.run_help_command, help_msg='Print help of a subcommand')
    self.add_command('rewrite', lambda parser: rewriter.add_arguments(parser, self.formater), rewriter.run, help_msg='Modify the project definition')
    # Hidden commands
    self.add_command('runpython', self.add_runpython_arguments, self.run_runpython_command, help_msg=argparse.SUPPRESS)
    self.add_command('unstable-coredata', munstable_coredata.add_arguments, munstable_coredata.run, help_msg=argparse.SUPPRESS)
def printPaths(self, compact=False):
    """Print the symlink source/destination pairs of this DotConfig.

    Paths are expanded unless *compact* is True.
    """
    (cols, rows) = shutil.get_terminal_size()
    separator = CLS.lightblue + (cols * "-") + CLS.none
    print(separator)
    print(" Files for package: " + CLS.green + self.package_name + CLS.none)
    print(separator)
    # Pick the raw or expanded mapping depending on the compact flag.
    paths_dict = self.paths_dict if compact else self.expanded_paths_dict
    for source, dest in paths_dict.items():
        print(CLS.yellow + "Symlink: " + CLS.green + dest + CLS.none + " >> " + CLS.lightblue + source + CLS.none)
def _get_finished_line(self, progress):
    """Build the carriage-returned 'finished' status line, padded to the
    terminal width so it fully overwrites the previous line."""
    term_width, _ = get_terminal_size()
    elapsed = timestamp2timedelta(progress.elapsed_seconds())
    text = "{} finished in {}.".format(progress.name_after, elapsed)
    return "\r" + text.ljust(term_width)
def memory_summary(state, group_by="NODE_ADDRESS", sort_by="OBJECT_SIZE",
                   line_wrap=True) -> str:
    """Build a human-readable report of Ray object-store memory usage.

    Fetches core-worker stats from every raylet in *state*, groups and
    sorts them according to the arguments, then renders a per-group
    summary plus a table of individual object references.

    Args:
        state: global state accessor providing node_table().
        group_by: grouping key name, e.g. "NODE_ADDRESS".
        sort_by: sorting key name, e.g. "OBJECT_SIZE".
        line_wrap: when True and the terminal is wide enough, wrap long
            call sites across multiple lines instead of truncating.

    Returns:
        The formatted report as a single string.
    """
    from ray.new_dashboard.modules.stats_collector.stats_collector_head\
        import node_stats_to_dict

    # Get terminal size
    import shutil
    size = shutil.get_terminal_size((80, 20)).columns
    # Minimum terminal width needed for the wide, wrapped layout.
    line_wrap_threshold = 137

    # Fetch core memory worker stats, store as a dictionary
    core_worker_stats = []
    for raylet in state.node_table():
        stats = node_stats_to_dict(
            node_stats(raylet["NodeManagerAddress"],
                       raylet["NodeManagerPort"]))
        core_worker_stats.extend(stats["coreWorkersStats"])
        assert type(stats) is dict and "coreWorkersStats" in stats

    # Build memory table with "group_by" and "sort_by" parameters
    group_by, sort_by = get_group_by_type(group_by), get_sorting_type(sort_by)
    memory_table = construct_memory_table(core_worker_stats, group_by,
                                          sort_by).as_dict()
    assert "summary" in memory_table and "group" in memory_table

    # Build memory summary
    mem = ""
    # From here on group_by/sort_by are display strings, not enum values.
    group_by, sort_by = group_by.name.lower().replace(
        "_", " "), sort_by.name.lower().replace("_", " ")
    summary_labels = [
        "Mem Used by Objects", "Local References", "Pinned Count",
        "Pending Tasks", "Captured in Objects", "Actor Handles"
    ]
    summary_string = "{:<19} {:<16} {:<12} {:<13} {:<19} {:<13}\n"
    object_ref_labels = [
        "IP Address", "PID", "Type", "Call Site", "Size", "Reference Type",
        "Object Ref"
    ]
    object_ref_string = "{:<8} {:<3} {:<4} {:<9} {:<4} {:<14} {:<10}\n"
    if size > line_wrap_threshold and line_wrap:
        # Wider columns when the terminal has room for wrapped call sites.
        object_ref_string = "{:<12} {:<5} {:<6} {:<22} {:<6} {:<18} \
{:<56}\n"
    mem += f"Grouping by {group_by}...\
 Sorting by {sort_by}...\n\n\n\n"

    for key, group in memory_table["group"].items():
        # Group summary
        summary = group["summary"]
        summary["total_object_size"] = str(summary["total_object_size"]) + " B"
        mem += f"--- Summary for {group_by}: {key} ---\n"
        mem += summary_string\
            .format(*summary_labels)
        mem += summary_string\
            .format(*summary.values()) + "\n"
        # Memory table per group
        mem += f"--- Object references for {group_by}: {key} ---\n"
        mem += object_ref_string\
            .format(*object_ref_labels)
        for entry in group["entries"]:
            # A size of -1 means the size is unknown; show "?" instead.
            entry["object_size"] = str(entry["object_size"]) + " B" if entry[
                "object_size"] > -1 else "?"
            num_lines = 1
            if size > line_wrap_threshold and line_wrap:
                # Chop the call site into fixed-width chunks, one per line.
                call_site_length = 22
                entry["call_site"] = [
                    entry["call_site"][i:i + call_site_length] for i in range(
                        0, len(entry["call_site"]), call_site_length)
                ]
                num_lines = len(entry["call_site"])
            object_ref_values = [
                entry["node_ip_address"], entry["pid"], entry["type"],
                entry["call_site"], entry["object_size"],
                entry["reference_type"], entry["object_ref"]
            ]
            # Pad every column to num_lines rows so a multi-line call site
            # lines up against blank cells in the other columns.
            for i in range(len(object_ref_values)):
                if not isinstance(object_ref_values[i], list):
                    object_ref_values[i] = [object_ref_values[i]]
                object_ref_values[i].extend(
                    ["" for x in range(num_lines - len(object_ref_values[i]))])
            for i in range(num_lines):
                row = [elem[i] for elem in object_ref_values]
                mem += object_ref_string\
                    .format(*row)
            mem += "\n"
        mem += "\n\n\n"
    return mem
def get_terminal_width():
    """Return the current terminal width in columns."""
    columns, _lines = shutil.get_terminal_size()
    return columns
except ImportError: print('''This program requires the bext module, which you can install by opening a Terminal window (on macOS & Linux) and running: python3 -m pip install --user bext or a Command Prompt window (on Windows) and running: python -m pip install --user bext''') sys.exit() # Set up the constants: PAUSE_LENGTH = 0.1 # Get the size of the terminal window: WIDTH, HEIGHT = shutil.get_terminal_size() # We can't print to the last column on Windows without it adding a # newline automatically, so reduce the width by one: WIDTH -= 1 WIDTH //= 2 NUMBER_OF_WORMS = 12 # (!) Try changing this value. MIN_WORM_LENGTH = 6 # (!) Try changing this value. MAX_WORM_LENGTH = 16 # (!) Try changing this value. ALL_COLORS = bext.ALL_COLORS NORTH = 'north' SOUTH = 'south' EAST = 'east' WEST = 'west' BLOCK = chr(9608) # Character 9608 is '█'
def walk_dirs(session: Session, stats: DedupStats, dirs_to_walk: List[Path],
              confirm: bool, verbose: bool,
              walking_target_dirs: bool) -> None:
    """Traverse the `dirs_to_walk` directories, deleting any duplicate files in
    the target directories. Does not follow symbolic links.

    Args:
        session: handle to target file database
        stats: accumulates statistics about visited files and directories
        dirs_to_walk: list of directories to recursively traverse
        confirm: if True, ask before deletion
        verbose: if True, print extra information
        walking_target_dirs: if True, `dirs_to_walk` is a list of target
            directories; if False, `dirs_to_walk` is a list of read-only
            directories.
    """

    def abbrev(path: str) -> str:
        # Shorten a path for display to ~80% of the terminal width.
        return abbrev_path(path, long_width)

    def target_file_dir(target: File) -> Path:
        # Walk up from a target file's directory until we hit a directory
        # that has a dir_stats entry (i.e. one of the configured targets).
        tfp = Path(target.full_path).parent
        while tfp not in stats.dir_stats:
            new_tfp = tfp.parent
            if tfp == new_tfp:
                # prevent an infinite loop. this should never happen because
                # this function should only be called with a path to a target
                # file after all target directories have been added to
                # dir_stats.
                raise RuntimeError(f'Cannot find {tfp} in dir_stats')
            tfp = new_tfp
        return tfp

    # Fall back to a huge width when not attached to a terminal so paths are
    # effectively never abbreviated in that case.
    term_width = shutil.get_terminal_size((4096, 25)).columns
    long_width = int(term_width * 0.8)

    for d in dirs_to_walk:
        s = stats.dir_stats[d]
        s.is_target_dir = walking_target_dirs
        s.directory = str(d)
        # `skip` holds the directory the user asked to skip, or None.
        skip = None
        for root, dirs, files in os.walk(d):
            # On Windows, os.walk follows symbolic directory links and
            # directory junctions (see Windows command mklink with /D and /J
            # flags, respectively) even though followlinks defaults to False.
            # Check explicitly to avoid following them.
            if os.name == 'nt' and islink(root):
                continue

            # If user previously asked to skip remainder of files and
            # subdirectories at a certain level of the directory we're
            # walking, see if we're still at that point and if so, continue
            # skipping until os.walk moves on to a higher level.
            if skip is not None:
                # are we still in the skip dir, or in a subdir of it?
                if is_same_or_subdir(root, skip):
                    continue
                # we've popped back up above the dir we were skipping, so
                # clear out the skip value.
                skip = None

            s.total_dir_count += 1
            if verbose:
                target_or_ro = "target" if walking_target_dirs else "read-only"
                print(f'Scanning {target_or_ro} directory {abbrev(root)}')
            for file in files:
                # Get the extension and size of the file we're examining.
                cur_file_extension = get_extension(file)
                cur_file_full_path = os.path.join(root, file)
                cur_file_size = os.path.getsize(cur_file_full_path)
                s.total_file_count += 1
                s.total_file_size += cur_file_size

                # Query the database to see if there's a target file that has
                # the same size and extension
                rows = session.query(File).filter(
                    File.size == cur_file_size).filter(
                        File.extension == cur_file_extension)

                # Do a bytewise comparison of the current file against all the
                # existing ones in the database of target files that have the
                # same size and extension to see if it is a duplicate.
                is_dup = False
                for target_file in rows:
                    is_dup = filecmp.cmp(cur_file_full_path,
                                         target_file.full_path,
                                         shallow=False)
                    if is_dup:
                        # We've found a duplicate file.
                        if walking_target_dirs:
                            s.dup_file_count += 1
                            s.dup_file_size += cur_file_size
                            # cur_file_full_path, which is in the TARGET
                            # directories, is a duplicate of target_file (also
                            # in the target directories) which we've
                            # previously examined and put in the database.
                            if verbose:
                                original = abbrev(target_file.full_path)
                                print(f' {file} duplicates {original}')
                            skip = remove_with_confirm(cur_file_full_path,
                                                       confirm)
                        else:
                            # cur_file_full_path, which is in the READ-ONLY
                            # directories, is duplicated by target_file, which
                            # is in the target directories. We need to record
                            # the duplicate file size in the target
                            # directory's dir_stats entry, not the read-only
                            # dir_stats, which is what `s` is.
                            target_stats = stats.dir_stats[target_file_dir(
                                target_file)]
                            target_stats.dup_file_count += 1
                            target_stats.dup_file_size += cur_file_size
                            # We never delete anything from read-only, so
                            # delete target_file.
                            if verbose:
                                dup = abbrev(target_file.full_path)
                                print(
                                    f' {dup} duplicates {cur_file_full_path}')
                            skip = remove_with_confirm(target_file.full_path,
                                                       confirm)
                            # Now that we've deleted target_file from disk (or
                            # the user has elected not to delete it), remove
                            # it from the database.
                            session.delete(target_file)
                            session.commit()
                        break

                # If we're traversing a target directory and have found a new
                # unique file, add it to the database.
                if walking_target_dirs and not is_dup:
                    new_file_rec = File(cur_file_full_path,
                                        cur_file_extension, cur_file_size)
                    session.add(new_file_rec)
                    session.commit()
                # User asked to skip the rest of this directory: stop
                # examining its remaining files.
                if skip is not None:
                    break
import os
import sys
import tty, termios
import string
import shutil

from .charDef import *
from . import colors

COLUMNS, _ = shutil.get_terminal_size()  ## Size of console


def mygetc():
    ''' Get raw characters from input. '''
    fd = sys.stdin.fileno()
    # Remember the current tty settings so we can restore them afterwards.
    old_settings = termios.tcgetattr(fd)
    try:
        # Raw mode: read a single keystroke without waiting for Enter and
        # without echo.
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        # TCSADRAIN: apply after pending output has been transmitted.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def getchar():
    ''' Character input parser. '''
    c = mygetc()
    # Pass through the control keys (defined in charDef) unchanged.
    if ord(c) == LINE_BEGIN_KEY or \
       ord(c) == LINE_END_KEY or \
       ord(c) == TAB_KEY or \
       ord(c) == INTERRUPT_KEY or \
       ord(c) == NEWLINE_KEY:
# Load the bitmap font codepage; glyphs are laid out in a grid of
# 9x16-pixel cells, 32 glyphs per row (hence the % 32 / // 32 arithmetic).
codepage = Image.open(codepage_path).convert('RGB')
character_bmps = {}
print("GENERATING CHARACTERS")
for c in (string.ascii_letters + string.digits + string.punctuation + ' '):
    ascii_code = ord(c)
    # (left, upper, right, lower) of this character's cell in the codepage.
    bounding_box = ((ascii_code % 32) * 9, math.floor(ascii_code / 32) * 16,
                    (ascii_code % 32) * 9 + 9,
                    math.floor(ascii_code / 32) * 16 + 16)
    character_bmps[ascii_code] = codepage.crop(bounding_box)
print("READING IMAGE")
image = Image.open(args.image).convert('RGB')
filtered = image
print("RESIZING IMAGE")
tsize = shutil.get_terminal_size()
ar = filtered.width / filtered.height
if args.scale:
    # Fit to terminal width, preserving aspect ratio; the 9/16 factor
    # compensates for the non-square character cells.
    w = tsize.columns
    h = math.floor((w/ar*9)/16)
    if h > tsize.lines:
        # Too tall: fit to terminal height instead (minus one line for the
        # prompt) and recompute the width.
        h = tsize.lines - 1
        w = math.floor((h*ar*16)/9)
elif args.fit:
    # Stretch to fill the whole terminal, ignoring aspect ratio.
    w = tsize.columns
    h = tsize.lines
elif args.width:
def run(self, pav_cfg, args):
    """Print the test results in a variety of formats.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed command arguments. Uses ``tests``, ``re_run``,
        ``json``, ``full`` and ``key``.
    :return: 0 on success, an errno value on failure.
    """

    test_ids = self._get_tests(pav_cfg, args.tests)

    # Load each requested test run, warning (not failing) on any that are
    # missing or broken.
    tests = []
    for id_ in test_ids:
        try:
            tests.append(TestRun.load(pav_cfg, id_))
        except TestRunError as err:
            self.logger.warning("Could not load test %s - %s", id_, err)
        except TestRunNotFoundError as err:
            self.logger.warning("Could not find test %s - %s", id_, err)

    if args.re_run:
        if not self.update_results(pav_cfg, tests):
            return errno.EINVAL

    if args.json or args.full:
        # BUG FIX: this branch previously assumed "there should always be at
        # least one test" and indexed `tests[0]`, raising IndexError when
        # every load above failed. Bail out cleanly instead.
        if not tests:
            self.logger.warning("No test results found.")
            return errno.EINVAL

        if len(tests) > 1:
            results = {test.name: test.results for test in tests}
        else:
            results = tests[0].results

        width = shutil.get_terminal_size().columns

        try:
            if args.json:
                output.json_dump(results, self.outfile)
            else:
                pprint.pprint(results,  # ext-print: ignore
                              stream=self.outfile, width=width,
                              compact=True)
        except OSError:
            # It's ok if this fails. Generally means we're piping to
            # another command.
            pass

        return 0
    else:
        fields = self.BASE_FIELDS + args.key
        results = [test.results for test in tests]

    def fix_timestamp(ts_str: str) -> str:
        """Read the timestamp text and get a minimized, formatted value."""
        try:
            when = datetime.datetime.strptime(ts_str,
                                              '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            # Unparseable (or empty) timestamps render as blank cells.
            return ''

        return output.get_relative_timestamp(when)

    output.draw_table(
        outfile=self.outfile,
        field_info={
            'started': {'transform': fix_timestamp},
            'finished': {'transform': fix_timestamp},
        },
        fields=fields,
        rows=results,
        title="Test Results"
    )

    return 0
def __del__(self):
    """On teardown, blank out the current terminal line (when drawing)."""
    if not self.draw:
        return
    blank_line = ' ' * (get_terminal_size().columns - 1)
    print(blank_line, end='\r')
def progress_start(self):
    """Begin a progress display, finishing any session already in flight."""
    already_running = self._progress_started
    if already_running:
        self.progress_end()
    # Snapshot the terminal dimensions for this progress session.
    self._term_size = shutil.get_terminal_size()
    self._progress_started = True
def print_board(board) -> None:
    """Prints out the board, clearing the terminal as it does so"""
    print("\033[2J")  # clears terminal
    print("\033[H")  # moves to top-left of terminal
    for row in board:
        for i in row:
            # i is a bool: False -> ' ', True -> '#'.
            print(" #"[i], end="")  # buffer isn't flushed
    # flushes buffer, so the whole screen is printed at once
    # prevents cursor from jumping around screen
    print()


if __name__ == "__main__":
    size = get_terminal_size()
    # Optional first CLI arg: initial fraction of live cells (default 0.1).
    density = float(argv[1]) if len(argv) > 1 else 0.1

    # randomly populating board
    board = [[random() <= density for _ in range(size.columns)]
             for _ in range(size.lines)]

    # making a glider
    # board[1][1] = 1
    # board[2][2] = 1
    # board[2][3] = 1
    # board[3][1] = 1
    # board[3][2] = 1

    print_board(board)

    try:
        while True:
            print_board(board)
            sleep(0.05)  # arbitrary delay (20 fps) that I think looks good
# Banner written at the top of the generated ArchiveBox config file.
CONFIG_HEADER = ("""# This is the config file for your ArchiveBox collection.
#
# You can add options here manually in INI format, or automatically by running:
#    archivebox config --set KEY=VALUE
#
# If you modify this file manually, make sure to update your archive after by running:
#    archivebox init
#
# A list of all possible config with documentation and examples can be found here:
#    https://github.com/pirate/ArchiveBox/wiki/Configuration
""")

# Config values computed from other config (each 'default' is a callable that
# receives the partially-resolved config dict `c`).
DERIVED_CONFIG_DEFAULTS: ConfigDefaultDict = {
    'TERM_WIDTH': {
        # Double lambda: the outer one takes the config, the inner zero-arg
        # callable defers the terminal-size query until actually needed.
        'default': lambda c: lambda: shutil.get_terminal_size(
            (100, 10)).columns
    },
    'USER': {
        'default': lambda c: getpass.getuser() or os.getlogin()
    },
    'ANSI': {
        # Blank out all color codes when USE_COLOR is off.
        'default': lambda c: DEFAULT_CLI_COLORS if c['USE_COLOR'] else
        {k: '' for k in DEFAULT_CLI_COLORS.keys()}
    },
    'REPO_DIR': {
        'default': lambda c: Path(__file__).resolve().parent.parent.parent
    },
    'PYTHON_DIR': {
        'default': lambda c: c['REPO_DIR'] / PYTHON_DIR_NAME
    },
# NOTE(review): this fragment depends on variables (bars_col, text_col,
# maxlen, logo_len, i, insa) defined above this chunk.
if compatibility_mode:
    # Plain ASCII box-drawing characters for terminals without Unicode.
    chars = ['|', '|', '|', '-', '-', '-', ' ', ' ']
    if bars_col is not None:
        bars_col = text_col
    text_col += ' '  # Add a coloured space before
    maxlen += 1  # Add a coloured space after
    logo_len += 2
    i += 1
else:
    chars = ['╻', '┃', '╹', '╶', '─', '╴', '▄', '▀']

# Extra "A" columns of 888-style ASCII art, enabled by an --aaa... flag.
tab_a = [''] * 8
nb_a = 0
if i < len(sys.argv) and re.fullmatch(r'--aaa+', sys.argv[i]) != None:
    import shutil
    nb_cols = shutil.get_terminal_size().columns
    if nb_cols > logo_len:
        # Each extra art column is 13 characters wide.
        # NOTE(review): intra-string spacing was reconstructed — verify the
        # art strings against the original source.
        nb_a = (nb_cols - logo_len) // 13
        tab_a[0] = '8888b        ' * nb_a
        tab_a[1] = '88888b       ' * nb_a
        tab_a[2] = '888Y88b      ' * nb_a
        tab_a[3] = '888 Y88b     ' * nb_a
        tab_a[4] = '888  Y88b    ' * nb_a
        tab_a[5] = '888   Y88b   ' * nb_a
        tab_a[6] = '8888888888b  ' * nb_a
        tab_a[7] = '888     Y88b ' * nb_a
        logo_len += 13 * nb_a

reset = '\033[0m'
fixed_padding = ' ' * (maxlen - 16) + reset
insa_padding = ' ' * (maxlen - len(insa) + 1) + reset
def downloadURL(URL, dest, decompress=False, index=None):
    """Download `URL` to `dest`, with progress display, signature handling
    and optional decompression (zip / tar / gz).

    Returns True on success (including "already downloaded and valid"),
    False on any failure. Downloads go to a pid-suffixed temp file first and
    are renamed into place only when complete.
    """
    dest = os.path.abspath(os.path.expanduser(dest))
    dest_dir, filename = os.path.split(dest)
    #
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    # makedirs may have silently failed (e.g. permissions) — re-check.
    if not os.path.isdir(dest_dir):
        raise RuntimeError(
            'Failed to create destination directory to download {}'.format(
                URL))
    #
    # Abbreviate long filenames for the progress display.
    message = filename
    if len(message) > 30:
        message = message[:10] + '...' + message[-16:]
    #
    dest_tmp = dest + '.tmp_{}'.format(os.getpid())
    term_width = shutil.get_terminal_size((80, 20)).columns
    try:
        env.logger.debug('Download {} to {}'.format(URL, dest))
        prog = ProgressBar(message, disp=env.verbosity > 1)
        sig = FileTarget(dest)
        # Existing file: behavior depends on the signature mode.
        if os.path.isfile(dest):
            if env.sig_mode == 'construct':
                prog.done(message + ': \033[32m writing signature\033[0m')
                sig.write_sig()
                prog.done(message + ': \033[32m signature calculated\033[0m')
                return True
            elif env.sig_mode == 'ignore':
                prog.done(message + ': \033[32m use existing\033[0m')
                return True
            else:
                prog.done(message + ': \033[32m Validating signature\033[0m')
                if sig.validate():
                    prog.done(message + ': \033[32m Validated\033[0m')
                    return True
                else:
                    # Signature mismatch: fall through and re-download.
                    prog.done(message + ':\033[91m Signature mismatch\033[0m')
        #
        # Stop using pycurl because of libcurl version compatibility problems
        # that happen so often and difficult to fix. Error message looks like
        #
        # Reason: Incompatible library version: pycurl.cpython-35m-darwin.so
        # requires version 9.0.0 or later, but libcurl.4.dylib provides version 7.0.0
        #
        #with open(dest_tmp, 'wb') as f:
        #    c = pycurl.Curl()
        #    c.setopt(pycurl.URL, str(URL))
        #    c.setopt(pycurl.WRITEFUNCTION, f.write)
        #    c.setopt(pycurl.SSL_VERIFYPEER, False)
        #    c.setopt(pycurl.NOPROGRESS, False)
        #    c.setopt(pycurl.PROGRESSFUNCTION, prog.curlUpdate)
        #    c.perform()
        #if c.getinfo(pycurl.HTTP_CODE) == 404:
        #    prog.done(message + ':\033[91m 404 Error {}\033[0m'.format(' '*(term_width - len(message) - 12)))
        #    try:
        #        os.remove(dest_tmp)
        #    except OSError:
        #        pass
        #    return False
        with open(dest_tmp, 'wb') as f:
            try:
                u = urllib.request.urlopen(str(URL))
                try:
                    file_size = int(u.getheader("Content-Length"))
                # NOTE(review): bare except — intentionally treats any
                # missing/bad Content-Length as "size unknown".
                except:
                    file_size = None
                file_size_dl = 0
                block_sz = 8192
                # Stream the body in 8 KB chunks, updating the progress bar.
                while True:
                    buffer = u.read(block_sz)
                    if not buffer:
                        break
                    file_size_dl += len(buffer)
                    f.write(buffer)
                    prog.urllibUpdate(file_size, file_size_dl)
            except urllib.error.HTTPError as e:
                prog.done(message + ':\033[91m {} Error\033[0m'.format(e.code))
                try:
                    os.remove(dest_tmp)
                except OSError:
                    pass
                return False
            except Exception as e:
                prog.done(message + ':\033[91m {}\033[0m'.format(e))
                try:
                    os.remove(dest_tmp)
                except OSError:
                    pass
                return False
        #
        # Download complete — move the temp file into place.
        os.rename(dest_tmp, dest)
        decompressed = 0
        if decompress:
            if zipfile.is_zipfile(dest):
                prog.done(message + ':\033[91m Decompressing\033[0m')
                zip = zipfile.ZipFile(dest)
                zip.extractall(dest_dir)
                names = zip.namelist()
                for name in names:
                    if not os.path.isfile(os.path.join(dest_dir, name)):
                        return False
                    else:
                        sig.add(os.path.join(dest_dir, name))
                        decompressed += 1
            elif tarfile.is_tarfile(dest):
                prog.done(message + ':\033[91m Decompressing\033[0m')
                # NOTE(review): extractall on an untrusted archive is subject
                # to path-traversal; verify the source of the archives.
                with tarfile.open(dest, 'r:*') as tar:
                    tar.extractall(dest_dir)
                    # only extract files
                    files = [x.name for x in tar.getmembers() if x.isfile()]
                    for name in files:
                        if not os.path.isfile(os.path.join(dest_dir, name)):
                            return False
                        else:
                            sig.add(os.path.join(dest_dir, name))
                            decompressed += 1
            elif dest.endswith('.gz'):
                prog.done(message + ':\033[91m Decompressing\033[0m')
                decomp = dest[:-3]
                with gzip.open(dest, 'rb') as fin, open(decomp, 'wb') as fout:
                    buffer = fin.read(100000)
                    while buffer:
                        fout.write(buffer)
                        buffer = fin.read(100000)
                sig.add(decomp)
                decompressed += 1
        decompress_msg = '' if not decompressed else \
            ' ({} file{} decompressed)'.format(
                decompressed, '' if decompressed <= 1 else 's')
        prog.done(message + ':\033[32m downloaded{} {}\033[0m'.format(
            decompress_msg,
            ' ' * (term_width - len(message) - 13 - len(decompress_msg))))
        # if a md5 file exists
        # if downloaded files contains .md5 signature, use them to validate
        # downloaded files.
        if os.path.isfile(dest + '.md5'):
            prog.done(message + ':\033[91m Verifying md5 signature\033[0m')
            with open(dest + '.md5') as md5:
                rec_md5 = md5.readline().split()[0].strip()
                obs_md5 = fileMD5(dest, partial=False)
                if rec_md5 != obs_md5:
                    prog.done(
                        message + ':\033[91m MD5 signature mismatch\033[0m')
                    env.logger.warning(
                        'md5 signature mismatch for downloaded file {} (recorded {}, observed {})'
                        .format(filename[:-4], rec_md5, obs_md5))
            # NOTE(review): "verified" is printed even after a mismatch —
            # confirm against upstream whether this is intentional.
            prog.done(message + ':\033[91m MD5 signature verified\033[0m')
    except Exception as e:
        if env.verbosity > 2:
            sys.stderr.write(get_traceback())
        env.logger.error('Failed to download: {}'.format(e))
        return False
    finally:
        # if there is something wrong still remove temporary file
        if os.path.isfile(dest_tmp):
            os.remove(dest_tmp)
    sig.write_sig()
    return os.path.isfile(dest)
import shutil
import pandas as pd
import logging
# Route warnings through the logging system instead of stderr.
logging.captureWarnings(True)

from src.linear_regression.linreg import linreg_main
from src.logistic_regression.logreg import logreg_main
from src.nearest_neighbours_classifier.kNN_classifier import kNN_classifier_main
from src.nearest_neighbours_regression.kNN_regression import kNN_regression_main
from src.random_forest_classifier.rf_classifier import rfc_main
from src.random_forest_regression.rf_regression import rfr_main
from src.support_vector_classifier.sv_classifier import sv_classifier_main
from src.support_vector_regression.sv_regression import sv_regression_main
from src.neural_networks_regressor.NN_regressor import kerasNN_regression_main

# Current terminal width, used for centering/formatting printed output.
columns = shutil.get_terminal_size().columns

# Dispatch table: model name -> entry-point function for that model.
fun_map = {
    "linreg": linreg_main,
    "logreg": logreg_main,
    "kNN_classifier": kNN_classifier_main,
    "kNN_regression": kNN_regression_main,
    "random_forest_classifier": rfc_main,
    "random_forest_regression": rfr_main,
    "support_vector_classifier": sv_classifier_main,
    "support_vector_regression": sv_regression_main,
    "NN_regression": kerasNN_regression_main
}

### -------------------------------------------------- ###
### change the below variables according to your needs ###
### -------------------------------------------------- ###
def main():
    """Entry point for the colorls command: parse ls-style flags, list each
    requested path, and optionally print a summary report.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-1",
                        action="store_true",
                        default=False,
                        help="list items on individual lines")
    # BUG FIX: "entires" -> "entries" in the help text below (twice).
    parser.add_argument("-a",
                        "--all",
                        action="store_true",
                        default=False,
                        help="do not ignore entries starting with .")
    parser.add_argument("-B",
                        "--ignore-backups",
                        action="store_true",
                        default=False,
                        help="do not list implied entries ending with ~")
    parser.add_argument("-d",
                        "--directory",
                        action="store_true",
                        default=False,
                        help="list directories themselves, "
                        "not their contents")
    parser.add_argument("-f",
                        "--file",
                        action="store_true",
                        default=False,
                        help="list files only, not directories")
    parser.add_argument("-F",
                        "--classify",
                        action="store_true",
                        default=False,
                        help="append indicator (one of */=>@|) to entries")
    parser.add_argument(
        "-I",
        "--ignore",
        metavar="PATTERN",
        help="do not list implied entries matching shell PATTERN")
    parser.add_argument("-l",
                        "--long",
                        action="store_true",
                        default=False,
                        help="use a long listing format")
    parser.add_argument("-n",
                        "--numeric-uid-gid",
                        action="store_true",
                        default=False,
                        help="like -l, but list numeric user and group IDs")
    parser.add_argument("-R",
                        "--recursive",
                        action="store_true",
                        default=False,
                        help='list subdirectories recursively')
    parser.add_argument(
        "--report",
        action="store_true",
        default=False,
        help="brief report about number of files and directories")
    parser.add_argument("-t",
                        "--tree",
                        metavar="DEPTH",
                        type=int,
                        nargs='?',
                        const=3,
                        help="max tree depth")
    parser.add_argument("--version",
                        action="store_true",
                        default=False,
                        help="display current version number")
    # BUG FIX: --si's help was a copy-paste of --version's ("display current
    # version number"); describe what the flag actually toggles.
    parser.add_argument("--si",
                        action="store_true",
                        default=False,
                        help="display file sizes in SI units "
                        "(powers of 1000 instead of 1024)")
    parser.add_argument(
        "FILE",
        default=".",
        nargs=argparse.REMAINDER,
        help=
        "List information about the FILEs (the current directory by default).")
    args = parser.parse_args()

    if args.version:
        # _version.py contains a quoted version string; grab the text between
        # the first pair of double quotes.
        with open(os.path.join(
                Path(__file__).parent.absolute(),
                '_version.py')) as VERSION_FILE:
            version = VERSION_FILE.read()
            print("colorls version " + version.split('"')[1])
    if not args.FILE:
        args.FILE = ["."]
    report = list()
    term_size = shutil.get_terminal_size()
    for FILE in args.FILE:
        report.append(process_dir(FILE, args, size=term_size))
    print()
    # TODO: Fix report - only shows current directory and next correctly.
    # Likely overwritten dictionary values
    if args.report and report:
        print("\n --- REPORT ---")
        for n in report:
            for k, v in reversed(n.items()):
                print(f"{k} -> {v}")
def draw_table(outfile, field_info, fields, rows, border=False, pad=True,
               title=None, table_width=None):
    """Prints a table from the given data, dynamically setting
the column width.

:param outfile: The file-like object to write to.
:param dict field_info: Should be a dictionary of field names where the value
  is a dict of:

  - title (optional) - The column header for this field. Defaults to the
    field name, capitalized.
  - transform (optional) - a function that takes the field value,
    transforms it in some way, and returns the result to be inserted
    into the table.
  - format (optional) - a format string in the new style format syntax.
    It will expect the data for that row as arg 0. IE: '{0:2.2f}%'.
  - default (optional) - A default value for the field. A blank is
    printed by default.
  - no_wrap (optional) - a boolean that determines if a field will be
    wrapped or not.
  - max_width (optional) - the max width for a given field.
  - min_width (optional) - the min width for a given field.
:param list fields: A list of the fields to include, in the given order. These
    also serve as the default column titles (Capitalized).
:param list(dict) rows: A list of data dictionaries. A None may be included to
    denote that a horizontal line row should be inserted.
:param bool border: Put a border around the table. Defaults False.
:param bool pad: Put a space on either side of each header and row entry.
    Default True.
:param str title: Add the given title above the table. Default None
:param int table_width: By default size table to the terminal width. If set
    size the table to this width instead.
:return: None

**Examples**

A simple table: ::

    from pavilion import utils

    # The table data is expected as a list of dictionaries with identical
    # keys. Not all dictionary fields will necessarily be used. Commands
    # will typically generate the rows dynamically...
    rows = [
        {'color': 'BLACK',  'code': 30, 'usage': 'Default'},
        {'color': 'RED',    'code': 31, 'usage': 'Fatal Errors'},
        {'color': 'GREEN',  'code': 32, 'usage': 'Warnings'},
        {'color': 'YELLOW', 'code': 33, 'usage': 'Discouraged'},
        {'color': 'BLUE',   'code': 34, 'usage': 'Info'}
    ]
    # The data columns to print (and their default column labels).
    columns = ['color', 'usage']
    utils.draw_table(
        outfile=sys.stdout,
        field_info={},
        fields=columns,
        rows=rows)

    # Produces a table like this:
    #
    #  Color  | Usage
    # --------+--------------
    #  BLACK  | Default
    #  RED    | Fatal Errors
    #  GREEN  | Warnings
    #  YELLOW | Discouraged
    #  BLUE   | Info

A more complicated example: ::

    from pavilion import utils
    import sys

    rows = [
        {'color': 'BLACK',   'code': 30, 'usage': 'Default'},
        {'color': 'RED',     'code': 31, 'usage': 'Fatal Errors'},
        {'color': 'GREEN',   'code': 32, 'usage': 'Warnings'},
        {'color': 'YELLOW',  'code': 33, 'usage': 'Discouraged'},
        {'color': 'BLUE',    'code': 34, 'usage': 'Info'},
        {'color': 'CYAN',    'code': 35},
        {'color': 'MAGENTA', 'code': 36},
    ]

    columns = ['color', 'code', 'usage']
    field_info = {
        # Colorize the color column with a transform function.
        'color': {
            'transform': lambda t: utils.ANSIString(t, utils.COLORS.get(t)),
        },
        # Format and add a better column header to the 'code' column.
        'code': {
            'title': 'ANSI Code',
            'format': '0x{0:x}',
        },
        # Put in a default for our missing usage values.
        # (The default is just to leave the column empty.)
        'usage': {
            'default': 'Whatever you want.'
        }
    }

    utils.draw_table(
        outfile=sys.stdout,
        field_info=field_info,
        fields=columns,
        rows=rows,
        # Add a border. Why not?
        border=True,
        # No padding between the data and column separators.
        pad=False,
        title="A Demo Table."
    )

    # Produces a table like this (plus with the color names in color):
    #
    # +-------+---------+------------------+
    # | A Demo Table.                      |
    # +-------+---------+------------------+
    # |Color  |ANSI Code|Usage             |
    # +-------+---------+------------------+
    # |BLACK  |0x1e     |Default           |
    # |RED    |0x1f     |Fatal Errors      |
    # |GREEN  |0x20     |Warnings          |
    # |YELLOW |0x21     |Discouraged       |
    # |BLUE   |0x22     |Info              |
    # |CYAN   |0x23     |Whatever you want.|
    # |MAGENTA|0x24     |Whatever you want.|
    # +-------+---------+------------------+
"""

    # Column widths populates with a range of values, the minimum being the
    # length of the given field title, and the max being the longest entry in
    # that column
    column_widths = {}
    titles = {}

    for field in fields:
        default_title = field.replace('_', ' ').capitalize()
        field_title = field_info.get(field, {}).get('title', default_title)
        # Gets the length of column title, adds it to the list of column
        # widths
        column_widths[field] = [len(field_title)]
        titles[field] = ANSIString(field_title)

    blank_row = {}
    for field in fields:
        blank_row[field] = ANSIString('')

    formatted_rows = []

    for row in rows:
        formatted_row = {}

        if row is None:
            # 'None' rows just produce an empty row.
            formatted_rows.append(blank_row)
            continue

        for field in fields:
            # Get the data, or it's default if provided.
            info = field_info.get(field, {})
            data = row.get(field, info.get('default', ''))
            # Transform the data, if a transform is given
            data = info.get('transform', lambda a: a)(data)
            # Format the data
            col_format = info.get('format', '{0}')
            try:
                formatted_data = col_format.format(data)
            except ValueError:
                print("Bad format for data. Format: {0}, data: {1}"
                      .format(col_format, repr(data)), file=sys.stderr)
                raise

            # Preserve any ANSI color code the transform attached.
            if isinstance(data, ANSIString):
                ansi_code = data.code
            else:
                ansi_code = None

            # Cast all data as ANSI strings, so we can get accurate lengths
            # and use ANSI friendly text wrapping.
            data = ANSIString(formatted_data, code=ansi_code)

            # Appends the length of all rows at a given field longer than the
            # title. Effectively forces that the minimum column width be no
            # less than the title.
            if len(data) > len(titles[field]):
                column_widths[field].append(len(data))

            formatted_row[field] = data
        formatted_rows.append(formatted_row)

    rows = formatted_rows
    # The title row is rendered like any other row, at index 0.
    rows.insert(0, titles)

    # Gets dictionary with largest width, and smallest width for each field.
    # Also updates the default column_Widths dictionary to hold the max values
    # for each column.
    max_widths = {field: max(widths)
                  for field, widths in column_widths.items()}
    min_widths = {}

    for field in fields:
        # The minimum width of a column is the length of its longest single
        # word (capped at MAX_WORD_LEN), so wrapping never splits a word.
        all_words = sum([row[field].split() for row in rows], [])
        longest_word = ''
        for word in all_words:
            if len(word) > len(longest_word):
                longest_word = word

        min_widths[field] = min(MAX_WORD_LEN, len(longest_word))

    for field in fields:
        # If user specified ignoring wrapping on a given field, it will set
        # the minimum width equal to the largest entry in that field.
        if 'no_wrap' in field_info.get(field, {}).keys():
            min_widths[field] = max_widths[field]
        # If user defined a max width for a given field it overrides the
        # maximum width here.
        if 'max_width' in field_info.get(field, {}).keys():
            max_widths[field] = field_info[field]['max_width']
        # If user defined a min width for a given field it overrides the
        # minimum width here.
        if 'min_width' in field_info.get(field, {}).keys():
            min_widths[field] = field_info[field]['min_width']
        # Ensures that the max width for a given field is always larger or
        # at least equal to the minimum field width.
        if max_widths[field] < min_widths[field]:
            max_widths[field] = min_widths[field]

    # Add divider widths
    divider_size = 3 if pad else 1
    deco_size = divider_size * (len(fields) - 1)
    # Add the side spacing
    deco_size += 2 if pad else 0
    # Add border widths
    if border:
        border_size = 2 if pad else 1
        deco_size += border_size * 2

    # Gets the effective window width.
    if table_width is None:
        table_width = shutil.get_terminal_size().columns
    table_width -= deco_size

    # Makes sure window is at least large enough to display are smallest
    # possible table
    total_min = sum(min_widths.values())
    if total_min > table_width:
        table_width = total_min

    extra_spaces = table_width - sum(min_widths.values())
    final_widths = min_widths.copy()

    def calc_wraps(fld_, width_):
        """Calculate the wraps for a given field at the given width across
        all rows."""
        return sum([len(row_[fld_].wrap(width=width_))
                    for row_ in formatted_rows])

    # Memoizes calc_wraps results: field -> {width: wrap count}.
    field_wraps_by_width = defaultdict(dict)

    incr = 1
    # Consume the additional spaces available by growing the columns according
    # to which column would benefit the most from the extra space. If there
    # is a tie, increase the number of spaces considered.
    growable_fields = fields.copy()
    while extra_spaces and growable_fields:
        best_fields = []
        best_diff = 0

        # Find the 'best_fields' to add 'incr' byte to.
        for field in growable_fields.copy():
            curr_width = final_widths[field]
            incr_width = curr_width + incr
            max_width = max_widths[field]
            if curr_width == max_width:
                # Already at its cap; never consider it again.
                growable_fields.remove(field)
                continue

            curr_wraps = field_wraps_by_width[field].get(
                curr_width, calc_wraps(field, curr_width))
            # Make sure we don't exceed the max width for the column.
            incr_wraps = field_wraps_by_width[field].get(
                incr_width, calc_wraps(field, curr_width + incr))

            # How many wrapped lines this increase would save.
            diff = (curr_wraps-incr_wraps)

            if incr_width > max_width:
                # Don't consider this column for an increase if the increase
                # exceeds the max width for the column.
                continue
            elif incr_width == max_width and diff == 0:
                # Increasing the width of this column won't help. Skip it from
                # now on.
                growable_fields.remove(field)
                continue

            # If this field beats all previous, make it the best.
            if diff > best_diff:
                best_diff = diff
                best_fields = [field]
            # Don't consider fields whose diff is 0.
            elif diff == 0:
                continue
            # If we tie, add it to the list of the best.
            elif diff == best_diff:
                best_fields.append(field)

        if len(best_fields) == 1:
            # Add incr bytes to the winner
            extra_spaces -= incr
            final_widths[best_fields[0]] += incr
            incr = 1
        elif incr == extra_spaces:
            # If we've run out of bytes to consider, distribute them evenly
            # amongst the tied winners.
            extra_spaces -= incr
            for field in best_fields:
                final_widths[field] += incr//len(best_fields)
        else:
            # Otherwise, increase the increment and try again.
            incr += 1

    title_length = sum(final_widths.values())

    if pad:
        title_length = title_length + 2 * len(fields)

    title_format = ' {{0:{0}s}} '.format(title_length)

    # Add 2 dashes to each break line if we're padding the data
    brk_pad_extra = 2 if pad else 0
    horizontal_break = '+'.join(['-' * (final_widths[field] + brk_pad_extra)
                                 for field in fields])
    if border:
        horizontal_break = '+' + horizontal_break + '+'
        title_format = '|' + title_format + '|'

    horizontal_break += '\n'
    title_format += '\n'

    try:
        if border:
            outfile.write(horizontal_break)
        if title:
            outfile.write(title_format.format(title))

        outfile.write(horizontal_break)

        for row_i in range(len(rows)):
            row = rows[row_i]

            wrap_rows = defaultdict(lambda: defaultdict(lambda: ''))
            # Creates wrap list that holds list of strings for the wrapped
            # text
            for field in fields:
                wraps = row[field].wrap(width=final_widths[field])
                for wrap_i in range(len(wraps)):
                    wrap_row = wrap_rows[wrap_i]
                    wrap_row[field] = wraps[wrap_i]

            # Turn the wrapped rows into a list sorted by index
            wrap_rows = [wrap_rows[i] for i in sorted(list(wrap_rows.keys()))]

            for wrap_row in wrap_rows:
                outfile.write(format_row(wrap_row, fields, final_widths, pad,
                                         border))

            # Row 0 is the title row; underline it.
            if row_i == 0:
                outfile.write(horizontal_break)

    except IOError:
        # We may get a broken pipe, especially when the output is piped to
        # something like head. It's ok, just move along.
        pass
#UZR - Universal Realtime Zone #requires python 3 import sys import time import shutil #for terminal width import datetime import calendar TERMINAL_WIDTH, _ = shutil.get_terminal_size() t = time.localtime() localOffsetSeconds = calendar.timegm(t) - calendar.timegm( time.gmtime(time.mktime(t))) if t.tm_isdst: localOffsetSeconds -= 3600 #get base offset without DST if len(sys.argv) >= 2: localOffsetSeconds = int(sys.argv[1]) print("UZR - Universal Realtime Zone (%d seconds offset)" % localOffsetSeconds) def getUZR(offsetSeconds=0): n = datetime.datetime.now(datetime.timezone.utc) n += datetime.timedelta(0, offsetSeconds) if (n.month == 3) and (n.day >= 2) and (n.day <= 31): n += datetime.timedelta( 0, (n.day - 1) * 2 * 60) #03/02: add 2 minutes ... 03/31: add 60 minutes elif (n.month >= 4) and (n.month <= 10): n += datetime.timedelta(0, 3600) #usual DST return n
def __init__(self, *args, **kwargs):
    """Configure the help formatter to use (almost) the full terminal width.

    The fixed values keep the option/help alignment in sync with the
    description layout used by __init__.parseopts.
    """
    # help position must be aligned with __init__.parseopts.description
    kwargs.update(
        max_help_position=30,
        indent_increment=1,
        width=shutil.get_terminal_size()[0] - 2,
    )
    super().__init__(*args, **kwargs)
def init_cli():
    """Initialize the CLI: collect credentials, tier, protocol and DNS
    settings interactively, then persist them to the config file."""

    def init_config_file():
        """Initialize configuration file."""
        config = configparser.ConfigParser()
        # "initialized" stays 0 until the whole flow completes successfully.
        config["USER"] = {
            "username": "******",
            "tier": "None",
            "default_protocol": "None",
            "initialized": "0",
            "dns_leak_protection": "1",
            "custom_dns": "None",
            "check_update_interval": "3",
        }
        config["metadata"] = {
            "last_api_pull": "0",
            "last_update_check": str(int(time.time())),
        }
        with open(CONFIG_FILE, "w") as f:
            config.write(f)
        change_file_owner(CONFIG_FILE)
        logger.debug("pvpn-cli.cfg initialized")

    check_root()

    if not os.path.isdir(CONFIG_DIR):
        os.mkdir(CONFIG_DIR)
        logger.debug("Config Directory created")
        change_file_owner(CONFIG_DIR)

    # Warn user about reinitialization
    try:
        if int(get_config_value("USER", "initialized")):
            print("An initialized profile has been found.")
            overwrite = input(
                "Are you sure you want to overwrite that profile? [y/N]: ")
            if overwrite.strip().lower() != "y":
                print("Quitting...")
                sys.exit(1)
    except KeyError:
        # No config / no "initialized" key yet: nothing to overwrite.
        pass

    term_width = shutil.get_terminal_size()[0]
    print("[ -- PROTONVPN-CLI INIT -- ]\n".center(term_width))

    init_msg = (
        "ProtonVPN uses two different sets of credentials, one for the "
        "website and official apps where the username is most likely your "
        "e-mail, and one for connecting to the VPN servers.\n\n"
        "You can find the OpenVPN credentials at "
        "https://account.protonvpn.com/settings.\n\n"
        "--- Please make sure to use the OpenVPN credentials ---\n"
    ).splitlines()

    # Wrap each paragraph line to the terminal width.
    for line in init_msg:
        print(textwrap.fill(line, width=term_width))

    # Set ProtonVPN Username and Password
    ovpn_username, ovpn_password = set_username_password(write=False)
    # Set the ProtonVPN Plan
    user_tier = set_protonvpn_tier(write=False)
    # Set default Protocol
    user_protocol = set_default_protocol(write=False)
    # Enable or disable DNS Leak Protection
    dns_leak_protection, custom_dns = set_dns_protection(write=False)

    protonvpn_plans = {1: "Free", 2: "Basic", 3: "Plus", 4: "Visionary"}

    # Echo everything back (password masked) for confirmation before writing.
    print()
    print(
        "You entered the following information:\n",
        "Username: {0}\n".format(ovpn_username),
        "Password: {0}\n".format("*" * len(ovpn_password)),
        "Tier: {0}\n".format(protonvpn_plans[user_tier]),
        "Default protocol: {0}\n".format(user_protocol.upper()),
        "DNS Leak Protection: {0}\n".format(
            'On' if dns_leak_protection else 'Off'),
    )
    if custom_dns:
        print("Custom DNS: {0}\n".format(custom_dns))
    else:
        print()

    user_confirmation = input(
        "Is this information correct? [Y/n]: ").strip().lower()

    if user_confirmation == "y" or user_confirmation == "":
        print("Writing configuration to disk...")
        init_config_file()

        pull_server_data()
        make_ovpn_template()

        # Change user tier to correct value:
        # Visionary (4) maps to Plus (3); stored tiers are zero-indexed.
        if user_tier == 4:
            user_tier = 3
        user_tier -= 1

        set_config_value("USER", "username", ovpn_username)
        set_config_value("USER", "tier", user_tier)
        set_config_value("USER", "default_protocol", user_protocol)
        set_config_value("USER", "dns_leak_protection", dns_leak_protection)
        set_config_value("USER", "custom_dns", custom_dns)
        set_config_value("USER", "killswitch", 0)

        # Credentials go in a separate passfile, readable by owner only.
        with open(PASSFILE, "w") as f:
            f.write("{0}\n{1}".format(ovpn_username, ovpn_password))
        logger.debug("Passfile created")
        os.chmod(PASSFILE, 0o600)

        # Mark the profile as initialized only after everything succeeded.
        set_config_value("USER", "initialized", 1)

        print()
        print("Done! Your account has been successfully initialized.")
        logger.debug("Initialization completed.")
    else:
        print()
        print("Please restart the initialization process.")
        sys.exit(1)
def lines(self, line_length=None):
    """
    Generates a list with lines. These lines form the text drawing.
    Args:
        line_length (int): Optional. Breaks the circuit drawing to this length. This
                           useful when the drawing does not fit in the console. If
                           None (default), it will try to guess the console width using
                           shutil.get_terminal_size(). If you don't want pagination
                           at all, set line_length=-1.
    Returns:
        list: A list of lines with the text drawing.
    """
    if line_length is None:
        line_length = self.line_length
    if not line_length:
        # In an ipykernel (e.g. Jupyter, but not Spyder) there is no real
        # terminal, so fall back to a fixed 80-column width.
        if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
            line_length = 80
        else:
            line_length, _ = get_terminal_size()

    noqubits = len(self.qregs)

    layers = self.build_layers()

    # Group layers into "pages": each group is at most line_length wide.
    layer_groups = [[]]
    rest_of_the_line = line_length  # columns remaining in the current group
    for layerno, layer in enumerate(layers):
        # Replace the Nones with EmptyWire
        layers[layerno] = EmptyWire.fillup_layer(layer, noqubits)

        TextDrawing.normalize_width(layer)

        if line_length == -1:
            # Do not use pagination (aka line breaking. aka ignore line_length).
            layer_groups[-1].append(layer)
            continue

        # chop the layer to the line_length (pager)
        layer_length = layers[layerno][0].length

        if layer_length < rest_of_the_line:
            # The layer fits on the current page.
            layer_groups[-1].append(layer)
            rest_of_the_line -= layer_length
        else:
            # Close the current page with a '»' break marker ...
            layer_groups[-1].append(BreakWire.fillup_layer(len(layer), '»'))

            # New group: opens with a '«' continuation marker, then the wire
            # labels, then the layer that did not fit on the previous page.
            layer_groups.append([BreakWire.fillup_layer(len(layer), '«')])
            rest_of_the_line = line_length - layer_groups[-1][-1][0].length

            layer_groups[-1].append(
                InputWire.fillup_layer(self.wire_names(with_initial_value=False)))
            rest_of_the_line -= layer_groups[-1][-1][0].length

            layer_groups[-1].append(layer)
            rest_of_the_line -= layer_groups[-1][-1][0].length

    # Render each page: zip the layers column-wise into wires, then draw.
    lines = []
    for layer_group in layer_groups:
        wires = [i for i in zip(*layer_group)]
        lines += TextDrawing.draw_wires(wires, self.vertically_compressed)

    return lines
def size(self):
    """Return half the current terminal height, in rows."""
    # TODO: Support small, medium, large image sizes
    rows = shutil.get_terminal_size()[1]
    return rows // 2
def main():
    """Only called when the module is called directly as a script.

    Builds the argparse CLI (analyse / demo / show subcommands), parses the
    arguments and dispatches to the selected subcommand's handler.
    """
    main_parser = argparse.ArgumentParser(
        prog="headerexposer",
        description=f"{BANNER}\nAnalyse the security of your website's"
        " headers!",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="If you want to write a new baseline.json,"
        " consider using baseline_schema.json\n"
        f"({he.BASELINE_SCHEMA_PATH}) "
        "as documentation.\n\n"
        "Authors:\n"
        " * Frédéric Proux, senior pentester at Beijaflore\n"
        " * Alexandre Janvrin, pentester at Beijaflore\n"
        " (https://www.beijaflore.com/en/)\n\n"
        "License: AGPLv3+\n\n"
        'This software is provided "as is", without '
        "any warranty of any kind, express or implied.\n"
        "For more information, please consult "
        "https://github.com/LivinParadoX/headerexposer.",
    )

    subparsers = main_parser.add_subparsers(
        title="commands",
        description="Use [command] -h for additional help.",
        dest="command",
    )

    analysis = subparsers.add_parser(
        "analyse", help="Analyse a given url's headers."
    )

    demo = subparsers.add_parser(
        "demo",
        help="Show a demonstration of what would be printed for sample"
        " headers with the selected baseline.json.",
    )

    show = subparsers.add_parser(
        "show", help="Show the selected baseline without doing any analysis."
    )

    # Each subcommand dispatches to its handler through args.func.
    analysis.set_defaults(func=analyse)
    demo.set_defaults(func=baseline_demo)
    show.set_defaults(func=show_baseline)

    # Okay this may seem ugly but I want this argument available
    # *everywhere*.
    for parser in [main_parser, analysis, demo, show]:
        with resources.path("headerexposer",
                            "baseline_short.json") as baseline_path:
            parser.add_argument(
                "-b",
                "--baseline-path",
                help="Path to the baseline.json file for the header analysis"
                f" (default: {baseline_path}).",
                default=baseline_path,
            )

    # Options controlling the HTTP request made by the "analyse" command.
    request_options = analysis.add_argument_group("request options")

    request_options.add_argument(
        "-m",
        "--method",
        help='HTTP method to use for the request. Default: "GET".',
        choices=["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"],
        default="GET",
    )

    request_options.add_argument(
        "--params",
        help="Add multiple, ampersand-separated parameters to the request.",
    )

    group = request_options.add_mutually_exclusive_group()

    group.add_argument(
        "-d",
        "--data",
        help="Data to append to the request."
        " Mutually exclusive with --file.",
    )

    group.add_argument(
        "-f",
        "--file",
        help="Path to a file to append to the request."
        " Mutually exclusive with --data.",
    )

    request_options.add_argument(
        "-H",
        "--headers",
        help="Add multiple, newline-separated HTTP headers to the request.",
    )

    request_options.add_argument(
        "-C",
        "--cookies",
        help="Add multiple, semicolon-separated cookies to the request.",
    )

    request_options.add_argument(
        "-U",
        "--username",
        help="username to use in Basic/Digest/Custom HTTP Authentication.",
    )

    request_options.add_argument(
        "-P",
        "--password",
        help="password to use in Basic/Digest/Custom HTTP Authentication.",
    )

    request_options.add_argument(
        "-t",
        "--timeout",
        type=float,
        help="How many seconds to wait for the server to send data"
        " before giving up, as float.",
    )

    request_options.add_argument(
        "-r",
        "--disallow-redirects",
        action="store_true",
        help="Disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection."
        " Defaults to enabled redirection.",
    )

    request_options.add_argument(
        "-p", "--proxy", help="Proxy to use for the request."
    )

    request_options.add_argument(
        "-k",
        "--verify",
        action="store_true",
        help="Verify SSL certificates. Defaults to an insecure behavior.",
    )

    request_options.add_argument(
        "-c",
        "--cert",
        help="Optional path to the SSL client .pem certificate"
        " for client authentication.",
    )

    request_options.add_argument(
        "-a",
        "--user-agent",
        help="User Agent to use."
        " Defaults to a recent Google Chrome user agent.",
        default="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1"
        " (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1",
    )

    analysis.add_argument("url", help="The url to test.")

    # Okay this may seem ugly but I want these argument available
    # *everywhere*. And at the end, not like --baseline-path.
    for parser in [main_parser, analysis, demo, show]:
        output_options = parser.add_argument_group("output options")

        output_options.add_argument(
            "--detailed",
            action="store_true",
            help="Print additional details: the request parameters,"
            " the response details,"
            " headers' descriptions, and references.",
        )

        output_options.add_argument(
            "--no-explanation-colors",
            action="store_true",
            help="Suppress colors in explanations, except in reference links.",
        )

        output_options.add_argument(
            "-w",
            "--max-width",
            type=int,
            help="The maximum width of the output. Defaults to the screen"
            f" width ({shutil.get_terminal_size().columns} columns).",
            default=shutil.get_terminal_size().columns,
        )

    args = main_parser.parse_args()

    if args.command is None:
        # No subcommand given: just show the help text.
        main_parser.print_help()
    else:
        # Ugly hack from when the default behavior was changed:
        # "short" output is simply the absence of --detailed.
        args.short = not args.detailed
        if args.detailed:
            # NOTE(review): --detailed re-points the baseline at
            # baseline_short.json, same file as the default — looks like it
            # should select a *full* baseline instead. Confirm intent.
            with resources.path("headerexposer",
                                "baseline_short.json") as baseline_path:
                args.baseline_path = baseline_path

        baseline = he.load_baseline(
            args.baseline_path, args.no_explanation_colors
        )

        if not args.short:
            print(BANNER)

        args.func(args, baseline)
def __init__(self, parent):
    """Remember the parent object and size output to the terminal."""
    self._parent = parent
    # 80x20 is the fallback when the terminal size cannot be queried;
    # keep a 5-column right-hand margin.
    cols = shutil.get_terminal_size((80, 20)).columns
    self.width = cols - 5
def _get_status_message(self) -> str:
    """
    Creates a string that combines the status messages of all threads.
    The current download progress of a file is displayed in percent
    per Thread.
    A total display is also created, showing the total amount downloaded
    in relation to what still needs to be downloaded.
    @return: A status message string
    """
    # to limit the output to one line
    limits = shutil.get_terminal_size()

    # Starting with a carriage return to overwrite the last message
    # (moves the cursor up one row per thread line printed previously).
    progressmessage = f'\033[{len(self.threads)}A\r'

    threads_status_message = ''
    threads_total_downloaded = 0
    for thread in self.threads:
        i = thread.thread_id
        # A thread status contains its id and the progress
        # of the current file
        thread_percentage = self.thread_report[i]['percentage']
        thread_current_url = self.thread_report[i]['current_url']
        if self.thread_report[i]['external_dl'] is not None:
            thread_current_url = 'ExtDL: ' + self.thread_report[i]['external_dl']

        if not thread.is_alive():
            thread_percentage = 100
            thread_current_url = 'Finished!'

        # Truncate the URL so the whole status line fits the terminal
        # (13 columns are taken by the "T##: ###% - " prefix).
        if len(thread_current_url) + 13 > limits.columns:
            thread_current_url = thread_current_url[0 : limits.columns - 15] + '..'

        # \033[K clears the rest of the line before writing over it.
        threads_status_message += '\033[KT%2i: %3i%% - %s\n' % (i, thread_percentage, thread_current_url)

        threads_total_downloaded += self.thread_report[i]['total']

        # A thread may discover additional bytes to download mid-run;
        # fold them into the total exactly once (-1 marks "consumed").
        extra_totalsize = self.thread_report[i]['extra_totalsize']
        if extra_totalsize is not None and extra_totalsize != -1:
            self.total_to_download += extra_totalsize
            self.thread_report[i]['extra_totalsize'] = -1

    progressmessage += threads_status_message

    percentage = 100
    if self.total_to_download != 0:
        percentage = int(threads_total_downloaded * 100 / self.total_to_download)

    # The overall progress also includes the total size that needs to be
    # downloaded and the size that has already been downloaded.
    progressmessage_line = 'Total: %3s%% %12s/%12s' % (
        percentage,
        format_bytes(threads_total_downloaded),
        format_bytes(self.total_to_download),
    )

    progressmessage_line += ' | Files: %5s/%5s' % (len(self.report['success']), self.total_files)

    # Speed is derived from bytes downloaded since the last status update.
    diff_to_last_status = threads_total_downloaded - self.last_threads_total_downloaded

    speed = self.calc_speed(self.last_status_timestamp, time.time(), diff_to_last_status)
    progressmessage_line += ' | ' + self.format_speed(speed)

    if len(progressmessage_line) > limits.columns:
        progressmessage_line = progressmessage_line[0 : limits.columns]
    progressmessage_line = '\033[K' + progressmessage_line

    progressmessage += progressmessage_line

    # Remember this snapshot for the next speed calculation.
    self.last_status_timestamp = time.time()
    self.last_threads_total_downloaded = threads_total_downloaded

    return progressmessage
# Terminal-menu front end: picks an ability system via simple_term_menu.
import shutil
import re
from simple_term_menu import TerminalMenu
from abilities import *
from dungeons_and_dragons import *
from cyberpunk_2077 import *
from disco_elysium import *

if __name__ == "__main__":
    # Full terminal width; (80, 20) is the fallback when no tty is attached.
    menu_width=shutil.get_terminal_size((80,20))[0]
    # Style options forwarded to TerminalMenu.
    # NOTE(review): this snippet is truncated in the source — the dict
    # literal below is incomplete and ends mid-entry.
    menu_style={
        "menu_cursor":None,
        "menu_cursor_style":None, # The style of the shown cursor.
        "menu_highlight_style":(
            "bg_black",
            "bold",
        ), # The style of the selected menu entry.
        "search_key":None,
        "search_highlight_style":(
            "fg_red",
        ), # The style of matched search strings.
        "show_shortcut_hints":False,
        "shortcut_key_highlight_style":(
def __init__(self, prog):
    """Help formatter that tracks the current terminal width."""
    cols = shutil.get_terminal_size().columns
    # argparse consults the COLUMNS environment variable when wrapping help.
    os.environ['COLUMNS'] = str(cols)
    # Keep the option column between 24 and 40 chars, about 1/3 of the width.
    super().__init__(prog, max_help_position=min(max(24, cols // 3), 40))
def get_terminal_height():
    """Return the terminal height in rows (stdlib falls back to 24)."""
    return shutil.get_terminal_size()[1]
def _generate_table_report_adv(self, results, show_mem=True, show_inside_server=True):
    """Render a plain-text performance report table for *results*.

    Args:
        results: iterable of benchmark result objects exposing ``name``,
            ``samples``, ``iterations``, ``stats_obj`` and ``server_stats``
            (a JSON string or falsy).
        show_mem: include "used (MB)" / "total (MB)" memory columns.
        show_inside_server: include the server-side "prediction" timing column.

    Returns:
        str: the drawn Texttable.
    """
    # Fix: removed the dead `tbl_report = "=" * 52 + "\n"` store, which was
    # unconditionally overwritten before use.
    table = Texttable()
    table.set_deco(Texttable.HEADER)

    header_names = ["size", "samples", "iters", "min", "avg", "max"]
    header_types = ["t", "i", "i", "f", "f", "f"]
    col_align = ["l", "r", "r", "r", "r", "r"]

    if show_mem:
        header_names.extend(["used (MB)", "total (MB)"])
        header_types.extend(["f", "f"])
        col_align.extend(["r", "r"])
    if show_inside_server:
        header_names.extend(["prediction"])
        header_types.extend(["f"])
        col_align.extend(["r"])

    table.set_cols_dtype(header_types)
    table.set_cols_align(col_align)
    try:
        # Best effort: cap the table to the terminal width; keep Texttable's
        # default width if the size cannot be determined.
        terminal_size = shutil.get_terminal_size()
        table.set_max_width(terminal_size.columns)
    except Exception:
        pass

    rows = [header_names]
    for res in results:
        row = [res.name, res.samples, res.iterations]
        d = res.stats_obj.dict_report(CMRunTests.REPORT_NAME)
        row.extend([d["min"], d["avg"], d["max"]])
        server_stats = json.loads(res.server_stats) if res.server_stats else None
        if show_mem:
            if server_stats and "mem_info" in server_stats:
                mem_info = server_stats["mem_info"]
                row.extend([mem_info["drum_rss"], mem_info["total"]])
            else:
                row.extend([CMRunTests.NA_VALUE, CMRunTests.NA_VALUE])
        rows.append(row)
        # `row` is still the appended object, so extending it here keeps the
        # original column order (prediction column last).
        if show_inside_server:
            if server_stats:
                time_info = server_stats["time_info"]
                row.extend([time_info["run_predictor_total"]["avg"]])
            else:
                row.extend([CMRunTests.NA_VALUE])

    table.add_rows(rows)
    return table.draw()
def print_report(stats: DedupStats) -> None:
    """Print a summary of the findings to stdout.

    Args:
        stats: cumulative statistics on files scanned and duplicates found.
    """

    def print_count_and_size(count: int, size: int, path: str) -> None:
        """Print the line item of the report."""
        file_count = abbrev_count(count, use_SI=True)
        file_size = '(' + abbrev_count(size, use_SI=False) + ')'
        print(f'{file_count:>{file_count_width}}',
              ' ',
              f'{file_size:>{file_size_width}}',
              ' ' * gutter_width,
              path,
              sep='')

    # A sample report might look like this:
    #
    #      Duplicates      Directory
    # ------------------   --------------------------------------------------------
    # 112.3K  (123.1 MiB)  /home/users/someone/desktop/wallpaper
    #     38     (37 MiB)  \\server-name\share\username\LongName...\Vacation\Photos
    #      1     (1023 B)  /home/users/userb/files/music/workout
    # ------------------   --------------------------------------------------------
    # 112.3K    (160 MiB)  Total
    #
    # Scanned 123,456,789 files in 12,345 directories
    # Removed 32 empty directories
    # Completed in HH:MM:SS
    #
    # Note the elision in the first directory name: the width is made to fit the
    # current console width (down to MIN_WIDTH).
    # if stdout has been redirected then the fallback width will be used.

    # TODO max of this len and localized "Duplicates" string

    # Directories are listed biggest duplicate payload first.
    dir_stat_recs = sorted(stats.dir_stats.values(),
                           key=lambda s: s.dup_file_size,
                           reverse=True)

    longest_target_path = 0
    total_file_count = 0
    total_dir_count = 0
    total_dup_files = 0
    total_dup_size = 0
    total_empty_dirs = 0
    for dir_stat in dir_stat_recs:
        # Duplicate accounting applies to target directories only;
        # NOTE(review): scanned-file/dir totals are accumulated for every
        # directory record — confirm against the original formatting.
        if dir_stat.is_target_dir:
            longest_target_path = max(longest_target_path,
                                      len(dir_stat.directory))
            total_dup_files += dir_stat.dup_file_count
            total_dup_size += dir_stat.dup_file_size
            total_empty_dirs += dir_stat.empty_dir_count
        total_file_count += dir_stat.total_file_count
        total_dir_count += dir_stat.total_dir_count

    # determine the widths of the columns
    file_count_width = len('123.4K')
    file_size_width = len('(123.4 MiB)')
    duplicates_col_width = file_count_width + len(' ') + file_size_width
    gutter_width = 2
    fallback_width = duplicates_col_width + gutter_width + 4096

    # compute space available to the directory column, but use no more than
    # required to show the longest path
    term_width = shutil.get_terminal_size((fallback_width, 25)).columns
    dir_col_width = min(term_width - (duplicates_col_width + gutter_width),
                        longest_target_path)

    print(f'{"Duplicates":^{duplicates_col_width}}', end=' ' * gutter_width)
    print('Directory')
    hline = '-' * duplicates_col_width + ' ' * gutter_width + '-' * dir_col_width
    print(hline)

    # print duplicate count, size, and directory
    for s in dir_stat_recs:
        if s.is_target_dir:
            print_count_and_size(s.dup_file_count, s.dup_file_size,
                                 abbrev_path(s.directory, dir_col_width))

    print(hline)
    print_count_and_size(total_dup_files, total_dup_size, 'Total')
    print()

    # TODO localize thousands separator
    print(
        f'Scanned {total_file_count:,} files in {total_dir_count:,} directories'
    )
    if total_empty_dirs == 1:
        print('Removed 1 empty directory')
    elif total_empty_dirs > 1:
        print(f'Removed {total_empty_dirs} empty directories')
    print(
        f'Completed in {str(datetime.timedelta(seconds=int(stats.runtime)))}')