def visit_immutation(self, node, children):
    """Handle an ``immutation`` parse node: preview or execute the request.

    :param node: the parse node being visited.
    :param children: visited children; ``children[0].expr_name`` selects
        the behaviour ('preview' prints the shell command, 'action' runs it).
    :return: ``node`` unchanged (visitor convention).
    """
    context = self._final_context()
    child_type = children[0].expr_name
    if child_type == 'preview':
        # Only print the equivalent shell command for the configured tool.
        if self.tool == 'httpie':
            command = ['http'] + context.httpie_args(self.method, quote=True)
        else:
            assert self.tool == 'curl'
            command = ['curl'] + context.curl_args(self.method, quote=True)
        click.echo(' '.join(command))
    elif child_type == 'action':
        # Run httpie in-process, capturing its stdout into a byte buffer.
        output = BytesIO()
        try:
            env = Environment(stdout=output, is_windows=False)
            httpie_main(context.httpie_args(self.method), env=env)
            content = output.getvalue()
        finally:
            output.close()
        # XXX: Work around a bug of click.echo_via_pager(). When you pass
        # a bytestring to echo_via_pager(), it converts the bytestring with
        # str(b'abc'), which makes it "b'abc'".
        if six.PY2:
            content = unicode(content, 'utf-8')  # noqa
        else:
            content = str(content, 'utf-8')
        click.echo_via_pager(content)
    return node
def print_formated(fmatches, format):
    """Print file/comment matches in the requested output format.

    :param fmatches: mapping of ``filename -> {comment_type: {line_no: text}}``.
    :param format: 'json', 'yaml', 'plain', or anything else for styled,
        paged output.  (The name shadows the builtin but is kept for
        backward compatibility with callers.)
    """
    if format == 'json':
        # Imported lazily so the cost is only paid when the format is used.
        from json import dumps
        click.echo(dumps(fmatches, sort_keys=True, indent=2,
                         separators=(',', ': ')))
    elif format == 'yaml':
        from yaml import dump
        click.echo(dump(fmatches, default_flow_style=False, width=80,
                        indent=4))
    else:
        to_print = []
        for filename, comments in sorted(fmatches.items()):
            to_print.append(click.style(filename, fg='red', bold=True))
            # Renamed from `type` to avoid shadowing the builtin.
            for comment_type, lines in sorted(comments.items()):
                to_print.append(click.style(comment_type, fg='blue', bold=True))
                for number, content in sorted(lines.items()):
                    to_print.append(' '.join([
                        click.style(' line %d -' % number, fg='white'),
                        click.style(content, bold=True)
                    ]))
            to_print.append('\n')
        if format == 'plain':
            click.echo('\n'.join(to_print))
        else:
            click.echo_via_pager('\n'.join(to_print))
def check_result(returncode, output='', success_message='DONE',
                 error_message='ERROR', raise_exception=True,
                 ask_for_details=True, show_details=True,
                 ):
    """Report a subprocess result to the user.

    On success prints ``success_message``; on failure prints
    ``error_message``, optionally offers/pages the captured ``output``,
    and finally raises a ClickException when ``raise_exception`` is set.
    """
    if returncode == 0:
        click.secho(success_message, fg='green')
        return
    click.secho(error_message, fg='red')
    if ask_for_details:
        wants_details = click.confirm(
            ' Show details?',
            default=False,
            abort=False,
            prompt_suffix=' ',
            show_default=True,
            err=False,
        )
        if wants_details:
            click.echo_via_pager(output)
    elif show_details and output:
        click.echo(output)
    if raise_exception:
        raise click.ClickException(
            'Something went wrong, please look at the logs.')
def main():
    """Summarize text."""
    # todo: use argparse or click or something like that
    input_dict = get_input()
    summarizer = Summarizer()
    result = summarizer.summarize(
        input_dict['text'],
        input_dict['title'],
        'Undefined',
        'Undefined',
    )
    result = summarizer.sortScore(result)
    result = summarizer.sortSentences(result[:30])
    # todo: paginate this output
    click.clear()
    # Assemble the summary from parts and join once.
    parts = ["Summary of '%s':\n\n" % input_dict['title']]
    parts.extend(item['sentence'] + "\n\n" for item in result)
    parts.append("Press [q] to exit.")
    click.echo_via_pager(''.join(parts))
    click.clear()
def paged_display(source, sep='\n'):
    """Display paginated text so that the user can scroll through it;
    works with any iterable"""
    if not source:
        return
    click.echo_via_pager(sep.join(map(str, source)))
def delete(config, a, se):
    """ Deletes all the directory content (files, dirs) """
    # `a` presumably means "all" and `se` "show errors" — confirm against
    # the CLI option declarations (not visible here).
    if a and click.confirm("Delete all contents of " + config.dir_to_use + " ?"):
        click.echo("Attempting to delete: " + str(analyzer.get_entries_count()) + " entries...\n")
        cleaner.delete_dir_content(config.dir_to_use)
        filemanager.write_cleanup_report(cleaner.cleanup_data, config.app_dir)
        filemanager.pickle_data("last-cleanup", cleaner.cleanup_data, config.app_dir)  # Make clean up data persistent
        click.echo("\nDeletion complete!")
        click.echo("* Deletions: " + str(cleaner.cleanup_data["deletions"]))
        click.echo("* Deletion size: " + converter.human_readable_size(cleaner.cleanup_data["size"]))
        click.echo("* Errors: " + str(cleaner.cleanup_data["error_count"]))
    if se:
        # Replay the persisted error report from the most recent cleanup.
        try:
            last_cleanup = filemanager.unpickle_data("last-cleanup")
            click.echo("Errors encountered during the last deletion [" + last_cleanup["datetime"] + "]:")
            click.echo("Total: " + str(last_cleanup["error_count"]) + "\n")
            click.echo_via_pager("\n\n".join("* %s" % error for error in last_cleanup["errors"]))
        except FileNotFoundError:
            click.echo("No error data was found.")
def problem_035(limit, verbose):
    """Circular primes.

    The number, 197, is called a circular prime because all rotations of
    the digits: 197, 971, and 719, are themselves prime.

    There are thirteen such primes below 100:
    2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.

    How many circular primes are there below one million?
    """
    cache = PrimeCache(limit + 1)
    cache.ensure_factors_for(limit)
    primes = set(cache.primes)
    with click.progressbar(primes) as bar:
        circular = [p for p in bar if is_circular_prime(primes, p)]
    if verbose > 0:
        # In verbose mode, list each circular prime with all its rotations.
        circular.sort()
        lines = ("{}: {}".format(p, ', '.join(str(n) for n in rotate_number(p)))
                 for p in circular)
        result = '\n'.join(lines)
        if len(circular) > 100:
            click.echo_via_pager(result)
        else:
            click.echo(result)
    # The answer: how many circular primes exist below `limit`.
    click.echo(len(circular))
def timeline(ctx, pager, limit, twtfile, sorting, timeout, porcelain, source, cache):
    """Retrieve your personal timeline."""
    if source:
        # A single explicit source: look up by nick first, fall back to
        # treating the argument as a URL.
        source_obj = ctx.obj["conf"].get_source_by_nick(source)
        if not source_obj:
            logger.debug("Not following {0}, trying as URL".format(source))
            source_obj = Source(source, source)
        sources = [source_obj]
    else:
        sources = ctx.obj["conf"].following
    tweets = get_remote_tweets(sources, limit, timeout, cache)
    if twtfile and not source:
        # Mix in the user's own local tweets unless a specific source was given.
        source = Source(ctx.obj["conf"].nick, ctx.obj["conf"].twturl,
                        file=twtfile)
        tweets.extend(get_local_tweets(source, limit))
    tweets = sort_and_truncate_tweets(tweets, sorting, limit)
    if not tweets:
        return
    if pager:
        click.echo_via_pager(style_timeline(tweets, porcelain))
    else:
        click.echo(style_timeline(tweets, porcelain))
def view_url(self, url):
    """View the given url in the terminal pager.

    :type url: str
    :param url: The url to view
    """
    contents = self.generate_url_contents(url)
    header = click.style('Viewing ' + url + '\n\n',
                         fg=self.config.clr_message)
    contents = header + contents
    contents += click.style(('\nView this article in a browser with'
                             ' the -b/--browser flag.\n'),
                            fg=self.config.clr_message)
    contents += click.style(('\nPress q to quit viewing this '
                             'article.\n'),
                            fg=self.config.clr_message)
    # NOTE(review): by this point `contents` always starts with the styled
    # header, so this equality check can never match — confirm whether it
    # should compare the raw body instead.
    if contents == '{"error":"Not Found"}\n':
        click.secho('Invalid user/repo combination.')
        return
    color = None
    if platform.system() == 'Windows':
        # click needs explicit color handling for the Windows pager.
        color = True
    click.echo_via_pager(contents, color)
def timeclock(globs: ROAttrDict, task: str, duration: str):
    """Generate ledger compatible timeclock file.

    \f

    Args:
        globs: Global options object
        task: Task name to operate on
        duration: Time window to filter on
    """
    if task == 'default':
        # Lazy way to remove duplicate argument definitions
        task = None
    events = filter_events(globs, task, duration)

    def gen_output():
        # Lazily emit timeclock lines; a still-running event has no end
        # time yet, so it is excluded and flagged at both ends of the output.
        if events.running():
            yield ';; Running event not included in output!\n'
        for event in events:
            if not event.delta:
                continue
            yield f'i {event.start:%F %T} {event.task}\n'
            yield f'o {event.start + event.delta:%F %T}' \
                f'{" ; " + event.message if event.message else ""}\n'
        if events.running():
            yield ';; Running event not included in output!\n'

    click.echo_via_pager(gen_output())
def write(self, data):
    """Page ``data``, decoding byte strings as UTF-8 first."""
    text = data.decode('utf-8') if isinstance(data, six.binary_type) else data
    # echo_via_pager() already appends a '\n' at the end of text,
    # so we use rstrip() to remove extra newlines (#89)
    click.echo_via_pager(text.rstrip())
def ledger(globs: ROAttrDict, task: str, duration: str, rate: str):
    """Generate ledger compatible data file.

    \f

    Args:
        globs: Global options object
        task: Task name to operate on
        duration: Time window to filter on
        rate: Rate to assign hours in report
    """
    if task == 'default':
        # Lazy way to remove duplicate argument definitions
        task = None
    events = filter_events(globs, task, duration)

    def gen_output():
        # Lazily emit ledger entries; a still-running event has no end
        # time yet, so it is excluded and flagged at both ends of the output.
        if events.running():
            yield ';; Running event not included in output!\n'
        for event in events:
            if not event.delta:
                continue
            end = event.start + event.delta
            hours = event.delta.total_seconds() / 3600
            yield f'{event.start:%F * %H:%M}-{end:%H:%M}'
            yield ' (task:{}) {:.2f}h{}{}\n'.format(
                event.task, hours,
                ' @ {}'.format(rate) if rate else '',
                ' ; {}'.format(event.message) if event.message else ''
            )
        if events.running():
            yield ';; Running event not included in output!\n'

    click.echo_via_pager(gen_output())
def print_all(container):
    # Page the container's full log output, prefixing every line.
    # NOTE(review): this function references `self.follow` and `self.lines`
    # but takes no `self` parameter — it will raise NameError as written.
    # It looks like a method whose `self` was dropped; confirm and fix the
    # signature at the definition site.
    prefix = container.get_log_prefix()
    lines = container.logs(_iter=False, follow=self.follow, lines=self.lines).split('\n')
    text = '\n'.join(prefix + to_unicode(l) for l in lines)
    click.echo_via_pager(text.strip('\n'))
def selectFolder(M, moreMessages = ""): resp, data = M.list('""', '*') #print(data) listAllFolders = listFolderNames(data, moreMessages) if not listAllFolders: listAllFolders = listFolderNames(data, "") listFolders = listAllFolders while listFolders: if (listFolders.count('\n') == 0): nF = nameFolder(listFolders) nF = nF.strip('\n') print("nameFolder", nF) return(nF) rows, columns = os.popen('stty size', 'r').read().split() if listFolders.count('\n') > int(rows) - 2: click.echo_via_pager(listFolders) else: print(listFolders) print(len(listFolders)) inNameFolder = input("Folder number [-cf] Create Folder // A string to select a smaller set of folders ") if (len(inNameFolder) > 0) and (inNameFolder == '-cf'): nfn = input("New folder name? ") iFolder = createFolder(M, nfn, moreMessages) return(iFolder) #listFolders = iFolder listFolders = listFolderNames(listFolders.split('\n'), inNameFolder) if (not inNameFolder): print("Entra") listAllFolders = listFolderNames(data, "") listFolders = "" if (not listFolders): listFolders = listAllFolders
def project_list(obj, grouped):
    """List all your projects"""
    api_response = obj.get_projects()
    header = ('Slug', 'Name', 'Organisation')

    # get all users + organisations
    groups = {
        'users': {
            account['id']: {'name': 'Personal', 'projects': []}
            for account in api_response['accounts']
            if account['type'] == 'user'
        },
        'organisations': {
            account['id']: {'name': account['name'], 'projects': []}
            for account in api_response['accounts']
            if account['type'] == 'organisation'
        }
    }

    # sort websites into groups
    for website in api_response['websites']:
        organisation_id = website['organisation_id']
        if organisation_id:
            owner = groups['organisations'][website['organisation_id']]
        else:
            owner = groups['users'][website['owner_id']]
        owner['projects'].append((website['domain'], website['name']))

    accounts = itertools.chain(
        groups['users'].items(),
        groups['organisations'].items()
    )

    def sort_projects(items):
        # Case-insensitive sort on the first tuple element (the slug).
        return sorted(items, key=lambda x: x[0].lower())

    # print via pager
    if grouped:
        # One underlined section per account, skipping empty accounts.
        output_items = []
        for group, data in accounts:
            projects = data['projects']
            if projects:
                output_items.append(
                    u'{title}\n{line}\n\n{table}\n\n'.format(
                        title=data['name'],
                        line='=' * len(data['name']),
                        table=table(sort_projects(projects), header[:2])
                    )
                )
        output = os.linesep.join(output_items).rstrip(os.linesep)
    else:
        # add account name to all projects
        projects = [
            each + (data['name'],)
            for group, data in accounts
            for each in data['projects']
        ]
        output = table(sort_projects(projects), header)
    click.echo_via_pager(output)
def execute_command(self, text, query):
    """Run one SQL/special command, display its output, and refresh
    completions when the command changed the database state.

    :param text: raw command text entered by the user.
    :param query: query object; replaced by the evaluated result on success.
    :return: the (possibly updated) query object.
    """
    logger = self.logger
    try:
        output, query = self._evaluate_command(text)
    except KeyboardInterrupt:
        # Restart connection to the database
        self.pgexecute.connect()
        logger.debug("cancelled query, sql: %r", text)
        click.secho("cancelled query", err=True, fg='red')
    except NotImplementedError:
        click.secho('Not Yet Implemented.', fg="yellow")
    except OperationalError as e:
        logger.error("sql: %r, error: %r", text, e)
        logger.error("traceback: %r", traceback.format_exc())
        self._handle_server_closed_connection()
    except Exception as e:
        logger.error("sql: %r, error: %r", text, e)
        logger.error("traceback: %r", traceback.format_exc())
        click.secho(str(e), err=True, fg='red')
    else:
        try:
            # Either append to the \o output file or page to the terminal.
            if self.output_file and not text.startswith(('\\o ', '\\? ')):
                try:
                    with open(self.output_file, 'a', encoding='utf-8') as f:
                        click.echo(text, file=f)
                        click.echo('\n'.join(output), file=f)
                        click.echo('', file=f)  # extra newline
                except IOError as e:
                    click.secho(str(e), err=True, fg='red')
            else:
                click.echo_via_pager('\n'.join(output))
        except KeyboardInterrupt:
            pass
        if self.pgspecial.timing_enabled:
            # Only add humanized time display if > 1 second
            if query.total_time > 1:
                print('Time: %0.03fs (%s)' % (query.total_time,
                      humanize.time.naturaldelta(query.total_time)))
            else:
                print('Time: %0.03fs' % query.total_time)
        # Check if we need to update completions, in order of most
        # to least drastic changes
        if query.db_changed:
            with self._completer_lock:
                self.completer.reset_completions()
            self.refresh_completions(persist_priorities='keywords')
        elif query.meta_changed:
            self.refresh_completions(persist_priorities='all')
        elif query.path_changed:
            logger.debug('Refreshing search path')
            with self._completer_lock:
                self.completer.set_search_path(
                    self.pgexecute.search_path())
            logger.debug('Search path: %r', self.completer.search_path)
    return query
def main(number):
    """Fetch the RFC with the given number and page it; a closed pager
    pipe (user quit early) is silently ignored."""
    response = fetch_url(RFC_URL.format(number))
    try:
        click.echo_via_pager(response)
    except BrokenPipeError:
        pass
def cli_list():
    """List all the tags."""
    lines = ['{name} ({count})'.format(**tag) for tag in tag_list()]
    click.echo_via_pager('\n'.join(lines))
def stats(pod_path, full):
    """Displays statistics about the pod."""
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    pod = pods.Pod(root, storage=storage.FileStorage)
    try:
        # Named `pod_stats` (not `stats`) so the local does not shadow
        # this command function, matching the sibling `inspect_stats`.
        pod_stats = stats_lib.Stats(pod, full=full)
        click.echo_via_pager("\n\n".join(pod_stats.to_tables()))
    except pods.Error as e:
        raise click.ClickException(str(e))
def cli(mcf, schema, schema_local, output):
    """Render the MCF template, then page the result or write it to
    ``output`` when one was given."""
    if mcf is None or (schema is None and schema_local is None):
        raise click.UsageError("Missing arguments")
    content = render_template(mcf, schema=schema, schema_local=schema_local)
    if output is None:
        click.echo_via_pager(content)
    else:
        output.write(content)
def run_command(command, pager=False):
    """Run ``command`` in a shell and display its stdout.

    :param command: shell command line to execute.
    :param pager: page the output instead of echoing it directly.
    """
    # The echo of the command is identical in both branches, so hoist it.
    click.echo(click.style("Command: ", fg='cyan') +
               click.style(command, fg='green'))
    # SECURITY: shell=True runs `command` through the shell — callers must
    # never pass untrusted input here.
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    # communicate() both reads stdout and waits, avoiding a zombie process
    # (the original read stdout but never waited on the child).
    output, _ = p.communicate()
    if pager:
        click.echo_via_pager(output)
    else:
        click.echo(output)
def cli(sort):
    """ Lists all available problems. """
    sort_key = sort.lower()
    ordered = sorted(data.problems, key=lambda problem: problem[sort_key])
    rows = [
        (problem['id'], problem['name'], '%d%%' % problem['difficulty'])
        for problem in ordered
    ]
    click.echo_via_pager(tabulate(rows, TABLE_HEADERS, tablefmt='fancy_grid'))
def migrate(mcf, output):
    """Convert a configparser-style MCF to YAML, then page the result or
    write it to ``output`` when one was given."""
    if mcf is None:
        raise click.UsageError('Missing arguments')
    content = configparser2yaml(mcf).decode('utf-8')
    if output is None:
        click.echo_via_pager(content)
    else:
        output.write(content)
def show(repository):
    """With no argument, list all known repositories; otherwise page the
    named repository's contents as YAML."""
    if repository:
        repo = Repository(repository)
        content = yaml.safe_dump(dict(repo.get_contents()),
                                 default_flow_style=False)
        click.echo_via_pager(content)
    else:
        click.echo('\n'.join(sorted(Repository.list_repos())))
def list_containers(ctx):
    """List all Cloud Files containers for your specified region. If not
    in interactive mode, these are printed to stdout; interactive mode
    will paginate the output for you.
    """
    conn = auth.conn(ctx)
    containers = conn.object_store.containers()
    if ctx.interactive:
        lines = ("Container: %s" % c.name for c in containers)
        click.echo_via_pager("\n".join(lines))
    else:
        for container in containers:
            click.echo(container.name)
def list_objects(ctx, containername):
    """List all Cloud Files objects in the specified container. If not in
    interactive mode, these are printed to stdout; interactive mode will
    paginate the output for you.
    """
    conn = auth.conn(ctx)
    if ctx.interactive:
        # Fixed copy-paste bug: this lists objects, but the label said
        # "Container:" (copied from list_containers).
        click.echo_via_pager("\n".join("Object: %s" % c.name
                                       for c in conn.object_store.objects(containername)))
    else:
        for object in conn.object_store.objects(containername):
            click.echo(object.name)
def dump_container(container):
    """Dump container info."""
    # NOTE: mutates `container` in place — ship/shipment references and
    # file contexts are blanked before dumping, presumably to keep the
    # YAML output small and free of back-references (confirm callers do
    # not reuse the object afterwards).
    container.ship = None
    container.shipment = None
    for volume in container.volumes.values():
        if hasattr(volume, 'files'):
            for file in volume.files.values():
                if hasattr(file, 'context'):
                    file.context = 'skipped'
    click.echo_via_pager(yaml.dump(container))
def inspect_stats(pod_path, full):
    """Displays statistics about the pod."""
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    pod = pods.Pod(root, storage=storage.FileStorage)
    try:
        with pod.profile.timer('grow_inspect_stats'):
            tables = stats_lib.Stats(pod, full=full).to_tables()
            click.echo_via_pager('\n\n'.join(tables))
    except pods.Error as err:
        raise click.ClickException(str(err))
    return pod
def print_home_timeline(tweets, pager):
    """Print the home timeline of the user."""
    s = ""
    for i, tweet in enumerate(tweets):
        # NOTE(review): .encode('utf_8') produces bytes, and adding the str
        # '\n\n' afterwards only works on Python 2 — on Python 3 this raises
        # TypeError. Confirm this module is Python-2-only.
        s += ((click.style('[%d] ' % (i + 1), bold=True, fg="blue") +
               click.style('@%s - ' % tweet.author.screen_name, bold=True, fg="cyan") +
               click.style('%s' % tweet.text)).encode('utf_8') + '\n\n')
    if pager:
        click.echo_via_pager(s)
    else:
        click.echo(s)
def pager(__text: str, pager: bool = False):
    """Pass output through pager.

    Args:
        __text: Text to display
        pager: If true, page the text; otherwise echo it directly
    """
    if pager:
        click.echo_via_pager(__text)
    else:
        click.echo(__text)
def pager():
    """Demonstrates using the pager."""
    text = '\n'.join(
        '%s. Hello World!' % click.style(str(n), fg='green')
        for n in range_type(200)
    )
    click.echo_via_pager(text)
def search(color, fields, limit, separator, query):
    """Search the Shodan database"""
    key = get_api_key()
    # Create the query string out of the provided tuple
    query = ' '.join(query).strip()
    # Make sure the user didn't supply an empty string
    if query == '':
        raise click.ClickException('Empty search query')
    # For now we only allow up to 1000 results at a time
    if limit > 1000:
        raise click.ClickException('Too many results requested, maximum is 1,000')
    # Strip out any whitespace in the fields and turn them into an array
    fields = [item.strip() for item in fields.split(',')]
    # NOTE(review): str.split(',') always yields at least one element, so
    # this check can never fire — confirm whether it should test for empty
    # strings instead.
    if len(fields) == 0:
        raise click.ClickException('Please define at least one property to show')
    # Perform the search
    api = shodan.Shodan(key)
    try:
        results = api.search(query, limit=limit)
    except shodan.APIError as e:
        raise click.ClickException(e.value)
    # Error out if no results were found
    if results['total'] == 0:
        raise click.ClickException('No search results found')
    # We buffer the entire output so we can use click's pager functionality
    output = u''
    for banner in results['matches']:
        row = u''
        # Loop over all the fields and print the banner as a row
        for field in fields:
            tmp = u''
            value = get_banner_field(banner, field)
            if value:
                field_type = type(value)
                # If the field is an array then merge it together
                if field_type == list:
                    tmp = u';'.join(value)
                elif field_type in [int, float]:
                    tmp = u'{}'.format(value)
                else:
                    tmp = escape_data(value)
                # Colorize certain fields if the user wants it
                if color:
                    tmp = click.style(tmp, fg=COLORIZE_FIELDS.get(field, 'white'))
                # Add the field information to the row
                row += tmp
            row += separator
            # click.echo(out + separator, nl=False)
        output += row + u'\n'
        # click.echo('')
    click.echo_via_pager(output)
def less(file):
    """Read a file using pagination."""
    # NOTE(review): readlines() keeps each line's trailing '\n', and
    # another '\n' is appended here, double-spacing the output — confirm
    # that is intentional.
    click.echo_via_pager(f'{linha}\n' for linha in file.readlines())
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import Completer, Completion
import click
from fuzzyfinder import fuzzyfinder
from pygments.lexers.sql import SqlLexer

# Keyword vocabulary offered by the completer.
SQLKeywords = ['select', 'from', 'insert', 'update', 'delete', 'drop']


class SQLCompleter(Completer):
    """Fuzzy completer over the SQL keyword list."""

    def get_completions(self, document, complete_event):
        word_before_cursor = document.get_word_before_cursor(WORD=True)
        matches = fuzzyfinder(word_before_cursor, SQLKeywords)
        for m in matches:
            # On a match, pre-display the fuzzy-matched values in order.
            yield Completion(m, start_position=-len(word_before_cursor))


# Minimal SQL REPL loop: prompt, page the raw input back, exit on "exit".
while 1:
    user_input = prompt(
        u"SQL>",  # command prompt
        history=FileHistory("history.txt"),  # file recording input history
        auto_suggest=AutoSuggestFromHistory(),  # suggest from past input
        completer=SQLCompleter(),
        lexer=SqlLexer)  # known issue: multi-character input is not highlighted
    click.echo_via_pager(user_input)  # page the output
    if (user_input == "exit"):
        break
def _handle_failure(self, element, task, failure):
    """React to a failed job, either via the configured non-interactive
    policy or by prompting the user for an action.

    :param element: (unique_id, element) style pair for the failed element.
    :param task: the task whose job failed.
    :param failure: failure details (action name, optional logfile/sandbox).
    """
    full_name = task.full_name

    # Handle non interactive mode setting of what to do when a job fails.
    if not self._interactive_failures:
        if self.context.sched_error_action == _SchedulerErrorAction.TERMINATE:
            self.stream.terminate()
        elif self.context.sched_error_action == _SchedulerErrorAction.QUIT:
            self.stream.quit()
        elif self.context.sched_error_action == _SchedulerErrorAction.CONTINUE:
            pass
        return

    # Interactive mode for element failures
    with self._interrupted():
        summary = (
            "\n{} failure on element: {}\n".format(failure.action_name, full_name) +
            "\n" +
            "Choose one of the following options:\n" +
            " (c)ontinue - Continue queueing jobs as much as possible\n" +
            " (q)uit - Exit after all ongoing jobs complete\n" +
            " (t)erminate - Terminate any ongoing jobs and exit\n" +
            " (r)etry - Retry this job\n")
        if failure.logfile:
            summary += " (l)og - View the full log file\n"
        if failure.sandbox:
            summary += " (s)hell - Drop into a shell in the failed build sandbox\n"
        summary += "\nPressing ^C will terminate jobs and exit\n"

        choices = ["continue", "quit", "terminate", "retry"]
        if failure.logfile:
            choices += ["log"]
        if failure.sandbox:
            choices += ["shell"]

        # Loop until the user picks an action that ends the prompt;
        # "log" and "shell" return to the menu afterwards.
        choice = ""
        while choice not in ["continue", "quit", "terminate", "retry"]:
            click.echo(summary, err=True)

            self._notify(
                "BuildStream failure",
                "{} on element {}".format(failure.action_name, full_name))

            try:
                choice = click.prompt(
                    "Choice:", default="continue", err=True,
                    value_proc=_prefix_choice_value_proc(choices))
            except (click.Abort, SystemError):
                # In some cases, the readline buffer underlying the prompt gets corrupted on the second CTRL+C
                # This throws a SystemError, which doesn't seem to be problematic for the rest of the program

                # Ensure a newline after automatically printed '^C'
                click.echo("", err=True)
                choice = "terminate"

            # Handle choices which you can come back from
            #
            if choice == "shell":
                click.echo(
                    "\nDropping into an interactive shell in the failed build sandbox\n",
                    err=True)
                try:
                    unique_id, _ = element
                    self.stream.shell(
                        None,
                        _Scope.BUILD,
                        self.shell_prompt,
                        isolate=True,
                        usebuildtree=True,
                        unique_id=unique_id,
                    )
                except BstError as e:
                    click.echo(
                        "Error while attempting to create interactive shell: {}"
                        .format(e), err=True)
            elif choice == "log":
                with open(failure.logfile, "r") as logfile:
                    content = logfile.read()
                    click.echo_via_pager(content)

        if choice == "terminate":
            click.echo("\nTerminating all jobs\n", err=True)
            self.stream.terminate()
        else:
            if choice == "quit":
                click.echo("\nCompleting ongoing tasks before quitting\n", err=True)
                self.stream.quit()
            elif choice == "continue":
                click.echo(
                    "\nContinuing with other non failing elements\n", err=True)
            elif choice == "retry":
                click.echo("\nRetrying failed job\n", err=True)
                unique_id = element[0]
                self.stream.retry_job(task.action_name, unique_id)
def tutorial(ctx, cli_obj, width):
    """Display osxphotos tutorial."""
    # `width` arrives as a (possibly empty) tuple; default to 100 columns.
    if width:
        page_width = width[0]
    else:
        page_width = 100
    click.echo_via_pager(tutorial_help(width=page_width))
def library_search(query, sort_a0, sort_gbw, sort_delay, sort_vnoise,
                   sort_vcorner, sort_inoise, sort_icorner, sort_vmax,
                   sort_imax, sort_sr, show_table, paged, save_data,
                   plot_voltage_noise, plot_current_noise, plot_gain,
                   save_voltage_noise_figure, save_current_noise_figure,
                   save_gain_figure, fstart, fstop, npoints):
    """Search Zero op-amp library.

    Op-amp parameters listed in the library can be searched:

        model (model name), a0 (open loop gain), gbw (gain-bandwidth product),
        delay, vnoise (flat voltage noise), vcorner (voltage noise corner
        frequency), inoise (flat current noise), icorner (current noise corner
        frequency), vmax (maximum output voltage), imax (maximum output
        current), sr (slew rate)

    The parser supports basic comparison and logic operators:

        == (equal), != (not equal), > (greater than), >= (greater than or
        equal), < (less than), <= (less than or equal), & (logic AND),
        | (logic OR)

    Clauses can be grouped together with parentheses:

        (vnoise < 10n & inoise < 10p) | (vnoise < 100n & inoise < 1p)

    The query engine supports arbitrary expressions.

    The 'a0' parameter can be specified in magnitude or decibels. For
    decibels, append 'dB' (case insensitive) to the value.

    The results are sorted sequentially in the order that each parameter
    appears in the search query (left to right). The sort direction
    (descending or ascending) depends on the type of parameter. The sort
    direction for parameter 'X' can be overridden using the corresponding
    '--sort-X' flag. Specify 'ASC' for ascending and 'DESC' for descending
    order.
    """
    engine = LibraryQueryEngine()
    # Define sort order based on defaults and user preferences. Models are always alphabetical.
    sort_order = {"model": False,
                  "a0": sort_a0 == "DESC",
                  "gbw": sort_gbw == "DESC",
                  "delay": sort_delay == "DESC",
                  "vnoise": sort_vnoise == "DESC",
                  "vcorner": sort_vcorner == "DESC",
                  "inoise": sort_inoise == "DESC",
                  "icorner": sort_icorner == "DESC",
                  "vmax": sort_vmax == "DESC",
                  "imax": sort_imax == "DESC",
                  "sr": sort_sr == "DESC"}
    # Get results.
    try:
        devices = engine.query(query, sort_order=sort_order)
    except (LibraryParserError, ValueError) as error:
        click.echo(str(error), err=True)
        click.echo("Add --help for syntax help.", err=True)
        sys.exit(1)
    if not devices:
        click.echo("No op-amps found.")
        sys.exit(0)
    nmodel = len(devices)
    if nmodel == 1:
        opstr = "op-amp"
    else:
        opstr = "op-amps"
    opamps = []
    rows = []
    for device in devices:
        # One table row per device, plus an OpAmp instance for plotting.
        rows.append([str(getattr(device, param)) for param in engine.parameters])
        opamp = OpAmp(model=OpAmpLibrary.format_name(device.model),
                      node1="input", node2="gnd", node3="output",
                      **LIBRARY.get_data(device.model))
        opamps.append(opamp)
    table = tabulate(rows, engine.parameters, tablefmt=CONF["format"]["table"])
    if show_table:
        click.echo(f"{nmodel} {opstr} found:")
        if paged:
            click.echo_via_pager(table)
        else:
            click.echo(table)
    if save_data:
        # Write the result rows to each requested delimited data file.
        for path in save_data:
            pieces = os.path.splitext(path.name)
            if not len(pieces) == 2:
                click.echo(f"Path {path} extension invalid.", err=True)
                sys.exit(1)
            # Remove leading full stop.
            extension = pieces[1][1:]
            if extension.lower() not in FILE_FORMAT_DELIMITERS:
                click.echo(f"File format '{extension}' not recognised.", err=True)
                sys.exit(1)
            delimiter = FILE_FORMAT_DELIMITERS[extension]
            with open(path.name, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=delimiter)
                writer.writerow(engine.parameters)
                writer.writerows(rows)

    def _plot_save_figure(plot_flag, save_flag, plot_type):
        """Plot and/or save an op-amp plot of a particular type."""
        if plot_flag or save_flag:
            plotter = plot_type(fstart=fstart, fstop=fstop, npoints=npoints)
            plotter.plot(opamps)
            if save_flag:
                for save_path in save_flag:
                    # NOTE: use figure file's name so that Matplotlib can identify the file type
                    # appropriately.
                    plotter.save(save_path.name)
            if plot_flag:
                plotter.show()

    _plot_save_figure(plot_voltage_noise, save_voltage_noise_figure,
                      OpAmpVoltageNoisePlotter)
    _plot_save_figure(plot_current_noise, save_current_noise_figure,
                      OpAmpCurrentNoisePlotter)
    _plot_save_figure(plot_gain, save_gain_figure, OpAmpGainPlotter)
async def recv(self):
    """Await one websocket message, showing a spinner while waiting,
    then page it as JSON."""
    self.spinner.start()
    result = await self.websocket.recv()
    self.spinner.stop()
    # NOTE(review): if `result` is already serialized JSON text,
    # json.dumps() will re-quote it — confirm `result` is a decoded object.
    click.echo_via_pager(json.dumps(result))
def execute_command(self, text, query):
    """Run one SQL/special command, display its output, and refresh
    completions when the command changed the database state.

    :param text: raw command text entered by the user.
    :param query: query object; replaced by the evaluated result on success.
    :return: the (possibly updated) query object.
    """
    logger = self.logger
    try:
        output, query = self._evaluate_command(text)
    except KeyboardInterrupt:
        # Restart connection to the database
        self.pgexecute.connect()
        logger.debug("cancelled query, sql: %r", text)
        click.secho("cancelled query", err=True, fg='red')
    except NotImplementedError:
        click.secho('Not Yet Implemented.', fg="yellow")
    except OperationalError as e:
        # Only a dropped-connection error triggers the reconnect path;
        # any other operational error is reported like a generic failure.
        if ('server closed the connection'
                in utf8tounicode(e.args[0])):
            self._handle_server_closed_connection()
        else:
            logger.error("sql: %r, error: %r", text, e)
            logger.error("traceback: %r", traceback.format_exc())
            click.secho(str(e), err=True, fg='red')
    except Exception as e:
        logger.error("sql: %r, error: %r", text, e)
        logger.error("traceback: %r", traceback.format_exc())
        click.secho(str(e), err=True, fg='red')
    else:
        try:
            # Either append to the \o output file or page to the terminal.
            if self.output_file and not text.startswith(('\\o ', '\\? ')):
                try:
                    with open(self.output_file, 'a', encoding='utf-8') as f:
                        click.echo(text, file=f)
                        click.echo('\n'.join(output), file=f)
                        click.echo('', file=f)  # extra newline
                except IOError as e:
                    click.secho(str(e), err=True, fg='red')
            else:
                click.echo_via_pager('\n'.join(output))
        except KeyboardInterrupt:
            pass
        if self.pgspecial.timing_enabled:
            # Only add humanized time display if > 1 second
            if query.total_time > 1:
                print('Time: %0.03fs (%s)' % (query.total_time,
                      humanize.time.naturaldelta(query.total_time)))
            else:
                print('Time: %0.03fs' % query.total_time)
        # Check if we need to update completions, in order of most
        # to least drastic changes
        if query.db_changed:
            with self._completer_lock:
                self.completer.reset_completions()
            self.refresh_completions(persist_priorities='keywords')
        elif query.meta_changed:
            self.refresh_completions(persist_priorities='all')
        elif query.path_changed:
            logger.debug('Refreshing search path')
            with self._completer_lock:
                self.completer.set_search_path(
                    self.pgexecute.search_path())
            logger.debug('Search path: %r', self.completer.search_path)
    return query
def validate_config(device, logfile, verbose):
    '''
    builds a menu for displaying, deleting, and comparing configs for a device
    '''
    counter = 0  # index of the config currently shown
    while True:
        total = (len(device.configs))
        click.clear()
        print('[DEVICE > CONFIGS]')
        print('==============================================')
        click.echo(click.style(' 1 - Device ID : {}'.format(device.device_id), fg='red'))
        if total > 0:
            click.echo(click.style(' 2 - Config : {} of {}'.format(counter + 1, total), fg='green'))
            click.echo(click.style(' 3 - Timestamp : {:%Y-%m-%d %H:%M:%S} UTC'.format(device.configs[counter].timestamp), fg='red'))
        else:
            click.echo(click.style(' 2 - Config : {} of {}'.format(counter, total), fg='red'))
            click.echo(click.style(' 3 - Timestamp :', fg='red'))
        print('==============================================')
        if total > 0:
            print('<V> - View Config, <C> - Compare Latest Configs, <N> - Next, <P> - Previous, <D> - Delete Config, <Q> - Quit')
            print('')
        else:
            print('\nThere are no remaining configs.\n')
            click.pause()
            return None
        choice = click.prompt('What would you like to do').upper()
        if choice == 'Q':
            return None
        elif choice == 'N':
            # Wrap forward through the configs.
            counter += 1
            counter %= total
        elif choice == 'P':
            # Wrap backward through the configs.
            if counter > 0:
                counter -= 1
            else:
                counter = total - 1
        elif choice == 'D':
            if verbose:
                msg = 'Config {} deleted from device {}.'.format(counter, device.device_id)
                generate_log(logfile, msg, 'INFO')
            db.delete_config(device.configs[counter])
            if counter > 0:
                counter -= 1
        elif choice == 'V' or choice == '2':
            click.echo_via_pager(device.configs[counter].config)
        elif choice == 'C':
            # Unified diff of the two most recent configs, paged.
            if total >= 2:
                difference = []
                for line in udiff(device.configs[0].config.split('\n'),
                                  device.configs[1].config.split('\n'),
                                  fromfile='{:%Y-%m-%d %H:%M:%S} UTC'.format(device.configs[0].timestamp),
                                  tofile='{:%Y-%m-%d %H:%M:%S} UTC'.format(device.configs[1].timestamp)):
                    difference.append(line)
                click.echo_via_pager('\n'.join(difference))
            else:
                print('\nNot enough configs to compare.\n')
                click.pause()
        else:
            print('\nInvalid Entry\n')
            click.pause()
def user(self, user_id, browser=False, text_avatar=False, limit=1000, pager=False):
    """List information about the logged in user.

    :type user_id: str
    :param user_id: The user id/login. If None, returns followers of the
        logged in user.

    :type browser: bool
    :param browser: Determines whether to view the profile in a browser,
        or in the terminal.

    :type text_avatar: bool
    :param text_avatar: Determines whether to view the profile avatar in
        plain text instead of ansi (default). On Windows this value is
        always set to True due to lack of support of `img2txt` on Windows.

    :type limit: int
    :param limit: The number of items to display.

    :type pager: bool
    :param pager: Determines whether to show the output in a pager, if
        available.
    """
    if browser:
        webbrowser.open(self.GH_BASE_URL + user_id)
    else:
        user = self.config.api.user(user_id)
        if type(user) is null.NullObject:
            click.secho('Invalid user.', fg=self.config.clr_error)
            return
        # Build the styled profile text, then the user's repositories.
        output = ''
        output += click.style(
            self.avatar_setup(user.avatar_url, text_avatar))
        output += click.style(user.login + '\n',
                              fg=self.config.clr_primary)
        if user.company is not None:
            output += click.style(user.company + '\n',
                                  fg=self.config.clr_secondary)
        if user.location is not None:
            output += click.style(user.location + '\n',
                                  fg=self.config.clr_secondary)
        if user.email is not None:
            output += click.style(user.email + '\n',
                                  fg=self.config.clr_secondary)
        if user.type == 'Organization':
            output += click.style('Organization\n\n',
                                  fg=self.config.clr_tertiary)
        else:
            output += click.style(
                'Followers: ' + str(user.followers_count) + ' | ',
                fg=self.config.clr_tertiary)
            output += click.style(
                'Following: ' + str(user.following_count) + '\n\n',
                fg=self.config.clr_tertiary)
        output += self.repositories(self.config.api.repositories(user_id),
                                    limit, pager, print_output=False)
        if pager:
            color = None
            if platform.system() == 'Windows':
                # click needs explicit color handling for the Windows pager.
                color = True
            click.echo_via_pager(output, color)
        else:
            click.secho(output)
def _final_print(lines):
    # Join the collected lines and hand them to the user's pager.
    text = '\n'.join(lines)
    click.echo_via_pager(text)
def setup_burp(bconfcli, bconfsrv, client, host, redis, database, plugins, dry):
    """Setup burp client for burp-ui.

    Wires a Burp 2 server/client pair into Burp-UI: updates the Burp-UI
    configuration (Burp/Global/Production sections, optional redis/celery
    and database settings), then patches the burp-server and burp-client
    configuration files and creates the agent file if needed.

    :param bconfcli: path to the burp-client configuration file.
    :param bconfsrv: path to the burp-server configuration file.
    :param client: client name (cname) to register.
    :param host: address the client should reach the server on.
    :param redis: redis connection string to enable, or falsy to skip.
    :param database: database URI to enable, or falsy to skip.
    :param plugins: plugins setting for the [Global] section.
    :param dry: when truthy, write nothing — show colored unified diffs of
        what *would* change instead.
    """
    # Hard requirements: Burp 2 backend in standalone mode only.
    if app.config['BACKEND'] != 'burp2':
        click.echo(click.style('Sorry, you can only setup the Burp 2 client', fg='red'), err=True)
        sys.exit(1)
    if not app.config['STANDALONE']:
        click.echo(click.style('Sorry, only the standalone mode is supported', fg='red'), err=True)
        sys.exit(1)
    try:
        msg = app.load_modules(True)
    except Exception as e:
        msg = str(e)
    if msg:
        _die(msg, 'setup_burp')
    from .misc.parser.utils import Config
    from .app import get_redis_server
    import difflib
    import tempfile
    parser = app.client.get_parser()
    orig = source = None
    conf_orig = []
    if dry:
        # Dry-run: remember the original config content/path and redirect
        # all writes to a throw-away temp file so nothing real is touched.
        try:
            with open(app.conf.options.filename) as fil:
                conf_orig = fil.readlines()
        except:
            pass
        orig = source = app.conf.options.filename
        (_, temp) = tempfile.mkstemp()
        app.conf.options.filename = temp
    # handle migration of old config files
    if app.conf.section_exists('Burp2'):
        if app.conf.rename_section('Burp2', 'Burp', source):
            click.echo(click.style('Renaming old [Burp2] section', fg='blue'))
            app.conf._refresh(True)
    # Make sure the sections we are about to edit exist.
    refresh = False
    if not app.conf.lookup_section('Burp', source):
        refresh = True
    if not app.conf.lookup_section('Global', source):
        refresh = True
    if (database or redis) and not app.conf.lookup_section('Production', source):
        refresh = True
    if refresh:
        app.conf._refresh(True)

    def _edit_conf(key, val, attr, section='Burp', obj=app.client):
        # Set key=val in [section] only when val is given, differs from the
        # stored value, and differs from the live attribute on obj.
        # Returns True when a write happened (caller refreshes the config).
        if val and (((key not in app.conf.options[section]) or
                     (key in app.conf.options[section] and
                      val != app.conf.options[section][key])) and
                    getattr(obj, attr) != val):
            app.conf.options[section][key] = val
            app.conf.options.write()
            click.echo(
                click.style(
                    'Adding new option: "{}={}" to section [{}]'.format(
                        key, val, section), fg='blue'))
            return True
        return False

    def _color_diff(line):
        # Colorize a unified-diff line: additions green, removals red,
        # markers blue, context unchanged.
        if line.startswith('+'):
            return click.style(line, fg='green')
        elif line.startswith('-'):
            return click.style(line, fg='red')
        elif line.startswith('^'):
            return click.style(line, fg='blue')
        return line

    refresh = False
    refresh |= _edit_conf('bconfcli', bconfcli, 'burpconfcli')
    refresh |= _edit_conf('bconfsrv', bconfsrv, 'burpconfsrv')
    refresh |= _edit_conf('plugins', plugins, 'plugins', 'Global', app)
    if refresh:
        app.conf._refresh(True)

    if redis:
        try:
            # detect missing modules
            import redis as redis_client  # noqa
            import celery  # noqa
            import socket
            if ('redis' not in app.conf.options['Production'] or
                    'redis' in app.conf.options['Production'] and
                    app.conf.options['Production']['redis'] != redis) and \
                    app.redis != redis:
                app.conf.options['Production']['redis'] = redis
                # Probe the redis server before enabling celery/storage/cache
                # on top of it; fall back to defaults when unreachable.
                rhost, rport, _ = get_redis_server(app)
                ret = -1
                for res in socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_STREAM):
                    if ret == 0:
                        break
                    af, socktype, proto, _, sa = res
                    try:
                        s = socket.socket(af, socktype, proto)
                    except socket.error:
                        continue
                    try:
                        ret = s.connect_ex(sa)
                    except:
                        continue
                if ret == 0:
                    app.conf.options['Production']['celery'] = 'true'
                    app.conf.options['Production']['storage'] = 'redis'
                    app.conf.options['Production']['cache'] = 'redis'
                else:
                    click.echo(
                        click.style(
                            'Unable to contact the redis server, disabling it',
                            fg='yellow'))
                    app.conf.options['Production']['storage'] = 'default'
                    app.conf.options['Production']['cache'] = 'default'
                    if app.use_celery:
                        app.conf.options['Production']['celery'] = 'false'
                app.conf.options.write()
                app.conf._refresh(True)
        except ImportError:
            click.echo(
                click.style(
                    'Unable to activate redis & celery. Did you ran the '
                    '\'pip install burp-ui[celery]\' and '
                    '\'pip install burp-ui[gunicorn-extra]\' commands first?',
                    fg='yellow'))

    if database:
        try:
            from .ext.sql import db  # noqa
            if ('database' not in app.conf.options['Production'] or
                    'database' in app.conf.options['Production'] and
                    app.conf.options['Production']['database'] != database) and \
                    app.database != database:
                app.conf.options['Production']['database'] = database
                app.conf.options.write()
                app.conf._refresh(True)
        except ImportError:
            click.echo(
                click.style(
                    'It looks like some dependencies are missing. Did you ran '
                    'the \'pip install "burp-ui[sql]"\' command first?',
                    fg='yellow'))

    if dry:
        # Restore the real config path and show the diff of what would have
        # been written to the Burp-UI configuration.
        temp = app.conf.options.filename
        app.conf.options.filename = orig
        after = []
        try:
            if not os.path.exists(temp) or os.path.getsize(temp) == 0:
                after = conf_orig
            else:
                with open(temp) as fil:
                    after = fil.readlines()
            os.unlink(temp)
        except:
            pass
        diff = difflib.unified_diff(conf_orig, after, fromfile=orig, tofile='{}.new'.format(orig))
        out = ''
        for line in diff:
            out += _color_diff(line)
        if out:
            click.echo_via_pager(out)

    # Resolve final paths: CLI argument > stored option > live client attr.
    bconfcli = bconfcli or app.conf.options['Burp'].get('bconfcli') or \
        getattr(app.client, 'burpconfcli')
    bconfsrv = bconfsrv or app.conf.options['Burp'].get('bconfsrv') or \
        getattr(app.client, 'burpconfsrv')
    dest_bconfcli = bconfcli

    if not os.path.exists(bconfsrv):
        click.echo(click.style(
            'Unable to locate burp-server configuration, aborting!',
            fg='red'), err=True)
        sys.exit(1)

    confsrv = Config(bconfsrv, parser, 'srv')
    confsrv.set_default(bconfsrv)
    confsrv.parse()

    if host not in ['::1', '127.0.0.1']:
        # Remote host: warn when the server's status port is not reachable
        # from the outside.
        bind = confsrv.get('status_address')
        if (bind and bind not in [host, '::', '0.0.0.0']) or not bind:
            click.echo(
                click.style(
                    'It looks like your burp server is not exposing it\'s '
                    'status port in a way that is reachable by Burp-UI!',
                    fg='yellow'))
            click.echo(
                click.style(
                    'You may want to set the \'status_address\' setting with '
                    'either \'{}\', \'::\' or \'0.0.0.0\' in the {} file '
                    'in order to make Burp-UI work'.format(host, bconfsrv),
                    fg='blue'))

    # status_port is a list (one entry per max_status_children value);
    # pick the entry whose max_status_children allows >= 15 children.
    status_port = confsrv.get('status_port', [4972])
    if 'max_status_children' not in confsrv:
        click.echo(
            click.style(
                'We need to set the number of \'max_status_children\'. '
                'Setting it to 15.',
                fg='blue'))
        confsrv['max_status_children'] = 15
        status_port = status_port[0]
    else:
        max_status_children = confsrv.get('max_status_children')
        found = False
        for idx, value in enumerate(max_status_children):
            if value >= 15:
                found = True
                if idx >= len(status_port):
                    status_port = status_port[-1]
                else:
                    status_port = status_port[idx]
                break
        if not found:
            click.echo(
                click.style(
                    'We need to raise the number of \'max_status_children\'. '
                    'Raising it to 15 instead of {}.'.format(
                        max_status_children),
                    fg='yellow'))
            confsrv['max_status_children'][-1] = 15
            status_port = status_port[-1]

    # Make sure our client is allowed to restore.
    if 'restore_client' not in confsrv:
        confsrv['restore_client'] = client
    else:
        restore = confsrv.getlist('restore_client')
        if client not in restore:
            confsrv['restore_client'].append(client)
    confsrv['monitor_browse_cache'] = True

    ca_client_dir = confsrv.get('ca_csr_dir')
    if ca_client_dir and not os.path.exists(ca_client_dir):
        try:
            os.makedirs(ca_client_dir)
        except IOError as exp:
            click.echo(click.style('Unable to create "{}" dir: {}'.format(
                ca_client_dir, exp), fg='yellow'), err=True)

    if confsrv.dirty:
        # Write the server config (or a temp copy when dry) and, in dry
        # mode, page the resulting diff.
        if dry:
            (_, dstfile) = tempfile.mkstemp()
        else:
            dstfile = bconfsrv
        confsrv.store(conf=bconfsrv, dest=dstfile, insecure=True)
        if dry:
            before = []
            after = []
            try:
                with open(bconfsrv) as fil:
                    before = fil.readlines()
            except:
                pass
            try:
                with open(dstfile) as fil:
                    after = fil.readlines()
                os.unlink(dstfile)
            except:
                pass
            diff = difflib.unified_diff(before, after, fromfile=bconfsrv,
                                        tofile='{}.new'.format(bconfsrv))
            out = ''
            for line in diff:
                out += _color_diff(line)
            if out:
                click.echo_via_pager(out)

    if confsrv.get('clientconfdir'):
        bconfagent = os.path.join(confsrv.get('clientconfdir'), client)
    else:
        click.echo(
            click.style(
                'Unable to find "clientconfdir" option, you will have to '
                'setup the agent by your own',
                fg='yellow'))
        bconfagent = os.devnull

    if not os.path.exists(bconfcli):
        # No client config yet: generate one from this template.
        clitpl = """
mode = client
port = 4971
status_port = 4972
server = ::1
password = abcdefgh
cname = {0}
protocol = 1
pidfile = /tmp/burp.client.pid
syslog = 0
stdout = 1
progress_counter = 1
network_timeout = 72000
server_can_restore = 0
cross_all_filesystems=0
ca_burp_ca = /usr/sbin/burp_ca
ca_csr_dir = /etc/burp/CA-client
ssl_cert_ca = /etc/burp/ssl_cert_ca-client-{0}.pem
ssl_cert = /etc/burp/ssl_cert-bui-client.pem
ssl_key = /etc/burp/ssl_cert-bui-client.key
ssl_key_password = password
ssl_peer_cn = burpserver
include = /home
exclude_fs = sysfs
exclude_fs = tmpfs
nobackup = .nobackup
exclude_comp=bz2
exclude_comp=gz
""".format(client)
        if dry:
            (_, dest_bconfcli) = tempfile.mkstemp()
        with open(dest_bconfcli, 'w') as confcli:
            confcli.write(clitpl)

    parser = app.client.get_parser()
    confcli = Config(dest_bconfcli, parser, 'srv')
    confcli.set_default(dest_bconfcli)
    confcli.parse()
    if confcli.get('cname') != client:
        confcli['cname'] = client
    if confcli.get('server') != host:
        confcli['server'] = host
    if confcli.get('status_port')[0] != status_port:
        # get_raw returns the mutable list so the port can be edited in place.
        c_status_port = confcli.get_raw('status_port')
        c_status_port[0] = status_port

    if confcli.dirty:
        if dry:
            (_, dstfile) = tempfile.mkstemp()
        else:
            dstfile = bconfcli
        confcli.store(conf=bconfcli, dest=dstfile, insecure=True)
        if dry:
            before = []
            after = []
            try:
                with open(bconfcli) as fil:
                    before = fil.readlines()
            except:
                pass
            try:
                with open(dstfile) as fil:
                    after = fil.readlines()
                os.unlink(dstfile)
            except:
                pass
            if dest_bconfcli != bconfcli:
                # the file did not exist
                os.unlink(dest_bconfcli)
                before = []
            diff = difflib.unified_diff(before, after, fromfile=bconfcli,
                                        tofile='{}.new'.format(bconfcli))
            out = ''
            for line in diff:
                out += _color_diff(line)
            if out:
                click.echo_via_pager(out)

    if not os.path.exists(bconfagent):
        # Create the per-client agent file (or preview it in dry mode).
        agenttpl = """
password = abcdefgh
"""
        if not dry:
            with open(bconfagent, 'w') as confagent:
                confagent.write(agenttpl)
        else:
            before = []
            after = ['{}\n'.format(x) for x in agenttpl.splitlines()]
            diff = difflib.unified_diff(before, after, fromfile='None',
                                        tofile=bconfagent)
            out = ''
            for line in diff:
                out += _color_diff(line)
            if out:
                click.echo_via_pager(out)
    else:
        # Agent file already exists: sanity-check the shared password.
        confagent = Config(bconfagent, parser, 'cli')
        confagent.set_default(bconfagent)
        confagent.parse()
        if confagent.get('password') != confcli.get('password'):
            click.echo(
                click.style(
                    'It looks like the passwords in the {} and the {} files '
                    'mismatch. Burp-UI will not work properly until you fix '
                    'this'.format(bconfcli, bconfagent),
                    fg='yellow'))
def images(category):
    """ Show all the images' names contained in the given CATEGORY set. """
    # Fetch the names for this category and page them one per line.
    names = app.show_images_by_category(category)
    click.echo_via_pager('\n'.join(names))
def conditional_page(output: str, size: int):
    # Route through the pager only when the content is large enough.
    if not should_page(size):
        click.echo(output)
    else:
        click.echo_via_pager(output)
def list_revisions(snap_name, arch):
    """Get the history on the store for <snap-name>.

    This command has an alias of `revisions`.

    \b
    Examples:
        snapcraft list-revisions my-snap
        snapcraft list-revisions my-snap --arch armhf
        snapcraft revisions my-snap
    """
    releases = StoreClientCLI().get_snap_releases(snap_name=snap_name)

    def get_channels_for_revision(revision: int) -> List[str]:
        # Return the sorted channel names a revision was released to;
        # the currently-active channel (per architecture) is suffixed '*'.
        # channels: the set of channels revision was released to, active or not.
        channels: Set[str] = set()
        # seen_channel: applies to channels regardless of revision.
        # The first channel that shows up for each architecture is to
        # be marked as the active channel, all others are historic.
        seen_channel: Dict[str, Set[str]] = dict()
        for release in releases.releases:
            if release.architecture not in seen_channel:
                seen_channel[release.architecture] = set()
            # If the revision is in this release entry and was not seen
            # before it means that this channel is active and needs to
            # be represented with a *.
            if (release.revision == revision
                    and release.channel not in seen_channel[release.architecture]):
                channels.add(f"{release.channel}*")
            # All other releases found for a revision are inactive.
            elif (release.revision == revision
                    and release.channel not in channels
                    and f"{release.channel}*" not in channels):
                channels.add(release.channel)
            seen_channel[release.architecture].add(release.channel)
        return sorted(list(channels))

    # Build one table row per revision, optionally filtered by architecture.
    parsed_revisions = list()
    for rev in releases.revisions:
        if arch and arch not in rev.architectures:
            continue
        channels_for_revision = get_channels_for_revision(rev.revision)
        if channels_for_revision:
            channels = ",".join(channels_for_revision)
        else:
            # '-' marks a revision that was never released to any channel.
            channels = "-"
        parsed_revisions.append((
            rev.revision,
            rev.created_at,
            ",".join(rev.architectures),
            rev.version,
            channels,
        ))

    tabulated_revisions = tabulate(
        parsed_revisions,
        numalign="left",
        headers=["Rev.", "Uploaded", "Arches", "Version", "Channels"],
        tablefmt="plain",
    )

    # 23 revisions + header should not need paging.
    if len(parsed_revisions) < 24:
        click.echo(tabulated_revisions)
    else:
        click.echo_via_pager(tabulated_revisions)
def echo_df(df: pd.DataFrame) -> None:
    """Echo a dataframe via the pager, one row per line, tab-separated.

    Each cell is converted with ``str`` before joining, so frames with
    non-string columns (numeric, datetime, ...) no longer raise
    ``TypeError`` inside ``str.join`` as the original did.
    """
    click.echo_via_pager(
        '\n'.join('\t'.join(str(value) for value in row) for row in df.values)
    )
def classifier(type):
    """ Show all the classifiers variation of the given TYPE. """
    # Look up the variations and page them, one per line.
    variations = app.show_classifier_variations(type)
    text = '\n'.join(variations)
    click.echo_via_pager(text)
def _page(path):
    """Display the contents of *path* through the user's pager.

    Fix: the handle returned by ``_open_file`` was never closed; it is now
    released even when reading or paging raises.
    """
    f = _open_file(path)
    try:
        click.echo_via_pager(f.read())
    finally:
        # _open_file's return type is project-defined; file-like objects
        # expose close(), which the original leaked.
        f.close()
def build_table(self, view_entries, limit, pager, format_method, build_urls=True, print_output=True):
    """Build the table used for the gh view command.

    :type view_entries: list
    :param view_entries: A list of `github3` items.

    :type limit: int
    :param limit: Determines the number of items to show.

    :type format_method: callable
    :param format_method: A method called to format each item in the table.

    :type build_urls: bool
    :param build_urls: Determines whether to build urls for the gh view
        command.

    :type print_output: bool
    :param print_output: determines whether to print the output (True)
        or return the output as a string (False).

    :rtype: str
    :return: the output if print_output is True, else, return None.
    """
    if build_urls:
        self.build_table_urls(view_entries)
    # index doubles as a 1-based row number and as the count of rows shown.
    index = 0
    output = ''
    for view_entry in view_entries:
        index += 1
        view_entry.index = index
        output += format_method(view_entry) + '\n'
        if index >= limit:
            break
    if build_urls:
        # Tell the user how many entries the limit hid and how to see them.
        if len(view_entries) > limit:
            output += click.style(
                (' <Hiding ' +
                 str(len(view_entries) - limit) +
                 ' item(s), use -l/--limit ' +
                 str(len(view_entries)) +
                 ' to view all items.>\n'),
                fg=self.config.clr_message)
    if index == 0:
        # Nothing matched at all.
        output += click.style('No results found',
                              fg=self.config.clr_message)
    elif build_urls:
        # Footer tip explaining how to open the numbered entries.
        output += click.style(self.create_tip(index))
    else:
        # No-op branch kept for symmetry: appends an empty styled string.
        output += click.style('')
    if print_output:
        if pager:
            click.echo_via_pager(output)
        else:
            click.secho(output)
        return None
    else:
        return output
def less_cmd(ctx):
    # Page the previously stored result from the click context object.
    result = ctx.obj["RESULT"]
    click.echo_via_pager(result)
def output_via_pager(self, text):
    # Mirror the text (plus a trailing newline) to the logfile when one is
    # configured, then page it to the terminal.
    log = self.logfile
    if log:
        log.write(text)
        log.write('\n')
    click.echo_via_pager(text)
def log(watson, current, from_, to, projects, tags, year, month, week, day):
    """
    Display each recorded session during the given timespan.

    By default, the sessions from the last 7 days are printed. This timespan
    can be controlled with the `--from` and `--to` arguments. The dates
    must have the format `YEAR-MONTH-DAY`, like: `2014-05-19`.

    You can also use special shortcut options for easier timespan control:
    `--day` sets the log timespan to the current day (beginning at 00:00h)
    and `--year`, `--month` and `--week` to the current year, month or week
    respectively.

    You can limit the log to a project or a tag using the `--project` and
    `--tag` options. They can be specified several times each to add multiple
    projects or tags to the log.

    Example:

    \b
    $ watson log --project voyager2 --project apollo11
    Thursday 08 May 2015 (56m 33s)
            f35bb24  09:26 to 10:22     56m 33s  apollo11  [reactor, brakes, steering, wheels, module]

    \b
    Wednesday 07 May 2015 (27m 29s)
            9a1325d  09:48 to 10:15     27m 29s  voyager2  [sensors, generators, probe]

    \b
    Tuesday 06 May 2015 (1h 47m 22s)
            530768b  12:40 to 14:16   1h 35m 45s  apollo11  [wheels]
            84164f0  14:23 to 14:35     11m 37s  apollo11  [brakes, steering]

    \b
    Monday 05 May 2015 (8h 18m 26s)
            26a2817  09:05 to 10:03     57m 12s  voyager2  [probe, generators]
            5590aca  10:51 to 14:47  3h 55m 40s  apollo11
            c32c74e  15:12 to 18:38  3h 25m 34s  voyager2  [probe, generators, sensors, antenna]

    \b
    $ watson log --from 2014-04-16 --to 2014-04-17
    Thursday 17 April 2014 (4h 19m 13s)
            a96fcde  09:15 to 09:43     28m 11s  hubble  [lens, camera, transmission]
            5e91316  10:19 to 12:59  2h 39m 15s  hubble  [camera, transmission]
            761dd51  14:42 to 15:54  1h 11m 47s  voyager1  [antenna]

    \b
    Wednesday 16 April 2014 (5h 19m 18s)
            02cb269  09:53 to 12:43  2h 50m 07s  apollo11  [wheels]
            1070ddb  13:48 to 16:17  2h 29m 11s  voyager1  [antenna, sensors]
    """  # noqa
    # Shortcut flags override --from; the last non-None of day/week/month/year
    # wins because each assignment overwrites the previous one.
    for start_time in (_ for _ in [day, week, month, year] if _ is not None):
        from_ = start_time

    if from_ > to:
        raise click.ClickException("'from' must be anterior to 'to'")

    if watson.current:
        # Include the running frame (id="current") when requested explicitly
        # or enabled via the 'log_current' config option.
        if current or (current is None and
                       watson.config.getboolean('options', 'log_current')):
            cur = watson.current
            watson.frames.add(cur['project'], cur['start'], arrow.utcnow(),
                              cur['tags'], id="current")

    span = watson.frames.span(from_, to)

    # Newest day first; frames within a day are sorted by start time below.
    frames_by_day = sorted_groupby(
        watson.frames.filter(
            projects=projects or None, tags=tags or None, span=span
        ),
        operator.attrgetter('day'), reverse=True
    )

    lines = []
    for i, (day, frames) in enumerate(frames_by_day):
        # Blank line between day sections (but not before the first one).
        if i != 0:
            lines.append('')

        frames = sorted(frames, key=operator.attrgetter('start'))
        # Width used to right-align project names within the day.
        longest_project = max(len(frame.project) for frame in frames)

        daily_total = reduce(
            operator.add,
            (frame.stop - frame.start for frame in frames)
        )

        # Day header: "Thursday 08 May 2015 (56m 33s)".
        lines.append(
            style(
                'date', "{:dddd DD MMMM YYYY} ({})".format(
                    day, format_timedelta(daily_total)
                )
            )
        )

        lines.append('\n'.join(
            '\t{id}  {start} to {stop}  {delta:>11}  {project}  {tags}'.format(
                delta=format_timedelta(frame.stop - frame.start),
                project=style('project',
                              '{:>{}}'.format(frame.project, longest_project)),
                pad=longest_project,
                tags=style('tags', frame.tags),
                start=style('time', '{:HH:mm}'.format(frame.start)),
                stop=style('time', '{:HH:mm}'.format(frame.stop)),
                id=style('short_id', frame.id)
            )
            for frame in frames
        ))

    click.echo_via_pager('\n'.join(lines))
# _*_coding:utf-8_*_ from prompt_toolkit import prompt from prompt_toolkit.history import FileHistory from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.completion import Completer, Completion import click from fuzzyfinder import fuzzyfinder from pygments.lexers.sql import SqlLexer SQLKeywords = ['select', 'from', 'insert', 'update', 'delete', 'drop'] class SQLCompleter(Completer): def get_completions(self, document, complete_event): word_before_cursor = document.get_word_before_cursor(WORD=True) matches = fuzzyfinder(word_before_cursor, SQLKeywords) for m in matches: yield Completion(m, start_position=-len(word_before_cursor)) while 1: user_input = prompt( u'SQL>', history=FileHistory('history.txt'), auto_suggest=AutoSuggestFromHistory(), completer=SQLCompleter(), lexer=SqlLexer, ) click.echo_via_pager(user_input)
# connecting to the database
import os.path

# create a El-Roi instance
from roi_backbone import ElRoi

# NOTE(review): `json`, `cv2`, `click`, `re`, `io` and `np` are used below
# but never imported in this fragment — presumably imported elsewhere in
# the original file; verify before running standalone.

# load config from a JSON file (or anything outputting a python dictionary)
with open("roi.conf") as f:
    config = json.load(f)

roi = ElRoi(config)

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load a sample picture nd learn how to recognize it.
# Fix: the original called click.echo_via_pager(text, 'red') — the second
# positional argument of echo_via_pager is the boolean `color` flag, not a
# color name, so 'red'/'Green' never colored anything (and 'Green' is not a
# valid click color name, which must be lowercase). A static banner belongs
# in click.secho with fg=, not in a pager.
click.secho('-' * 35, fg='red')
click.secho(' JIREH SYSTEMS ', fg='green')
click.secho('-' * 35, fg='red')


def image_files_in_folder(folder):
    """Return the paths of all jpg/jpeg/png files directly inside *folder*."""
    return [
        os.path.join(folder, f)
        for f in os.listdir(folder)
        if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)
    ]


def convert_array(text):
    """Deserialize a numpy array from raw bytes produced by ``np.save``."""
    out = io.BytesIO(text)
    out.seek(0)
    return np.load(out)
def test_echo_via_pager(monkeypatch, capfd, cat):
    # Make click believe stdout is a TTY and force `cat` as the pager,
    # then check that the paged text round-trips unchanged.
    monkeypatch.setattr(click._termui_impl, 'isatty', lambda x: True)
    monkeypatch.setitem(os.environ, 'PAGER', cat)
    click.echo_via_pager('haha')
    captured = capfd.readouterr()
    assert captured[0] == 'haha\n'
def ls(manager):
    """List patents."""
    # One tab-separated line per patent: id, country, pipe-joined drug names.
    rows = []
    for patent in manager.list_patents():
        drug_names = "|".join(drug.name for drug in patent.drugs)
        rows.append(f'{patent.patent_id}\t{patent.country}\t{drug_names}')
    click.echo_via_pager('\n'.join(rows))
def less():
    # Build 200 numbered demo lines and page them.
    lines = ['Line %d' % idx for idx in range(200)]
    click.echo_via_pager('\n'.join(lines))
def feature(type):
    """ Show all the features variation of the given TYPE. """
    # Fetch the variations and page them, one per line.
    text = '\n'.join(app.show_feature_variations(type))
    click.echo_via_pager(text)
def pager():
    """Demonstrates using the pager."""
    # Each line carries a green line number followed by a greeting.
    lines = [
        f"{click.style(str(n), fg='green')}. Hello World!"
        for n in range(200)
    ]
    click.echo_via_pager("\n".join(lines))