def list_plugins(args, config, pattern='*'):
    """Print a table of installed and available plugins matching `pattern`.

    Returns 0 on success, 1 when no plugin matched the pattern.
    """
    from rich.console import Console
    from rich.table import Table

    console = Console()
    table = Table(show_header=True, header_style="bold")
    for heading in ("Name", "Version", "Enabled", "Status"):
        table.add_column(heading)

    matched_any = False
    available = _get_available()
    installed = _get_installed(config)
    not_installed = set(available) - set(installed)

    # installed plugins first (flagging those with an update available)
    for name, filename in sorted(installed.items()):
        if not fnmatch(name, pattern):
            continue
        matched_any = True
        local_version = parse_version(analyze_plugin(filename)['__version__'])
        remote_version = None
        if name in available:
            remote_version = parse_version(
                analyze_plugin(available[name])['__version__'])
        status = "installed"
        if local_version and remote_version and remote_version > local_version:
            status = "installed (^)"  # newer version available
        plugin_cfg = config['main']['plugins'].get(name, {})
        enabled = 'enabled' if plugin_cfg.get('enabled') else 'disabled'
        table.add_row(name, '.'.join(local_version), enabled, status)

    # then plugins that are only available remotely
    for name in sorted(not_installed):
        if not fnmatch(name, pattern):
            continue
        matched_any = True
        remote_info = analyze_plugin(available[name])
        table.add_row(name, remote_info['__version__'], '-', 'available')

    if not matched_any:
        logging.info('Maybe try: pwnagotchi plugins update')
        return 1
    console.print(table)
    return 0
def printRepos():
    """Render a table of the authenticated user's repositories and return them
    as a list."""
    repoTable = Table(title="Repos")
    for heading in ("#", "Name", "URL", "Privacy", "Forked?"):
        repoTable.add_column(heading)
    # refresh the cached user object before listing repositories
    g.get_user().update()
    allRepos = []
    for row_number, repo in enumerate(g.get_user().get_repos(), start=1):
        allRepos.append(repo)
        repoTable.add_row(
            str(row_number),
            repo.name,
            repo.html_url,
            "Private" if repo.private else "Public",
            "Yes" if repo.fork else "No",
        )
    console.print(repoTable)
    return allRepos
def model_detailed_view(model: MLModel):
    """Print a detailed report for one model: basic info, converted-model
    info (inputs/outputs), and static/dynamic profiling results.

    Args:
        model: the model record to display.
    """
    # TODO: update the print fields
    dim_color = 'grey62'
    grid = Table.grid(padding=(0, 2))
    # Basic Info
    grid.add_row('ID', 'Architecture', 'Framework', 'Version', 'Pretrained Dataset', 'Metric', 'Score', 'Task', style='b')
    grid.add_row(
        str(model.id),
        model.architecture,
        model.framework.name,
        str(model.version),
        model.dataset,
        list(model.metric.keys())[0].name,  # TODO: display multiple metrics
        str(list(model.metric.values())[0]),
        model.task.name.replace('_', ' '))

    converted_grid = Table.grid(padding=(0, 2))
    # 5 label/value column pairs; labels right-aligned and dimmed
    for i in range(5):
        converted_grid.add_column(style=dim_color, justify='right')
        converted_grid.add_column()

    # Converted model info
    time_delta = humanize.naturaltime(datetime.now().astimezone() - model.create_time)
    converted_grid.add_row(
        Text('Converted Model Info', style='bold cyan3', justify='left'))
    converted_grid.add_row(
        'Serving Engine', model.engine.name,
        'Status', status_mapper[Status(model.status).name],
        'Creator', model.creator,
        'Created', time_delta,
    )
    first = True
    for input_ in model.inputs:
        converted_grid.add_row(
            Text('Inputs', style=f'b {dim_color}') if first else '', '',
            'Name', input_.name,
            'Shape', str(input_.shape),
            'Data Type', input_.dtype.name.split('_')[-1],
            'Format', input_.format.name,
        )
        first = False
    first = True
    for output_ in model.outputs:
        converted_grid.add_row(
            Text('Outputs', style=f'b {dim_color}') if first else '', '',
            'Name', output_.name,
            'Shape', str(output_.shape),
            'Data Type', output_.dtype.name.split('_')[-1],
            'Format', output_.format.name,
        )
        first = False
    converted_grid.add_row()

    # Profiling results
    converted_grid.add_row(
        Text('Profiling Results', style='bold cyan3', justify='left'))
    if not model.profile_result:
        converted_grid.add_row('N.A.')
    else:
        spr = model.profile_result['static_result']
        dprs = model.profile_result['dynamic_results']
        # Static profiling result
        converted_grid.add_row(
            Text('Static Result', style='bold turquoise2', justify='left'))
        if spr == 'N.A.':
            converted_grid.add_row(Text('N.A.', justify='left'))
        # TODO: render non-trivial static results (previously a dead `else: pass`)
        converted_grid.add_row()
        # Dynamic profiling results
        converted_grid.add_row(
            Text('Dynamic Results', style='bold turquoise2', justify='left'))
        for dpr in dprs:
            create_time = datetime.strptime(dpr['create_time'], '%Y-%m-%dT%H:%M:%S.%f')
            time_delta = humanize.naturaltime(datetime.now() - create_time)
            converted_grid.add_row(
                'Device Name', dpr['device_name'],
                'IP', dpr['ip'],
                'Device ID', dpr['device_id'],
                'Batch Size', str(dpr['batch']),
                # BUG FIX: time_delta is already a humanized string; passing it
                # through humanize.naturaltime() again is a type error.
                'Created', time_delta,
            )
            memory = dpr['memory']
            converted_grid.add_row(Text('GPU', style='b', justify='left'))
            converted_grid.add_row(
                'Memory Used',
                f'{humanize.naturalsize(memory["memory_usage"], binary=True)} '
                f'/ {humanize.naturalsize(memory["total_memory"], binary=True)}',
                'Util', f'{memory["utilization"] * 100:.1f} %')
            latency = dpr['latency']['inference_latency']
            converted_grid.add_row(
                Text('Inference Latency', style='b', justify='left'))
            converted_grid.add_row(
                'Average', f'{latency["avg"]:.3f} ms',
                'P50', f'{latency["p50"]:.3f} ms',
                'P95', f'{latency["p95"]:.3f} ms',
                'P99', f'{latency["p99"]:.3f} ms',
            )
            converted_grid.add_row(
                Text('Throughput', style='b', justify='left'))
            throughput = dpr['throughput']['inference_throughput']
            converted_grid.add_row('Inference', f'{throughput:.3f} req/s')

    console.print(grid)
    console.print(converted_grid)
border_style) yield new_line if _box and show_edge: yield _Segment(_box.get_bottom(widths), border_style) yield new_line if __name__ == "__main__": # pragma: no cover from rich.console import Console from rich.highlighter import ReprHighlighter from rich.table import Table as Table table = Table( title="Star Wars Movies", caption="Rich example table", caption_justify="right", ) table.add_column("Released", header_style="bright_cyan", style="cyan", no_wrap=True) table.add_column("Title", style="magenta") table.add_column("Box Office", justify="right", style="green") table.add_row( "Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690", )
def show_summary(summary_data):
    """Render the benchmark-regression summary: one row per model, one
    (expected, actual) column pair per metric, plus a date column.

    Actual values are colour-coded: green above expectation, white within
    +/-0.01, red below.
    """
    table = Table(title='Test Benchmark Regression Summary')
    table.add_column('Model')
    for metric in METRICS_MAP:
        table.add_column(f'{metric} (expect)')
        table.add_column(f'{metric}')
    table.add_column('Date')

    def pick_colour(value, expect):
        # tolerance band of 0.01 around the expected score
        if value > expect + 0.01:
            return 'green'
        if value >= expect - 0.01:
            return 'white'
        return 'red'

    for model_name, summary in summary_data.items():
        cells = [model_name]
        for metric_key in METRICS_MAP:
            if metric_key in summary:
                metric = summary[metric_key]
                expect = metric['expect']
                result = metric['result']
                colour = pick_colour(result, expect)
                cells.append(f'{expect:.2f}')
                cells.append(f'[{colour}]{result:.2f}[/{colour}]')
            else:
                cells.extend([''] * 2)
        cells.append(summary['date'] if 'date' in summary else '')
        table.add_row(*cells)
    console.print(table)
def configurationMenu():
    """Interactively ask for a Twitter account ID, show the account for
    confirmation, and persist the choice to TMBsettings.json on 'Y'."""
    clearConsole()
    prompt = Table(show_header=True, header_style="Green")
    prompt.add_column("Welcome to the twitter Markov bot")
    prompt.add_row(
        "Please enter the ID of the account that you want to track")
    console.print(prompt)
    account_id = input()
    account = Bot.getUser(account_id)

    clearConsole()
    confirm = Table(show_header=True, header_style="Green")
    confirm.add_column("Is it the right account? Y/N")
    confirm.add_row("Name: " + account.name)
    confirm.add_row("Screen Name: " + "@" + account.screen_name)
    confirm.add_row("Followers: " + str(account.followers_count))
    confirm.add_row("Location: " + account.location)
    console.print(confirm)

    answer = input()
    if answer in ("Y", "y"):
        settings["UserId"] = account_id
        settings["Name"] = account.name
        settings["Screen_Name"] = account.screen_name
        settings['Location'] = account.location
        with open("TMBsettings.json", "w", encoding="utf-8") as write_file:
            json.dump(settings, write_file)
    elif answer in ("N", "n"):
        pass  # user rejected the account; leave settings untouched
def display_B(one="opcode", two="imm[11]", three="imm[4:1]", four="funct3",
              five="rs1", six="rs2", seven="imm[10:5]",
              # BUG FIX: bit 31 of a B-type instruction is imm[12] in the
              # RISC-V spec, not imm[5].
              eight="imm[12]"):
    """Render the RISC-V B-type instruction layout as a table.

    Parameters map to the instruction fields from LSB to MSB; the row is
    printed MSB-first: imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1] |
    imm[11] | opcode.
    """
    table = Table(title="B-Type", show_header=True,
                  header_style="bold magenta", title_style="white")
    table.add_column("31:31(1)", width=10, justify="center")
    table.add_column("30:25(6)", width=10, justify="center")
    table.add_column("24:20(5)", width=12, justify="center")
    table.add_column("19:15(5)", width=10, justify="center")
    table.add_column("14:12(3)", width=10, justify="center")
    table.add_column("11:08(4)", width=10, justify="center")
    table.add_column("07:07(1)", width=10, justify="center")
    # BUG FIX: header typo — the opcode occupies bits 06:00, not 16:00
    # (cf. display_J which already reads "06:00(7)").
    table.add_column("06:00(7)", width=14, justify="center")
    table.add_row(eight, seven, six, five, four, three, two, one)
    console.print(table)
def displayData(stocks):
    """Print a summary table of all tracked stocks, refreshing each entry
    via update() as it is rendered."""
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("Stock")
    table.add_column("Value", justify="right")
    table.add_column("Profit/Loss", justify="right")
    table.add_column("Shares", justify="right")
    for stock in stocks:
        table.add_row(
            f"[bold yellow]{stock.stock}[/bold yellow]",
            f"[bold]{stock.value}[/bold]",
            colouredGains(stock.moneyGained),
            f"[bold]{stock.shareCount}[/bold]",
        )
        update(stock)
    print(table)
from rich.console import Console
from rich.table import Table

from data_dicts import vrf

# Build a three-column table of VRF parameters; the VRF name is shown only
# on the first row of each group so rows are visually grouped.
table = Table()
for name in "vrf param details".split():
    table.add_column(name, justify="left")

for vrf_name, params in vrf.items():
    for index, (param, details) in enumerate(params.items()):
        # normalise scalar values so "\n".join() below always receives a list
        if isinstance(details, str):  # idiom fix: was `type(details) == str`
            details = [details]
        table.add_row(vrf_name if index == 0 else "", param, "\n".join(details))

console = Console()
console.print(table)
def topk_table(scores: dict, i: int, total: int, k: int = 3):
    """Build a table of the top-`k` scoring algorithms.

    Args:
        scores: mapping scored by `topk`; keys are (algorithm, parameters)
            pairs, values are triples of ndcg scores.
        i: number of completed runs (for the progress title).
        total: total number of runs.
        k: how many top entries to show.

    Returns:
        The populated rich Table.
    """
    table = Table(title=f"{i}/{total} completed...")
    table.add_column("algorithm")
    table.add_column("parameters")
    table.add_column("ndcg (k=0.1)")
    table.add_column("ndcg (k=0.5)")
    table.add_column("ndcg (k=0.8)")
    # BUG FIX: the loop variable was named `k`, shadowing the `k` parameter.
    for key, values in topk(scores, k):
        table.add_row(key[0], key[1], str(values[0]), str(values[1]), str(values[2]))
    return table
# pip install rich
from rich.console import Console
from rich.table import Column, Table

console = Console()

table = Table(show_header=True, header_style="bold magenta")
table.add_column("Date", style="dim", width=12)
table.add_column("Title")
table.add_column("Production Budget", justify="right")
table.add_column("Box Office", justify="right")
# Typo fixes: "Dev 20, 2019" -> "Dec 20, 2019"; the budgets carried an extra
# trailing zero ("$275,000,0000").
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker",
              "$275,000,000", "$375,126,118")
table.add_row(
    "May 25, 2018",
    "[red]Solo[/red]: A Star Wars Story",
    "$275,000,000",
    "$393,151,347",
)
table.add_row(
    "Dec 15, 2017",
    "Star Wars Ep. VIII: The Last Jedi",
    "$262,000,000",
    "[bold]$1,332,539,889[/bold]",
)
console.print(table)
def print_series_to_exclude(series, total_filesize):
    """Live-render a table of series marked for exclusion, with a grand
    disk-usage total in the footer."""
    console = Console()
    table = Table(show_footer=True,
                  row_styles=["none", "dim"],
                  box=box.MINIMAL,
                  pad_edge=False)
    with Live(table, console=console, screen=False):
        # the "Title" footer holds the total label, "Used Diskspace" its value
        table.add_column("Release Year")
        table.add_column(
            "Title",
            Text.from_markup("[b][i]Total Used Diskspace", justify="right"))
        table.add_column("Used Diskspace",
                         filters.get_filesize_gb(total_filesize))
        for heading in ("Seasons", "Episodes", "Providers", "Ended",
                        "Full delete"):
            table.add_column(heading)

        for serie in series.values():
            table.add_row(
                str(serie["release_year"]),
                serie["title"],
                filters.get_filesize_gb(serie["filesize"]),
                filters.get_pretty_seasons(serie["seasons"]),
                filters.get_pretty_episodes(serie["episodes"]),
                serie["providers"],
                filters.bool2str(serie["ended"]),
                filters.bool2str(serie["full_delete"]),
            )
def print_movies_to_exclude(movies, total_filesize):
    """Live-render a table of movies marked for exclusion, with a grand
    disk-usage total in the footer."""
    console = Console()
    table = Table(show_footer=True,
                  row_styles=["none", "dim"],
                  box=box.MINIMAL,
                  pad_edge=False)
    with Live(table, console=console, screen=False):
        # Setup table columns and totals
        table.add_column("Release Date")
        table.add_column(
            "Title",
            Text.from_markup("[b][i]Total Used Diskspace", justify="right"))
        table.add_column("Used Diskspace",
                         filters.get_filesize_gb(total_filesize))
        table.add_column("Streaming Providers")

        for _, movie in movies.items():
            # BUG FIX: the original had a dead expression
            # `table.add_column("Release Date") or "Unknown"` — add_column
            # returns None, so "Unknown" was computed and discarded. The
            # intended fallback belongs on the per-movie release date.
            release_date = movie["release_date"] or "Unknown"
            title = movie["title"]
            diskspace = filters.get_filesize_gb(movie["filesize"])
            providers = ", ".join(movie["providers"])
            table.add_row(release_date, title, diskspace, providers)
def print_series_to_re_add(series):
    """Live-render a table of series that should be re-added (no footer)."""
    console = Console()
    table = Table(show_footer=False,
                  row_styles=["none", "dim"],
                  box=box.MINIMAL,
                  pad_edge=False)
    with Live(table, console=console, screen=False):
        for heading in ("Release Year", "Title", "Seasons", "Episodes",
                        "Ended"):
            table.add_column(heading)

        for serie in series.values():
            table.add_row(
                str(serie["release_year"]),
                serie["title"],
                filters.get_pretty_seasons(serie["seasons"]),
                filters.get_pretty_episodes(serie["episodes"]),
                filters.bool2str(serie["ended"]),
            )
def display_table(master_key, data):
    """Build and return a table of stored credentials, decrypting each
    password with `master_key`.

    `data` rows are (id, username, website, encrypted_password) records.
    """
    table = Table(title="Your details")
    table.add_column("Id", justify="right", style="red", no_wrap=True)
    table.add_column("Username", style="cyan")
    table.add_column("Website", style="magenta")
    table.add_column("Password", justify="right", style="green")
    for row in data:
        plain_password = decrypt(master_key, row[3])
        table.add_row(str(row[0]), row[1], row[2], plain_password)
    return table
def unicode_names() -> int:
    """
    Entry point for `se unicode-names`

    Reads strings from argv and/or stdin and prints one table row per
    character: the glyph, its code point, its Unicode name, and a link to
    its properties page. Returns 0.
    """
    parser = argparse.ArgumentParser(
        description=
        "Display Unicode code points, descriptions, and links to more details for each character in a string. Useful for differentiating between different flavors of spaces, dashes, and invisible characters like word joiners."
    )
    parser.add_argument("strings", metavar="STRING", nargs="*", help="a Unicode string")
    args = parser.parse_args()

    console = Console(
        highlight=False, theme=se.RICH_THEME
    )  # Syntax highlighting will do weird things when printing paths

    lines = []

    table = Table(show_header=False, show_lines=True, box=box.HORIZONTALS)
    table.add_column("Character", style="bold", width=1, no_wrap=True)
    table.add_column("Code point", style="dim", no_wrap=True)
    table.add_column("Description")
    table.add_column("Link")

    # accept piped input in addition to positional arguments
    if not sys.stdin.isatty():
        for line in sys.stdin:
            lines.append(line.rstrip("\n"))

    for line in args.strings:
        lines.append(line)

    for line in lines:
        for character in line:
            # BUG FIX: unicodedata.name() raises ValueError for characters
            # without a name (e.g. most control characters); fall back to an
            # empty description instead of crashing.
            table.add_row(
                f"{character}",
                "U+{:04X}".format(ord(character)),
                unicodedata.name(character, ""),
                "[link=https://www.fileformat.info/info/unicode/char/{:04X}]Properties page[/]"
                .format(ord(character)))

    console.print(table)

    return 0
if res == "Y" or res == "y": settings["UserId"] = UserId settings["Name"] = User.name settings["Screen_Name"] = User.screen_name settings['Location'] = User.location with open("TMBsettings.json", "w", encoding="utf-8") as write_file: json.dump(settings, write_file) elif res == "N" or res == "n": pass if settings["UserId"] == None: configurationMenu() #Main menu table = Table(show_header=True, header_style="Green") table.add_column("Twitter Markov Bot") table.add_row("1. Post a status") table.add_row("2. Post a status with a keyword") table.add_row("3. Reply to someone\'s last tweet") table.add_row("4. update replied list") table.add_row("5. update tweet stream") table.add_row("6. refresh profile picture") table.add_row("7. Quit") bot = Bot(settings["UserId"], "data_tweets.json", "data_tweets_only_replies.json") def menu1(): entered = ''
def print_files(self):
    """Print the current directory listing as a table.

    Rebuilds the module-level ``all_file`` index so entries can later be
    referenced by their displayed row number.
    """
    global all_file
    # show "/" for the root directory instead of an empty string
    now_path = path.path if path.path != "" else "/"
    all_file = []
    files_table = Table(title="你的位置" + now_path, style="yellow underline")
    files_table.add_column("序号", justify="center", style="red bold")
    files_table.add_column("类型", justify="left", style="green")
    files_table.add_column("名称", style="cyan")
    files_table.add_column("体积", style="blue", justify="center")
    self.origin_data = API.analyze(API.list_file(path.path))
    for num, (name, resp) in enumerate(self.origin_data.items(), start=1):
        all_file.append(name)
        # entries carrying a 'file' key are files; everything else is a folder
        kind = "文件" if self.origin_data[name].get('file') else "文件夹"
        files_table.add_row(str(num), kind, name, self.hum_convert(resp['size']))
    console.print(files_table)
def display_J(one="opcode", two="rd", three="imm[19:12]", four="imm[11]",
              five="imm[10:1]", six="imm[20]"):
    """Render the RISC-V J-type instruction layout as a table.

    Parameters map to the fields from LSB to MSB; the row is printed
    MSB-first: imm[20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode.
    """
    table = Table(title="J-Type", show_header=True,
                  header_style="bold magenta", title_style="white")
    columns = (
        ("31:31(1)", 10),
        ("30:21(10)", 13),
        ("20:20(1)", 9),
        ("19:12(8)", 23),
        ("11:07(5)", 23),
        ("06:00(7)", 14),
    )
    for header, width in columns:
        table.add_column(header, width=width, justify="center")
    table.add_row(six, five, four, three, two, one)
    console.print(table)
def main(buy_amount=1000, start=None, end=None, symbols=(), one_buy=False, frequency=None):
    """Simulate periodically buying `buy_amount` of each symbol between
    `start` and `end`, reinvesting dividends and applying stock splits, then
    print a per-symbol results table (value, shares, ROI, CAGR, dividends).

    NOTE(review): assumes `get_data` returns a pandas frame indexed by date
    with Close / Dividends / "Stock Splits" fields — confirm against the
    data layer.
    """
    data = get_data(symbols, start=start, end=end)
    grouper = _default_grouper
    if frequency == "D":
        # buy every trading day instead of the default grouping
        grouper = lambda ts: (ts.year, ts.month, ts.day)
    buys = extract_buy_days(data, grouper=grouper)
    divs = extract_field(data, "Dividends")
    splits = extract_field(data, "Stock Splits")
    # closing prices on dividend days, used to reinvest payouts
    div_prices = data.T[divs.index].T.Close
    all_days = sorted(set(buys.index) | set(divs.index) | set(splits.index))
    shares = dict.fromkeys(symbols, 0)
    dividends = dict.fromkeys(symbols, 0)
    bought = False
    for day in all_days:
        if day in buys.index and not bought:
            for symbol, price in data.loc[day].Close.items():
                amt = float(buy_amount / price)
                shares[symbol] = shares.get(symbol, 0) + amt
            if one_buy:
                # single lump-sum purchase: stop buying after the first buy day
                bought = True
        if day in divs.index:
            for symbol, amt in divs.loc[day].items():
                v = shares[symbol] * amt
                price = div_prices[symbol][day]
                # dividend reinvested at that day's closing price
                samt = v / price
                dividends[symbol] = dividends.get(symbol, 0) + v
                shares[symbol] = shares.get(symbol, 0) + samt
        if day in splits.index:
            for symbol, amt in splits.loc[day].items():
                if amt != 0.0:
                    # split ratio multiplies the share count
                    shares[symbol] = shares.get(symbol, 0) * amt
    final_day = max(data.index)
    # NOTE(review): assumes one buy per month (len(buys)/12 ≈ years); with
    # frequency="D" this overstates the elapsed time — confirm intent.
    years = len(buys) / 12
    total_invested = buy_amount if one_buy else buy_amount * len(buys)
    console.print(f"Start date: {min(data.index)}")
    console.print(f"Total Invested: ${total_invested:,}")
    table = Table(show_header=True, header_style="bold magenta", box=box.MINIMAL_HEAVY_HEAD)
    table.add_column("Symbol")
    table.add_column("Value", justify="right")
    table.add_column("Shares", justify="right")
    table.add_column("ROI", justify="right")
    table.add_column("CAGR", justify="right")
    table.add_column("Div Yield", justify="right")
    results = []
    for symbol, price in data.loc[final_day].Close.items():
        value = shares[symbol] * price
        roi = int(100 * ((value - total_invested) / total_invested))
        # compound annual growth rate over the simulated period
        ann_ret = 100 * (math.pow(value / total_invested, 1 / years) - 1)
        fvalue = f"${value:,.2f}"
        dvalue = f"${dividends[symbol]:.2f}"
        results.append((symbol, fvalue, shares[symbol], roi, ann_ret, dvalue))
    # sort ascending by CAGR so the best performer prints last
    for r in sorted(results, key=lambda x: x[4]):
        table.add_row(r[0], r[1], f"{r[2]:.2f}", f"{r[3]:.2f}%", f"{r[4]:.2f}%", r[5])
    console.print(table)
def pretty_print_gitlab_list(elements: list[GitlabElement], name):
    """Print a section named `name` containing all the `elements`.

    For each element, its title, the relative date it was last updated at
    and the related project name are printed. Draft entries are dimmed;
    everything else is bold. Empty lists print nothing.
    """
    if not elements:
        return
    table = Table(title=name, box=box.ROUNDED)
    for heading, kwargs in (
        ("Project", {"justify": "right", "style": "cyan", "no_wrap": True}),
        ("Title", {"style": "magenta"}),
        ("Labels", {"justify": "center", "style": "green"}),
        ("Updated at", {"justify": "left", "style": "green"}),
        ("URL", {"justify": "left", "style": "blue"}),
    ):
        table.add_column(heading, **kwargs)
    for element in elements:
        row_style = "dim" if element.title.startswith("Draft:") else "bold"
        table.add_row(
            element.project,
            element.title,
            element.labels,
            element.updated_at,
            element.url,
            style=row_style,
        )
    Console().print(table)
def to_build_tree(ydoc, variants, config, cbc, selected_features):
    """Expand a recipe document into concrete, topologically sorted outputs,
    applying the variant matrix to each output.

    NOTE(review): relies on `Output`, `toposort`, `console` and the recipe
    schema defined elsewhere in this project.
    """
    # print one variant table per output
    for k in variants:
        table = Table(show_header=True, header_style="bold")
        table.title = f"Output: [bold white]{k}[/bold white]"
        table.add_column("Package")
        table.add_column("Variant versions")
        for pkg, var in variants[k].items():
            table.add_row(pkg, "\n".join(var))
        console.print(table)

    # first we need to perform a topological sort taking into account all the outputs
    if ydoc.get("outputs"):
        outputs = [
            Output(
                o,
                config,
                parent=ydoc,
                conda_build_config=cbc,
                selected_features=selected_features,
            )
            for o in ydoc["outputs"]
        ]
        outputs = {o.name: o for o in outputs}
    else:
        # single-output recipe: the document itself is the output
        outputs = [
            Output(
                ydoc,
                config,
                conda_build_config=cbc,
                selected_features=selected_features,
            )
        ]
        outputs = {o.name: o for o in outputs}

    if len(outputs) > 1:
        sort_dict = {
            k: [x.name for x in o.all_requirements()] for k, o in outputs.items()
        }
        tsorted = toposort.toposort(sort_dict)
        # keep only real outputs (toposort may also yield external requirement names)
        tsorted = [o for o in tsorted if o in sort_dict.keys()]
    else:
        tsorted = [o for o in outputs.keys()]

    final_outputs = []
    for name in tsorted:
        output = outputs[name]
        if variants.get(output.name):
            v = variants[output.name]
            combos = []
            differentiating_keys = []
            for k in v:
                # keys with more than one value differentiate the builds
                if len(v[k]) > 1:
                    differentiating_keys.append(k)
                combos.append([(k, x) for x in v[k]])

            # cartesian product over all variant values → one build per combo
            all_combinations = tuple(itertools.product(*combos))
            all_combinations = [dict(x) for x in all_combinations]
            for c in all_combinations:
                x = output.apply_variant(c, differentiating_keys)
                final_outputs.append(x)
        else:
            x = output.apply_variant({})
            final_outputs.append(x)

    # move the (single) intermediate build, if any, to the front of the list
    temp = final_outputs
    final_outputs = []
    has_intermediate = False
    for o in temp:
        if o.sections["build"].get("intermediate"):
            if has_intermediate:
                raise RuntimeError(
                    "Already found an intermediate build. There can be only one!"
                )
            final_outputs.insert(0, o)
            has_intermediate = True
        else:
            final_outputs.append(o)

    # Note: maybe this should happen _before_ apply variant?!
    if has_intermediate:
        # inherit dependencies
        def merge_requirements(a, b):
            # append every requirement from `a` that `b` doesn't already have
            b_names = [x.name for x in b]
            for r in a:
                if r.name in b_names:
                    continue
                else:
                    b.append(r)

        intermediate = final_outputs[0]
        for o in final_outputs[1:]:
            merge_requirements(
                intermediate.requirements["host"], o.requirements["host"]
            )
            merge_requirements(
                intermediate.requirements["build"], o.requirements["build"]
            )
            # downstream outputs also inherit the intermediate's variant keys
            merged_variant = {}
            merged_variant.update(intermediate.config.variant)
            merged_variant.update(o.config.variant)
            o.config.variant = merged_variant

    return final_outputs
def show_bak_list(filename: Optional[Path] = None, relative_paths: bool = False):
    """ Prints list of .bakfiles with metadata

    Arguments:
        filename (str|os.path, optional):
            List only `filename`'s .bakfiles
        relative_paths (bool):
            Show each entry's original file relative to the current directory
    """
    bakfiles: List[bakfile.BakFile]
    bakfiles = db_handler.get_bakfile_entries(filename) if filename else \
        db_handler.get_all_entries()
    console = Console()
    if not bakfiles:
        console.print(f"No .bakfiles found for "
                      f"(unknown)" if filename else "No .bakfiles found")
        return
    _title = f".bakfiles of (unknown)" if \
        filename else ".bakfiles"
    table = Table(title=_title, show_lines=True, box=box.HEAVY_EDGE)
    table.add_column("")
    table.add_column("Original File")
    table.add_column("Date Created")
    table.add_column("Last Modified")
    for i, _bakfile in enumerate(bakfiles, start=1):
        # BUG FIX: the relative path used to be computed from the `filename`
        # argument, which showed the same path on every row, raised
        # AttributeError when listing all entries (filename is None), and
        # passed a non-str Path to add_row. Derive it from each entry's own
        # original path instead.
        # NOTE(review): relative_to() raises ValueError for paths outside the
        # current directory — confirm callers only pass cwd-relative requests.
        orig_path = Path(_bakfile.orig_abspath).relative_to(Path.cwd()) \
            if relative_paths else _bakfile.orig_abspath
        table.add_row(str(i),
                      str(orig_path),
                      _bakfile.date_created.split('.')[0],
                      _bakfile.date_modified.split('.')[0])
    console.print(table)
def main():
    """Build a compressed LHAPDF set: copy the selected replicas out of the
    prior set, rewrite the .info file, and compute the new central member as
    the average of the selected replicas.

    NOTE(review): depends on `arg_parser`, `console`, the `lhapdf` /
    `lhapdf-config` installation and `trange` defined/imported elsewhere.
    """
    args = arg_parser()
    compressed_file = args.infile
    # Reading results from file (JSON with the prior set name and the
    # indices of the selected replicas)
    with open(compressed_file) as results_file:
        results = json.load(results_file)
    pdfset_name = results["pdfset_name"]
    index = results["index"]
    # Array of index
    nbcomp = len(index)
    # Get LHAPDF datadir by shelling out to lhapdf-config
    lhapdf_dir = Popen(["lhapdf-config", "--datadir"], stdout=PIPE)
    pdf_dir, _ = lhapdf_dir.communicate()
    pdf_dir = pdf_dir.decode("utf-8")
    pdf_dir = pdf_dir.replace("\n", "")
    # Create output file
    output = pdfset_name + "_compressed_" + str(nbcomp)
    # Create Output folders
    if not os.path.exists(output):
        os.mkdir(output)
    else:
        pass
    src_str = pdf_dir + "/" + pdfset_name + "/" + pdfset_name
    dst_str = output + "/" + output
    # Copy the LHAPDF replicas to the output file
    cindex = []
    console.print("\n• Copying the selected replicas to compressed set:",
                  style="bold blue")
    with trange(nbcomp) as iter_index:
        for ix, idx in enumerate(iter_index):
            indx = int(index[idx])  # Make sure it's an integer
            cindex.append(indx)
            # Extension name corresponding to the prior
            # Number of PDF replicas < 10000 (4-digit zero padding)
            if indx < 10:
                ext_name_prior = "000" + str(indx)
            elif indx < 100:
                ext_name_prior = "00" + str(indx)
            elif indx < 1000:
                ext_name_prior = "0" + str(indx)
            else:
                ext_name_prior = str(indx)
            # Extension name for Compressed replicas (renumbered from 1)
            if (ix + 1) < 10:
                ext_name_compress = "000" + str(ix + 1)
            elif (ix + 1) < 100:
                ext_name_compress = "00" + str(ix + 1)
            elif (ix + 1) < 1000:
                ext_name_compress = "0" + str(ix + 1)
            else:
                ext_name_compress = str(ix + 1)
            # Construct path names
            src = src_str + "_" + ext_name_prior + ".dat"
            dst = dst_str + "_" + ext_name_compress + ".dat"
            # copy source replica to the compressed set
            shutil.copy(src, dst)
            iter_index.set_description(
                f"copy original_{indx} to compressed_{ix+1}")
    # Construct .info file for compressed set (only NumMembers changes;
    # +1 accounts for the central member 0000)
    src = src_str + ".info"
    dst = dst_str + ".info"
    src_file = open(src, "r")
    dst_file = open(dst, "w")
    for line in src_file.readlines():
        if "NumMembers:" in line:
            dst_file.write("NumMembers: " + str(nbcomp + 1) + "\n")
        else:
            dst_file.write(line)
    dst_file.close()
    src_file.close()
    # Fetching info from Prior Central PDF
    console.print("\n• Fetching information from original central PDF.",
                  style="bold blue")
    w = open(src_str + "_0000.dat", "r")
    xpdf = []
    xgrid, qgrid, fgrid = [], [], []
    textxs, textqs, textfs = [], [], []
    # Removing the info in the head (skip until the "---" separator)
    for _ in range(0, 10):
        if "--" in w.readline():
            break
    # Init grid size count
    s = 0
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("N", justify="left")
    table.add_column("Subgrids", justify="center", width=35)
    # each subgrid is: x line, q line, flavour line, then nx*nq value lines
    while True:
        textxs.append(w.readline())
        xs = [float(el) for el in textxs[s].split()]
        textqs.append(w.readline())
        qs = [float(el) for el in textqs[s].split()]
        textfs.append(w.readline())
        fs = [int(float(el)) for el in textfs[s].split()]
        if len(xs) == 0:
            # blank x line means no more subgrids
            break
        xgrid.append(xs)
        qgrid.append(qs)
        fgrid.append(fs)
        nx = len(xgrid[s])
        nq = len(qgrid[s])
        table.add_row(f"{s}",
                      f"{len(xgrid[s])} {len(qgrid[s])} {len(fgrid[s])}")
        # skip the value block and the trailing "---" line of this subgrid
        for ix in range(0, nx):
            for iq in range(0, nq):
                w.readline().split()
        w.readline()
        s += 1
    w.close()
    console.print(table)
    # Reading XPDF: evaluate every selected replica on the prior's grid
    console.print("\n• Extract grid information from compressed set:",
                  style="bold blue")
    pdf = lhapdf.mkPDFs(pdfset_name)
    with trange(len(cindex)) as iter_index:
        for irep in iter_index:
            iter_index.set_description(f"Reading Replica {irep}")
            xpdf.append([])
            # xpdf[irep][subgrid][ix][iq][ifl]
            for ss in range(s):
                xpdf[irep].append([])
                for ix in range(len(xgrid[ss])):
                    xpdf[irep][ss].append([])
                    for iq in range(len(qgrid[ss])):
                        xpdf[irep][ss][ix].append([])
                        for ifl in range(len(fgrid[ss])):
                            xfq_result = pdf[cindex[irep]].xfxQ(
                                fgrid[ss][ifl], xgrid[ss][ix], qgrid[ss][iq])
                            # guard against NaNs from the PDF interpolation
                            if math.isnan(xfq_result):
                                xfq_result = 0
                            xpdf[irep][ss][ix][iq].append(xfq_result)
    # Construct compressed central PDF as the mean over selected replicas
    console.print("\n• Computing central replicas.", style="bold blue")
    w = open(dst_str + "_0000.dat", "w")
    w.write("PdfType: central\n")
    w.write("Format: lhagrid1\n---\n")
    for ss in range(s):
        # re-emit the subgrid headers verbatim from the prior
        w.write(textxs[ss])
        w.write(textqs[ss])
        w.write(textfs[ss])
        for ix in range(len(xgrid[ss])):
            for iq in range(len(qgrid[ss])):
                w.write(" ")
                for ifl in range(len(fgrid[ss])):
                    # NOTE(review): `sum` shadows the builtin here — works,
                    # but worth renaming (e.g. `acc`) in a later cleanup.
                    sum = 0
                    for irep in range(len(cindex)):
                        sum += xpdf[irep][ss][ix][iq][ifl]
                    sum /= nbcomp
                    print("%14.7E" % sum, end=' ', file=w)
                w.write("\n")
        w.write("---\n")
    w.close()
# ax/fig/plt/df are defined earlier in this script (not shown here)
ax.set_extent((-4, 8, 42, 51))
ax.set_yticks([])
ax.spines["geo"].set_visible(False)
fig.savefig("tour_de_france.png")
plt.show()

# Extra code to display the result in the terminal
import locale

from rich.console import Console
from rich.table import Table

# use the system locale so %B renders month names in the local language
locale.setlocale(locale.LC_ALL, "")
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Début", justify="right")
table.add_column("Fin", justify="right")
table.add_column("Durée", justify="right")

# one row per trajectory: start time, stop time, duration
for trajectoire in Collection(df):
    table.add_row(
        f"{trajectoire.start:%d %B %H:%M}",
        f"{trajectoire.stop:%d %B %H:%M}",
        f"{trajectoire.duree}",
    )
console.print(table)
def render_tables():
    """Exercise rich Table rendering under many option combinations and
    return the accumulated console output as one string.

    NOTE(review): this reads like a regression-test fixture — the
    Measurement asserts encode expected layout widths for exactly this
    table content; do not change the data without updating them.
    """
    console = Console(
        width=60,
        force_terminal=True,
        file=io.StringIO(),  # capture output instead of a real terminal
        legacy_windows=False,
        color_system=None,
        _environ={},
    )
    table = Table(title="test table", caption="table caption", expand=False)
    table.add_column("foo", footer=Text("total"), no_wrap=True, overflow="ellipsis")
    table.add_column("bar", justify="center")
    table.add_column("baz", justify="right")

    table.add_row("Averlongwordgoeshere", "banana pancakes", None)

    # measured width is the same whether or not expand is set
    assert Measurement.get(console, console.options, table) == Measurement(41, 48)
    table.expand = True
    assert Measurement.get(console, console.options, table) == Measurement(41, 48)

    # render at a sweep of widths, then with each justification
    for width in range(10, 60, 5):
        console.print(table, width=width)

    table.expand = False
    console.print(table, justify="left")
    console.print(table, justify="center")
    console.print(table, justify="right")

    assert table.row_count == 1

    table.row_styles = ["red", "yellow"]
    table.add_row("Coffee")
    table.add_row("Coffee", "Chocolate", None, "cinnamon")

    assert table.row_count == 3

    # toggle display options one at a time between renders
    console.print(table)
    table.show_lines = True
    console.print(table)
    table.show_footer = True
    console.print(table)
    table.show_edge = False
    console.print(table)
    table.padding = 1
    console.print(table)
    table.width = 20
    # a fixed width pins both the minimum and maximum measurement
    assert Measurement.get(console, console.options, table) == Measurement(20, 20)
    table.expand = False
    assert Measurement.get(console, console.options, table) == Measurement(20, 20)
    table.expand = True
    console.print(table)
    table.columns[0].no_wrap = True
    table.columns[1].no_wrap = True
    table.columns[2].no_wrap = True
    console.print(table)
    table.padding = 0
    table.width = 60
    table.leading = 1
    console.print(table)
    return console.file.getvalue()
def store():
    """Show the store price table (based on the player's current stock) and
    loop until a valid purchase menu option is chosen."""
    # current cost of the player's holdings at the store's unit prices:
    # parts $10, ammo $2, clothing $40, food $0.20, oxen $40
    p = player.parts * 10.00
    b = player.bullets * 2.00
    c = player.clothes * 40.00
    f = player.food * 0.20
    o = player.oxen * 40.00
    console = Console()

    # Store interface
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("Goods")
    table.add_column("Cost", justify="right")
    # reuse the precomputed totals instead of recomputing each price inline
    table.add_row("1. Oxen", f"[green]${o}[/green]")
    table.add_row("2. Food", f"[green]${f}[/green]")
    table.add_row("3. Clothing", f"[green]${c}[/green]")
    table.add_row("4. Ammunition", f"[green]${b}[/green]")
    table.add_row("5. Spare Parts", f"[green]${p}[/green]")
    table.add_row("\nTotal", f"\n[green]${o+f+c+b+p}[/green]")
    console.print(table)

    while True:
        print("Which item would you like to buy?")
        selection = input("\n-->")
        if selection == "1":
            oxen()
            break
        elif selection == "2":
            food()
            break
        elif selection == "3":
            clothes()
            break
        elif selection == "4":
            bullets()
            break
        elif selection == "5":
            parts()
            break
        else:
            # BUG FIX: builtin print() does not interpret rich markup, so the
            # [red] tags were shown literally; route through the rich console.
            console.print("\n[red]Invalid Selection[/red]\n")
            continue
"$848,998,877", ], ] console = Console() BEAT_TIME = 0.04 @contextmanager def beat(length: int = 1) -> None: yield time.sleep(length * BEAT_TIME) table = Table(show_footer=False) table_centered = Align.center(table) console.clear() with Live(table_centered, console=console, screen=False, refresh_per_second=20): with beat(10): table.add_column("Release Date", no_wrap=True) with beat(10): table.add_column("Title", Text.from_markup("[b]Total", justify="right")) with beat(10): table.add_column("Budget", "[u]$412,000,000", no_wrap=True)
def model_view(model_group: List[MLModel], quiet=False, list_all=False):
    """Print a table of models, or just their IDs when `quiet` is set.

    When more than five rows would be shown and `list_all` is False, only
    the head and tail are printed with an ellipsis row in between.
    """
    if quiet:
        # print only ID of models
        console.print(*(str(model.id) for model in model_group), sep='\n')
        return

    table = Table(show_header=True, header_style="bold magenta")
    for heading in ('ID', 'ARCH NAME', 'FRAMEWORK', 'ENGINE', 'VERSION',
                    'DATASET', 'METRIC', 'SCORE'):
        table.add_column(heading)
    table.add_column('TASK', width=15)
    table.add_column('STATUS')

    # build (model, grouped) pairs for `single_model_view`, interleaving
    # (None, False) separators between groups
    model_args = []
    for index, model in enumerate(model_group):
        model_args.append((model, index != 0))
        model_args.append((None, False))  # group separator
    if model_args:
        model_args.pop()  # drop the trailing separator

    if list_all or len(model_args) <= 5:
        # show everything
        for model, top in model_args:
            table.add_row(*single_model_view(model, top))
    else:
        # head ... tail with an ellipsis row in between
        for model, top in model_args[:4]:
            table.add_row(*single_model_view(model, top))
        table.add_row(*['...'] * len(table.columns))
        table.add_row(*single_model_view(*model_args[-1]))

    console.print(table)
cumulative_cases = ['student_cases_cumulative', 'staff_cases_cumulative'] for case in cumulative_cases: if entry[case].isnumeric() or is_number(entry[case]) == True: entry[case] = int(entry[case]) total_cases += entry[case] else: entry[case] = 0 entry['total_cases'] = total_cases for case in new_cases: if entry[case].isnumeric() == False: entry[case] = 0 data.append(entry) table = Table(title="Top Schools") table.add_column("School name", style="blue", no_wrap=True) table.add_column("Total cases", style="red", no_wrap=False) table.add_column("New student cases", style="magenta", no_wrap=False) table.add_column("New staff cases", style="magenta", no_wrap=False) table.add_column("Number of students (est.)", style="green", no_wrap=False) table.add_column("Percentage (est.)", style="cyan", no_wrap=False) NUMBER_OF_SCHOOLS = 25 top_schools = sorted(data, key=lambda i: i['total_cases'], reverse=True)[:NUMBER_OF_SCHOOLS] for school in track(top_schools): school_name = school['school_or_school_district'] total_cases = school['total_cases'] new_student_cases = school['student_cases_new'] new_staff_cases = school['staff_cases_new']