def list_datasets(basedir, db, index):
    """List local store content."""
    # Read the index from the optional file or URL. By default, the index
    # that is specified in the environment is loaded.
    loader = DictLoader(util.read_index(index)) if index is not None else UrlLoader()
    store = RefStore(basedir=basedir, loader=loader, connect_url=db)
    datasets = store.list()
    headers = ['Name', 'Size', 'Downloaded', 'Package']
    data = list()
    # Maintain the maximum width for each column.
    widths = [len(h) + 1 for h in headers]
    # Sort datasets by name before output.
    for dataset in sorted(datasets, key=lambda d: d.name):
        row = [
            dataset.identifier,
            '{:.2a}'.format(DataSize(dataset.filesize)),
            ' '.join(dataset.created_at.isoformat()[:19].split('T')),
            '{} {}'.format(dataset.package_name, dataset.package_version)
        ]
        for i in range(len(row)):
            w = len(row[i]) + 1
            if w > widths[i]:
                widths[i] = w
        data.append(row)
    tp.table(data, headers=headers, width=widths, style='grid', out=util.TPrinter())
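
# `util.TPrinter` above is project-specific and not shown here. tableprint only
# needs a file-like object with write() (and flush()) for its `out` argument,
# so a minimal hypothetical stand-in could look like this:
class TPrinter:
    """Minimal writer that forwards tableprint output to stdout."""

    def write(self, text):
        print(text, end='')

    def flush(self):
        pass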
def __repr__(self):
    def truncate(str_, length, separator="..."):
        if len(str_) <= length:
            return str_
        start = math.ceil((length - len(separator)) / 2)
        end = math.floor((length - len(separator)) / 2)
        return f"{str_[:start]}{separator}{str_[len(str_) - end:]}"

    io = StringIO("")
    n_columns = len(self._controlled_by.dtype.names)
    by = self._controlled_by.copy().astype(
        list(zip(self._controlled_by.dtype.names, ["O"] * n_columns)))
    by["position"] = [
        ", ".join([f"{x:.2f}" for x in p]) for p in by["position"]
    ]
    by["actor_id"] = [truncate(p, 20) for p in by["actor_id"]]
    by["vehicle_id"] = [truncate(p, 20) for p in by["vehicle_id"]]
    by["shadow_actor_id"] = [
        truncate(p, 20) for p in by["shadow_actor_id"]
    ]
    by["is_boid"] = [str(bool(x)) for x in by["is_boid"]]
    by["is_hijacked"] = [str(bool(x)) for x in by["is_hijacked"]]
    by["actor_type"] = [
        str(_ActorType(x)).split(".")[-1] for x in by["actor_type"]
    ]
    # XXX: tableprint crashes when there's no data
    if by.size == 0:
        by = [[""] * n_columns]
    tp.table(by, self._controlled_by.dtype.names, style="round", out=io)
    return io.getvalue()
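
# For illustration, a standalone copy of the inline truncate helper above: it
# keeps the head and tail of a long id around an ellipsis so every cell fits
# the 20-character column. The sample id is hypothetical.
import math

def truncate(str_, length, separator="..."):
    if len(str_) <= length:
        return str_
    start = math.ceil((length - len(separator)) / 2)
    end = math.floor((length - len(separator)) / 2)
    return f"{str_[:start]}{separator}{str_[len(str_) - end:]}"

# The result is exactly `length` characters long.
assert truncate("vehicle-0123456789abcdef", 20) == "vehicle-0...89abcdef"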
def demo_class():
    print("Below are tables validating operations. The second table")
    print("involves an int or float, in both operation orders.")
    a = val_uncert(1, 0.5)
    b = val_uncert(3, 1.2)
    c = val_uncert(3, 2.0)
    tp.table(width=[8, 8, 12],
             headers=[' ', 'Value', 'Uncertainty'],
             data=[['      a', a.val, a.uncert],
                   ['      b', b.val, b.uncert],
                   ['    a+b', (a + b).val, (a + b).uncert],
                   ['    a-b', (a - b).val, (a - b).uncert],
                   ['    a*b', (a * b).val, (a * b).uncert],
                   ['    a/b', (a / b).val, (a / b).uncert],
                   ['    b/a', (b / a).val, (b / a).uncert],
                   ['   a**b', (a**b).val, (a**b).uncert],
                   [' exp(a)', (exp(a)).val, (exp(a)).uncert],
                   ['(a+b)-b', ((a + b) - b).val, ((a + b) - b).uncert],
                   ['(a*b)/b', ((a * b) / b).val, ((a * b) / b).uncert]])
    tp.table(width=[8, 8, 12],
             headers=[' ', 'Value', 'Uncertainty'],
             data=[['      c', c.val, c.uncert],
                   ['    4+c', (4 + c).val, (4 + c).uncert],
                   ['    c+4', (c + 4).val, (c + 4).uncert],
                   ['    4-c', (4 - c).val, (4 - c).uncert],
                   ['    c-4', (c - 4).val, (c - 4).uncert],
                   ['    4*c', (4 * c).val, (4 * c).uncert],
                   ['    c*4', (c * 4).val, (c * 4).uncert],
                   ['    4/c', (4 / c).val, (4 / c).uncert],
                   ['    c/4', (c / 4).val, (c / 4).uncert],
                   ['   4**c', (4**c).val, (4**c).uncert],
                   ['   c**4', (c**4).val, (c**4).uncert]])
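
# `val_uncert` is not defined in this snippet. A minimal hypothetical sketch,
# assuming independent uncertainties that propagate in quadrature; only + and *
# are shown, the other operators would follow the same pattern.
import math

class val_uncert:
    def __init__(self, val, uncert):
        self.val, self.uncert = val, uncert

    def __add__(self, other):
        # For a + b: u = sqrt(u_a**2 + u_b**2)
        return val_uncert(self.val + other.val,
                          math.hypot(self.uncert, other.uncert))

    def __mul__(self, other):
        # For a * b: relative uncertainties add in quadrature
        # (assumes non-zero values).
        v = self.val * other.val
        return val_uncert(v, abs(v) * math.hypot(self.uncert / self.val,
                                                 other.uncert / other.val))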
def final_save(self, model_name, save_dir):
    """
    Print and save the best results
    Args:
        model_name: name used in the saved result file names
        save_dir: directory for saving results
    """
    if (self.save_precision is not None) and (self.save_recall is not None):
        tp.banner('These are the best results!')
        mean = (self.save_precision[100] + self.save_precision[200] +
                self.save_precision[300]) / 3
        data = [[
            self.save_precision[100], self.save_precision[200],
            self.save_precision[300], mean, self.best_auc, self.f1_score
        ]]
        headers = ['P@100', 'P@200', 'P@300', 'Mean', 'AUC', 'Max F1']
        tp.table(data, headers)
        ensure_folder(save_dir)
        np.save(os.path.join(save_dir, '{}_recall.npy'.format(model_name)),
                self.save_recall[:2000])
        np.save(os.path.join(save_dir, '{}_precision.npy'.format(model_name)),
                self.save_precision[:2000])
    else:
        logger.error('No model result to save')
def help(self):
    header = ("command", "description")
    mx_cmd_size = len(header[0])
    mx_desc_size = len(header[1])
    bases = []
    bizs = []
    for name, obj in self.get_entrypoints().items():
        mx_cmd_size = max(mx_cmd_size, len(name))
        mx_desc_size = max(mx_desc_size, len(obj._doc))
        if obj._base:
            bases.append((name, obj._doc))
        else:
            bizs.append((name, obj._doc))
    if bizs:
        bizs.sort()
        print_formatted_text("\nbiz commands:")
        tableprint.table(bizs, header,
                         width=(mx_cmd_size + 5, mx_desc_size + 5),
                         style='grid')
    if bases:
        print_formatted_text("\ncommon commands:")
        bases.sort()
        tableprint.table(bases, header,
                         width=(mx_cmd_size + 5, mx_desc_size + 5),
                         style='grid')
def print_metrics(period, met, instances):
    print(met['MetricName'])
    for region in instances.keys():
        headers = np.array(["ID", "Min", "Max", "Average", "Sum", "Sample count"])
        watch = regions_cloudwatches[region]
        metrics = []
        for instance_id in instances[region].keys():
            if not instances[region][instance_id].state['Name'] == 'running':
                continue
            try:
                data = metric_for_instance(period, met, instance_id, watch)
                # Attach units to the statistics where it helps readability.
                for data_key in ['Minimum', 'Maximum', 'Average', 'Sum']:
                    if data['Unit'] == 'Bytes':
                        data[data_key] = sizeof_fmt(data[data_key])
                    elif data['Unit'] == 'Percent':
                        data[data_key] = f"{data[data_key]} %"
                    elif not data['Unit'] == 'Count':
                        data[data_key] = f"{data[data_key]} {data['Unit']}"
                metrics.append([instance_id, data['Minimum'], data['Maximum'],
                                data['Average'], data['Sum'],
                                data['SampleCount']])
            except Exception:
                print(f"Couldn't get metrics for {instance_id}")
        if len(metrics) > 0:
            print(f"{region}:")
            metrics = np.array(metrics)
            width = np_len(np.append(headers.reshape(1, -1), metrics,
                                     axis=0)).max(axis=0)
            tp.table(metrics, headers, width=width)
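
# `np_len` above is assumed to be an element-wise string-length helper; a
# minimal sketch. Applied to the stacked headers-plus-data array, .max(axis=0)
# then yields the widest cell in each column.
import numpy as np

np_len = np.vectorize(lambda cell: len(str(cell)))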
def print_table(headers, columns, data):
    rows = []
    for index, value in enumerate(columns):
        # Each row is its column label followed by the matching data entries.
        tmp = [value]
        tmp.extend(data[index])
        rows.append(tmp)
    tp.table(rows, headers)
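
# Example call with hypothetical values: each entry of `columns` labels the
# row built from the matching entry of `data`.
print_table(['Metric', 'Min', 'Max'],
            ['latency', 'throughput'],
            [[1.2, 9.8], [350, 410]])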
def list_repository(index):
    """List repository index content."""
    # Read the index from the optional file or URL. By default, the index
    # that is specified in the environment is loaded.
    loader = DictLoader(util.read_index(index)) if index is not None else UrlLoader()
    datasets = RepositoryManager(doc=loader.load()).find()
    headers = ['Identifier', 'Name', 'Description']
    data = list()
    # Maintain the maximum width for each column.
    widths = [len(h) + 1 for h in headers]
    # Sort datasets by name before output.
    for dataset in sorted(datasets, key=lambda d: d.name):
        desc = dataset.description if dataset.description is not None else ''
        row = [dataset.identifier, dataset.name, desc]
        for i in range(len(row)):
            w = len(row[i]) + 1
            if w > widths[i]:
                widths[i] = w
        data.append(row)
    tp.table(data, headers=headers, width=widths, style='grid', out=util.TPrinter())
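
# The width bookkeeping repeated in the two listings above could be factored
# into a small helper; a sketch (the name is illustrative, not part of the
# original code):
def column_widths(headers, rows, pad=1):
    widths = [len(h) + pad for h in headers]
    for row in rows:
        widths = [max(w, len(str(cell)) + pad) for w, cell in zip(widths, row)]
    return widths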
def pcb_print(pcb_info):
    print('\033[96m', end='')  # switch terminal text to bright cyan
    tp.table(pcb_info,
             headers=['PID', 'Arrival Time', 'Burst Time', 'Priority', 'State'],
             width=12)
    print('\033[0m', end='')  # reset terminal colors
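
# Example call with hypothetical process control blocks, one row each:
pcb_print([[1, 0, 5, 2, 'READY'],
           [2, 3, 8, 1, 'RUNNING']])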
def draw_table(self, header, data):
    strio = io.StringIO()
    tableprint.table(data, header, out=strio)
    rows = strio.getvalue().split('\n')
    for row in rows:
        if len(row) > 0:
            self.poutput(row)
def result(method, c=1, n=50, mx1=1, vx1=2, my1=1, vy1=2,
           mx2=10, vx2=2, my2=10, vy2=2):
    if method == "gradient":
        [w, c0, c1] = gradient(n, mx1, vx1, my1, vy1, mx2, vx2, my2, vy2, c)
        print("Gradient descent :\n")
    if method == "newton":
        [w, c0, c1] = newton(n, mx1, vx1, my1, vy1, mx2, vx2, my2, vy2, c)
    print("w")
    print(w, end="\n\n")
    print("Confusion Matrix :")
    headers = [" ", "Predict c1", "Predict c2"]
    r1 = ["Is c1", c0, n - c0]
    r2 = ["Is c2", n - c1, c1]
    tp.table([r1, r2], headers)
    print("Sensitivity (Successfully predict cluster 1):" + str(c0 / n))
    print("Specificity (Successfully predict cluster 2):" + str(c1 / n))
    return w
def deploy_get(config, id):
    """Trigger a specific deploy"""
    # `header` and `output` are module-level in the original source; `output`
    # supports getvalue(), i.e. something StringIO-like.
    client = Deploy(config.client)
    result = client.get(id)
    data = response('deploy', 'trigger', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def server_get(config, id):
    """Get info from server"""
    client = Server(config.client)
    result = client.get(id)
    data = response('server', 'get', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def server_list(config):
    """List servers"""
    client = Server(config.client)
    result = client.list()
    data = response('server', 'list', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def __printTable__(title, data, headers):
    tp.banner(title)
    columns_max_width = __calculateColumnsMaxWidth__(headers, data)
    tp.table(data=data, headers=headers, width=columns_max_width,
             style='fancy_grid')
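
# `__calculateColumnsMaxWidth__` is not shown in this snippet; a plausible
# sketch that returns the widest cell (or header) per column, plus a little
# padding:
def __calculateColumnsMaxWidth__(headers, data):
    rows = [list(headers)] + [[str(cell) for cell in row] for row in data]
    return [max(len(cell) for cell in column) + 2 for column in zip(*rows)]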
def environment_get(config, id):
    """Get info from environment"""
    client = Environment(config.client)
    result = client.get(id)
    data = response('environment', 'get', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def environment_list(config, repository=None):
    """List environments"""
    client = Environment(config.client)
    result = client.list(repository)
    data = response('environment', 'list', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def output(cls, ns, explorer):
    if ns.output_format == "ascii_table":
        headers = ["schema", "table", "column", "has_pii"]
        tableprint.table(explorer.get_tabular(ns.list_all), headers)
    elif ns.output_format == "json":
        print(json.dumps(explorer.get_dict(), sort_keys=True, indent=2,
                         cls=PiiTypeEncoder))
    elif ns.output_format == "db":
        DbStore.save_schemas(explorer)
def output(cls, ns, explorer):
    if ns.catalog["format"] == "ascii_table":
        headers = ["schema", "table", "column", "has_pii"]
        tableprint.table(explorer.get_tabular(ns.list_all), headers)
    elif ns.catalog["format"] == "json":
        FileStore.save_schemas(explorer)
    elif ns.catalog["format"] == "db":
        DbStore.save_schemas(explorer)
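
# A hypothetical shape for the rows that get_tabular() is assumed to return in
# the two snippets above, matching the headers they pass:
rows = [
    ["public", "users", "email", True],
    ["public", "users", "id", False],
]
tableprint.table(rows, ["schema", "table", "column", "has_pii"])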
def deploy_list(config, repository, environment):
    """List deploys"""
    client = Deploy(config.client)
    result = client.list(repository, environment)
    data = response('deploy', 'list', result)
    tableprint.table(data, headers=header, width=int(config.width),
                     style=config.style, out=output)
    click.echo(output.getvalue())
def print_assignment(assignment):
    header = ['Case', 'Team']
    data = []
    for team in sorted(assignment.keys()):
        for each in assignment[team]:
            data.append([each, team])
    tableprint.table(data, header, style='fancy_grid', width=20)
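
# Example call with a hypothetical team-to-cases mapping:
print_assignment({'Team A': ['case-1', 'case-3'],
                  'Team B': ['case-2']})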
def display(recomm):
    """
    This function displays the recommended tracks.
    @params recomm: recommended tracks, each composed of track name, artist,
                    and acceptance coefficient.
    """
    headers = ['Track', 'Artist', '[Similarity + Popularity] Coefficient']
    tp.table(recomm, headers)
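
# Example call with hypothetical recommendations, one (track, artist,
# coefficient) row each:
display([("Track A", "Artist X", 0.87),
         ("Track B", "Artist Y", 0.73)])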
def dispatch(cls, ns):
    logging.debug("File Dispatch entered")
    explorer = cls(ns)
    explorer.scan()
    if ns.catalog['format'] == "ascii_table":
        headers = ["Path", "Mime/Type", "pii"]
        tableprint.table(explorer.get_tabular(), headers)
    elif ns.catalog['format'] == "json":
        FileStore.save_schemas(explorer)
def _report_result(self):
    """
    Report AUC value and precisions
    """
    mean = (self.precision_100 + self.precision_200 + self.precision_300) / 3
    data = [[self.precision_100, self.precision_200, self.precision_300,
             mean, self.current_auc, self.current_f1, self.best_auc]]
    headers = ['P@100', 'P@200', 'P@300', 'Mean', 'AUC', 'Max F1', 'Best-AUC']
    tp.table(data, headers)
def test_table():
    """Tests the table function"""
    output = StringIO()
    table([[1, 2, 3], [4, 5, 6]], 'ABC', style='round', width=5, out=output)
    assert output.getvalue() == '╭───────┬───────┬───────╮\n│ A │ B │ C │\n├───────┼───────┼───────┤\n│ 1 │ 2 │ 3 │\n│ 4 │ 5 │ 6 │\n╰───────┴───────┴───────╯\n'  # noqa

    output = StringIO()
    table(["bar"], "foo", style='grid', width=3, out=output)
    assert output.getvalue() == '+---+---+---+\n| f| o| o|\n+---+---+---+\n| b| a| r|\n+---+---+---+\n'  # noqa
def scan(dic):
    final_data = []
    print(Colors.GREEN + "\n [+] List:\t" + Colors.ORANGE)
    for host, port in dic.items():
        findVuln(host, port, final_data)
    # "cabeceras" is Spanish for "headers".
    hdHost = Colors.GREEN + "Host" + Colors.DEFAULT
    hdUsr = Colors.GREEN + "Username" + Colors.DEFAULT
    hdPass = Colors.GREEN + "Password" + Colors.DEFAULT
    cabeceras = [hdHost, hdUsr, hdPass]
    tp.table(final_data, cabeceras, width=15)
def printTable(ax):
    a = []
    for i in ax:
        a.append(i[:])
    for i in range(6):
        a[i].insert(0, i)
    print(tp.top(7, 3))
    print(tp.row(["x/y", 0, 1, 2, 3, 4, 5], 3))
    print(tp.bottom(7, 3))
    print("\r", end='\r\r\r')
    tp.table(a, None, '5g', 5, 'fancy_grid')
def query_hyrise(host, port, query, print_result=True):
    try:
        url = "http://{host}:{port}/query".format(host=host, port=port)
        data = "query={data}".format(data=query)
        result = requests.post(url, data).json()
        if print_result and contains_table(result):
            tableprint.table(result['rows'], result['header'])
        else:
            logging.warning(
                pprint.pformat(result, indent=20, width=120, compact=True))
    except requests.RequestException as e:
        logging.error(e)
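
# Example invocation (hypothetical host, port, and query):
query_hyrise("localhost", 5000, "SELECT * FROM companies")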
def volumes_pretty_print(volumes):
    def get_volume_array(volume):
        # For creation date, we should parse as datetime and then
        # strftime("%m/%d/%Y, %H:%M:%S")
        return np.array([volume.create_time.strftime("%m/%d/%Y, %H:%M:%S"),
                         volume.id, volume.iops, volume.volume_type,
                         volume.size])

    for region in volumes.keys():
        print(f"{region}:")
        headers = np.array(["Creation Date", "ID", "IOPS", "Type", "Size"])
        data = np.array(list(map(get_volume_array, volumes[region].values())))
        width = np_len(np.append(headers.reshape(1, -1), data,
                                 axis=0)).max(axis=0)
        tp.table(data, headers, width=width)
def images_pretty_print(images):
    def get_image_array(image):
        # For creation date, we should parse as datetime and then
        # strftime("%m/%d/%Y, %H:%M:%S")
        return np.array([image.creation_date, image.id, image.name,
                         image.image_type])

    for region in images.keys():
        print(f"{region}:")
        headers = np.array(["Creation Date", "ID", "Name", "Type"])
        data = np.array(list(map(get_image_array, images[region].values())))
        width = np_len(np.append(headers.reshape(1, -1), data,
                                 axis=0)).max(axis=0)
        tp.table(data, headers, width=width)
print " " final_data = [] try: for obj in range(0,totUsr): temp = [] _usuario = dataJson["list"][obj]["uid"] _password = dataJson["list"][obj]["pwd"] _role = dataJson["list"][obj]["role"] temp.append(_usuario) temp.append(_password) temp.append(_role) final_data.append(temp) hdUsr = Colors.GREEN + "Username" + Colors.DEFAULT hdPass = Colors.GREEN + "Password" + Colors.DEFAULT hdRole = Colors.GREEN + "Role ID" + Colors.DEFAULT cabeceras = [hdUsr, hdPass, hdRole] tp.table(final_data, cabeceras, width=20) except Exception, e: print "\n [!]: "+str(e) print " [+] "+ str(dataJson) print "\n"
import tableprint
import numpy as np

data = np.random.randn(10, 3)
headers = ['Column A', 'Column B', 'Column C']
tableprint.table(data, headers)
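
# tableprint also ships a TableContext context manager for streaming rows one
# at a time (per the tableprint README); a small sketch with random data:
import time

import numpy as np
import tableprint as tp

data = np.random.randn(10, 3)
with tp.TableContext(['A', 'B', 'C']) as t:
    for k in range(10):
        time.sleep(0.1)  # simulate data arriving incrementally
        t(data[k])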
def main():
    """Search in multiple torrent sites.

    Usage: katcr [options] [interactive options] <SEARCH_TERM>

    Currently available search engines:

    - Katcr
    - ThePirateBay
    - Nyaa
    - Skytorrents
    - Digbt

    Options:
        -e --search-engines=<SearchEngine>  Torrent search engine to use
                                            [default: All].
        -p --pages=<PAGES_NUM>              Number of pages to lookup
                                            [default: 1]
        -d --disable-shortener              Disable url shortener
        -s --shortener=<SHORTENER_URL>      Use given magnet shortener to
                                            prettify urls.
                                            [default: http://www.shortmag.net]
        -t --token=<SHORTENER_TOKEN>        Shortener token to use, if required
        -t --token_file=<S_TOKEN_FILE>      Shortener token file

    Interactive Options:
        -i --interactive                    Enable interactive mode
        -o --open                           Launch with default torrent app
                                            in interactive mode [default: True]
        -h --help                           Show this help screen
        -v --verbose                        Enable debug mode

    katcr Copyright (C) 2017 David Francos Cuartero
    This program comes with ABSOLUTELY NO WARRANTY; This is free software,
    and you are welcome to redistribute it under certain conditions;
    """
    opt = docopt(main.__doc__, version="1.0.1")
    logger = Gogo(__name__, verbose=opt.get('--verbose')).logger

    search_res = search_in_engines(logger, opt['--search-engines'],
                                   opt["<SEARCH_TERM>"],
                                   int(opt.get("--pages")[0]))

    if not opt['--disable-shortener']:
        shortener = get_shortener_from_opts(opt)
        with suppress(TypeError):
            search_res = list(get_from_short(shortener, search_res))

    if not search_res:
        return

    if not opt['--interactive']:
        # Size each column to its longest entry.
        return tableprint.table(
            search_res, ['Description', 'Size', 'Link'],
            width=[max(len(a[p]) for a in search_res)
                   for p in range(0, len(search_res[0]))])

    res = {limit_terminal_size(a): b for a, _, b in search_res}
    result = res[prompt([List('Link', message="", choices=res.keys())])['Link']]
    if opt['--open']:
        return subprocess.check_call(['xdg-open', result])