def print_task_info(task_info: WdcTaskInfo):
    def print_section_header(text):
        return print(
            f'{os.linesep}{fg(0)}{bg(111)}{attr(1)}:: {text} {attr(0)}{os.linesep}'
        )

    def print_task_attribute(attribute, value):
        return print(f'{attribute} :\t{value}')

    print_section_header('Current')

    current = task_info.current
    print_task_attribute('id ', current.id)
    print_task_attribute('description', current.description)
    print_task_attribute('timestamp ', current.timestamp)
    print_task_attribute('start ', current.start)
    print_task_attribute('end ', current.end)
    print_task_attribute('tags ', current.tags)

    print_section_header('History')

    if not task_info.history:
        print_info('No history found')
    else:
        tt.print(
            list(map(lambda i: task_to_history_print(i), task_info.history)),
            header=['Timestamp', 'Date', 'Start', 'End', 'Tags', 'Description'],
            style=tt.styles.rounded_double)
def get_extensions_table(extensions: list, path: str) -> None:
    """Prints a table of installed extensions.

    Args:
        extensions: A list of installed extension meta data.
        path: A string for the path to installed Chrome extensions.

    Returns:
        None.
    """
    print(f"\nExtensions Found in {path}")
    data = []
    for ext in extensions:
        data.append([ext["name"], ext["version"].split("_")[0], ext["id"]])
    header = [
        "\033[1mName\033[0m",
        "\033[1mVersion\033[0m",
        "\033[1mIdentifier\033[0m",
    ]
    termtables.print(
        data,
        header=header,
        style=termtables.styles.thin_double,
        padding=(0, 1),
        alignment="lll",
    )
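A hedged usage sketch: the function only reads the "name", "version", and "id" keys, and the trailing "_0" mirrors how Chrome suffixes versioned extension directories. The sample entry and path below are made up for illustration.

# Hypothetical sample data and path, for illustration only.
sample_extensions = [
    {"name": "Example Extension", "version": "1.30.0_0",
     "id": "abcdefghijklmnopabcdefghijklmnop"},
]
get_extensions_table(sample_extensions, "/path/to/Chrome/Default/Extensions")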
def get_virustotal_table(results: list) -> None:
    """Builds a table of VirusTotal results of an extension's "external calls."

    Args:
        results: A list of VirusTotal results for passed-in hostnames.

    Returns:
        None.
    """
    if len(results) == 0:
        error("No external calls were found for this extension.", True)
    data = []
    for result in results:
        data.append(
            [result["url"], result["vt"]["positives"], result["vt"]["total"]])
    header = [
        "\033[1mHostname\033[0m",
        "\033[1mPositives\033[0m",
        "\033[1mTotal\033[0m",
    ]
    termtables.print(
        data,
        header=header,
        style=termtables.styles.thin_double,
        padding=(0, 1),
        alignment="lll",
    )
def list(ctx, output):
    """list command for users"""
    if ctx.obj["debug"]:
        click.echo("Debug mode initiated")
        set_trace()

    ctx.obj["user"]["output"] = output
    click.secho("user list subcommand", fg=colors.COLOR_SUCCESS)

    # Run API call
    response = ctx.obj["client"]._get_users()

    # Output values
    click.secho("response: \n\n", fg=colors.COLOR_SUCCESS)
    if ctx.obj["user"]["output"] == "table":
        data = [user_results_to_list(i) for i in response]
        tt.print(
            data,
            header=TERMTABLES_HEADER,
            style=tt.styles.double,
        )
    else:
        pp.pprint(response)
    click.secho("\n\n", fg=colors.COLOR_SUCCESS)
def display_answers(self):
    """Display the answers as a numbered list."""
    print('')
    title = f'=== {self.menu_title} ==='
    print(title)
    # This branch handles two lists sent in a tuple
    if isinstance(self.answers, tuple):
        to_substitute_list = self.answers[0]
        substituted_list = self.answers[1]
        zip_list = zip(to_substitute_list, substituted_list)
        table_header = ['', 'Produit à substituer', 'Produit de substitut']
        table_data = list()
        for i, (to_substitute, substituted) in enumerate(zip_list):
            line = [
                f'{i + 1}.', to_substitute['name'], substituted['name']
            ]
            table_data.append(line)
        if table_data:
            termtables.print(table_data,
                             header=table_header,
                             style=termtables.styles.ascii_thin,
                             padding=(0, 1),
                             alignment='lll')
        else:
            print('Pas de substitut enregistré')
    else:
        table_header = []
        table_data = []
        for i, choice in enumerate(self.answers):
            # This branch handles a simple string answer
            if isinstance(choice, str):
                line = [f'{i + 1}.', choice]
                if len(table_header) > 2:
                    blank_to_add = len(table_header) - len(line)
                    for _ in range(blank_to_add):
                        line.append('')
                table_data.append(line)
            # This branch handles an answer sent as a dict
            if isinstance(choice, dict):
                if not table_header:
                    table_header = [
                        '', 'Nom du produit', 'Nutriscore', 'Code barre'
                    ]
                line = [
                    f'{i + 1}.', choice['name'],
                    choice['nutriscore_grade'],
                    str(choice['code'])
                ]
                table_data.append(line)
        termtables.print(table_data,
                         header=table_header,
                         style=termtables.styles.ascii_thin)
def Table(self,
          data: List[List[str]],
          padding: Tuple[int, int] = (0, 1),
          alignment: str = "rl"):
    termtables.print(data,
                     style=termtables.styles.thin,
                     padding=padding,
                     alignment=alignment)
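A minimal call sketch, assuming an instance of the enclosing class (the name console below is hypothetical); with the default "rl" alignment, two-column rows render with the first column right-aligned and the second left-aligned.

# Illustrative only: the instance name and row contents are made up.
console.Table([["Total", "42"], ["Failed", "3"]])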
def render(self):
    """Render current state of warehouse as table in console."""
    repr = np.empty(self.grid_size, dtype="<U2")  # initialise warehouse
    for shelf in self.shelves:  # show shelves
        repr[self.pos_tuple(shelf)] = chr(9633)  # U+25A1 white square
    for pt in self.pick_pts:  # show pick points
        repr[self.pos_tuple(pt)] = chr(9635)  # U+25A3 white square containing black square
    repr[self.pos_tuple(self.position)] = "x"  # show current position of agent
    tt.print(repr)
def print_tasks(tasks):
    task_info = []
    for i, task in enumerate(tasks):
        task_info.append([(i + 1), task.name, task.content])
    if len(task_info) > 0:
        table_header = ('#', 'Name', 'Content')
        tt.print(task_info, header=table_header, padding=(0, 1))
    else:
        print('Nothing is selected')
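A usage sketch under the assumption that task objects expose .name and .content; the namedtuple below is a stand-in, not the real task type.

from collections import namedtuple

# Stand-in for the real task type; only .name and .content are read.
Task = namedtuple('Task', ['name', 'content'])
print_tasks([Task('Write report', 'Draft the Q3 summary')])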
def render_possible_actions(self):
    """Render warehouse with possible actions for each corridor field."""
    repr = np.empty(self.grid_size, dtype="<U2")  # initialise warehouse
    for shelf in self.shelves:  # show shelves
        repr[self.pos_tuple(shelf)] = chr(9633)
    for pt in self.pick_pts:  # show pick points
        repr[self.pos_tuple(pt)] = chr(9635)
    for i, corridor in enumerate(self.corridors):
        repr[self.pos_tuple(corridor)] = chr(self.action_symbols[i])
    tt.print(repr)
def print_describe_table(self, desc):
    table_data = []
    for key in desc:
        data = desc[key]
        if "label" in data and "value" in data:
            table_data.append([data["label"], data["value"]])
    tt.print(table_data,
             header=["Name", "Value"],
             padding=(0, 1),
             alignment="lr")
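The expected shape of desc can be read off the loop above: a mapping whose values are dicts carrying "label" and "value" keys, with incomplete entries skipped. A sketch with made-up data and a hypothetical instance name:

# Hypothetical payload; the "internal" entry is skipped for lacking a label.
desc = {
    "cpu": {"label": "CPU", "value": "4 vCPU"},
    "mem": {"label": "Memory", "value": "16 GiB"},
    "internal": {"value": "no label, so skipped"},
}
client.print_describe_table(desc)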
def render(self):
    """Render current state of warehouse as table in console."""
    repr = np.empty(self.grid_size, dtype="<U2")  # initialise warehouse
    for shelf in self.shelves:  # show shelves
        repr[self.pos_tuple(shelf)] = chr(9633)
    for pt in self.pick_pts:  # show pick points
        repr[self.pos_tuple(pt)] = chr(9635)
    for c in self.corridors:
        repr[self.pos_tuple(c)] = ""
    for agent in range(self.n_agents):
        repr[self.pos_tuple(self.position[agent])] += str(agent)  # show current position of agent
    tt.print(repr)
def list_all(ctx, date, all):
    tasks = list_tasks(date, all)
    tasks_to_print = []
    for task in tasks:
        tasks_to_print.append(task_to_printout(task))
    if not tasks:
        print_warning('No tasks found')
        ctx.exit()
    tt.print(tasks_to_print,
             header=['Id', 'Date', 'Start', 'End', 'Tags', 'Description'],
             style=tt.styles.thin_thick)
def search(self, criteria: str = '', limit: int = 10, format: str = 'table'):
    total_in_db = self.session.query(BooksTable.uid).count()
    r = self.session.query(BooksTable.title, BooksTable.date_published,
                           BooksTable.pages, BooksTable.url, BooksTable.isbn13)\
        .filter(BooksTable.title.like(criteria))\
        .order_by(desc(BooksTable.date_published))\
        .limit(limit)
    data = []
    # print(self.__default__orm)
    header = [
        colored('Date', "cyan", attrs=['bold']),
        colored('Pages', "cyan", attrs=['bold']),
        colored('ISBN13', "cyan", attrs=['bold']),
        colored('Title', "cyan", attrs=['bold']),
        colored('Url', "cyan", attrs=['bold'])
    ]
    for book in r:
        data.append([
            str(book.date_published), book.pages, book.isbn13,
            textwrap.fill(book.title, 90),
            textwrap.fill(book.url, 100)
        ])
    if format == 'table':
        if len(data) == 0:
            tt.print([[f"No results for: {criteria}"]],
                     style=tt.styles.ascii_thin)
        else:
            h = [header]
            h.extend(data)
            title = "---| " + colored("Results for:", "yellow") + colored(f" {criteria} ", "green") + \
                ", Total DB: " + colored(number_format(total_in_db), "green") + \
                ", ORM: " + colored(self.__default__orm, "green") + " |"
            t = AsciiTable(h, title=title)
            t.inner_row_border = True
            t.CHAR_OUTER_TOP_LEFT = "╭"
            t.CHAR_OUTER_BOTTOM_LEFT = "╰"
            t.CHAR_OUTER_BOTTOM_RIGHT = "╯"
            t.CHAR_OUTER_TOP_RIGHT = "╮"
            t.padding_left = 2
            t.justify_columns = {0: 'left', 1: 'left', 2: 'left'}
            print("\n")
            print(t.table)
            # tt.print(data, header=header, padding=(0, 1), style=tt.styles.ascii_thin, alignment='lll')
    elif format == 'json':
        print(json.dumps(data))
    return data
def print_stats(sources, keys, stats_sum):
    stats = list()
    for name, frames in sources.items():
        for key in keys:
            stats.append(
                make_stats(source=name,
                           key=key,
                           values=filter_not_none(frames[key])))
        if stats_sum:
            stats.append(
                make_stats(source=name,
                           key='sum',
                           values=sum_multiple(frames, keys)))
    metrics = list(stats[0].keys())
    termtables.print(
        [list(v.values()) for v in stats],
        header=metrics,
        style=termtables.styles.markdown,
    )
def get_reports_table(extensions: list) -> None:
    """Builds a table of installed extension details from CRXcavator.

    Args:
        extensions: A list of extension identifier strings.

    Returns:
        None.
    """
    data = []
    for extension in extensions:
        report = get_report(extension["id"])
        if report:
            version = report[-1]["version"]
            webstore = report[-1]["data"]["webstore"]
            risk = report[-1]["data"]["risk"]
            data.append([
                webstore["name"],
                extension["id"],
                version,
                webstore["last_updated"],
                round(webstore["rating"], 2),
                risk["total"],
            ])
    header = [
        "\033[1mName\033[0m",
        "\033[1mIdentifier\033[0m",
        "\033[1mVersion\033[0m",
        "\033[1mUpdated\033[0m",
        "\033[1mRating\033[0m",
        "\033[1mRisk\033[0m",
    ]
    termtables.print(
        data,
        header=header,
        style=termtables.styles.thin_double,
        padding=(0, 1),
        alignment="llllll",
    )
def create(ctx, payload, output):
    """create user from payload"""
    if ctx.obj["debug"]:
        click.echo("Debug mode initiated")
        set_trace()

    click.secho("user create subcommand", fg=colors.COLOR_SUCCESS)
    ctx.obj["user"]["output"] = output
    source = json.load(payload)
    click.secho("Data loaded: \n\n", fg=colors.COLOR_SUCCESS)
    click.secho("{}".format(source), fg=colors.COLOR_SUCCESS)
    # data = UserCreate(email=source["email"], password=source["password"])
    data = {"email": source["email"], "password": source["password"]}
    json_compatible_data = jsonable_encoder(data)
    click.secho("data: \n\n", fg=colors.COLOR_SUCCESS)
    click.secho("{}".format(data), fg=colors.COLOR_SUCCESS)

    # Run API call
    response = ctx.obj["client"]._post_create_user(json_compatible_data)

    # Output values
    click.secho("response: \n\n", fg=colors.COLOR_SUCCESS)
    if ctx.obj["user"]["output"] == "table":
        data = [user_results_to_list(i) for i in response]
        tt.print(
            data,
            header=TERMTABLES_HEADER,
            style=tt.styles.double,
        )
    else:
        pp.pprint(response)
    click.secho("\n\n", fg=colors.COLOR_SUCCESS)
def test_table():
    numpy.random.seed(0)
    data = numpy.random.rand(5, 2)
    string = tt.to_string(data)

    assert (string == """┌────────────────────┬────────────────────┐
│ 0.5488135039273248 │ 0.7151893663724195 │
├────────────────────┼────────────────────┤
│ 0.6027633760716439 │ 0.5448831829968969 │
├────────────────────┼────────────────────┤
│ 0.4236547993389047 │ 0.6458941130666561 │
├────────────────────┼────────────────────┤
│ 0.4375872112626925 │ 0.8917730007820798 │
├────────────────────┼────────────────────┤
│ 0.9636627605010293 │ 0.3834415188257777 │
└────────────────────┴────────────────────┘""")

    # test print
    tt.print(data)
import termtables as tt

header = ["a", "bb", "ccc"]
data = [
    [1, 2, 3],
    [613.23236243236, 613.23236243236, 613.23236243236],
]

tt.print(
    data,
    header=header,
    style=tt.styles.ascii_thin,
    padding=(0, 1),
    alignment="lcr",
)
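When the rendered table is wanted as a string (for logging or assertions) rather than printed, the test further up uses tt.to_string; a sketch, assuming to_string accepts the same keyword arguments as tt.print:

# Capture the same table as a string instead of writing it to stdout.
rendered = tt.to_string(data, header=header, style=tt.styles.ascii_thin)
print(rendered)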
def console_print_results(matrix_of_code_regions, opts):
    try:
        import termtables as tt
    except ImportError:
        print("error: termtables not found.")
        sys.exit(1)

    headers_names = [None] * (len(opts.file_names) + 1)
    headers_names[0] = " "
    max_code_regions = 0

    print("Input files:")
    for i in range(len(matrix_of_code_regions)):
        if max_code_regions < len(matrix_of_code_regions[i]):
            max_code_regions = len(matrix_of_code_regions[i])
        print("[f" + str(i + 1) + "]: " +
              get_filename_from_path(opts.file_names[i]))
        headers_names[i + 1] = "[f" + str(i + 1) + "]: "

    print("\nITERATIONS: " + str(matrix_of_code_regions[0][0].iterations) + "\n")

    for i in range(max_code_regions):
        print("\n-----------------------------------------\nCode region: " +
              str(i + 1) + "\n")
        table_values = [[[None] for i in range(len(matrix_of_code_regions) + 1)]
                        for j in range(7)]
        table_values[0][0] = "Instructions: "
        table_values[1][0] = "Total Cycles: "
        table_values[2][0] = "Total uOps: "
        table_values[3][0] = "Dispatch Width: "
        table_values[4][0] = "uOps Per Cycle: "
        table_values[5][0] = "IPC: "
        table_values[6][0] = "Block RThroughput: "
        for j in range(len(matrix_of_code_regions)):
            if len(matrix_of_code_regions[j]) > i:
                table_values[0][j + 1] = str(
                    matrix_of_code_regions[j][i].instructions)
                table_values[1][j + 1] = str(
                    matrix_of_code_regions[j][i].total_cycles)
                table_values[2][j + 1] = str(
                    matrix_of_code_regions[j][i].total_uops)
                table_values[3][j + 1] = str(
                    matrix_of_code_regions[j][i].dispatch_width)
                table_values[4][j + 1] = str(
                    round(matrix_of_code_regions[j][i].uops_per_cycle, 2))
                table_values[5][j + 1] = str(
                    round(matrix_of_code_regions[j][i].ipc, 2))
                table_values[6][j + 1] = str(
                    round(matrix_of_code_regions[j][i].block_rthroughput, 2))
            else:
                table_values[0][j + 1] = "-"
                table_values[1][j + 1] = "-"
                table_values[2][j + 1] = "-"
                table_values[3][j + 1] = "-"
                table_values[4][j + 1] = "-"
                table_values[5][j + 1] = "-"
                table_values[6][j + 1] = "-"
        tt.print(
            table_values,
            header=headers_names,
            style=tt.styles.ascii_thin_double,
            padding=(0, 1),
        )

        print("\nResource pressure per iteration: \n")
        table_values = [[[None] for i in range(
            len(matrix_of_code_regions[0][0].iteration_resource_pressure) + 1)]
            for j in range(len(matrix_of_code_regions) + 1)]
        table_values[0] = matrix_of_code_regions[0][0].name_target_info_resources
        for j in range(len(matrix_of_code_regions)):
            if len(matrix_of_code_regions[j]) > i:
                table_values[j + 1] = [
                    "[f" + str(j + 1) + "]: "
                ] + matrix_of_code_regions[j][i].iteration_resource_pressure
            else:
                table_values[j + 1] = [
                    "[f" + str(j + 1) + "]: "
                ] + len(matrix_of_code_regions[0]
                        [0].iteration_resource_pressure) * ["-"]
        tt.print(
            table_values,
            style=tt.styles.ascii_thin_double,
            padding=(0, 1),
        )
        print("\n")
def print_table(items, fields=['date', 'id'], sort=None, style='markdown'):
    _sort = fields[0] if sort is None else sort
    df = items_to_dataframe(items, sort=_sort)
    data = df[fields].values
    # Look the style up by name instead of eval-ing arbitrary input.
    tt.print(data, header=fields, style=getattr(tt.styles, style))
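A hypothetical call, assuming items is whatever items_to_dataframe consumes and that each record carries the listed fields; the records variable and the 'status' field below are illustrative.

# Illustrative: any style name defined in tt.styles (e.g. 'thin', 'markdown') works.
print_table(records, fields=['date', 'id', 'status'], sort='date', style='thin')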
if inStock:
    stock = color("OK", Colors.green)
    totalStock += 1
else:
    stock = color("SEM STOCK", Colors.red)
    outOfStock.append(title.replace(' | PCDIGA', ''))
stocks.append(stock)

# Table header
header = ["Produto", "Stock", "Preço"]
data = []

# Table contents - Loop products and add details to data var for table printing
for i, product in enumerate(products):
    data.append([titles[i], stocks[i], "€%s" % (prices[i])])

# Table footer
data.append(["", "TOTAL", round(total, 2)])

# Print table to STDOUT
tt.print(data, header=header)

client = Client(config.clientKey, api_token=config.apiToken)
if totalStock == len(products):
    client.send_message("Todos os %s produtos em stock!" % (totalStock))
else:
    client.send_message("%s de %s em stock. \nIndisponíveis: \n %s" %
                        (totalStock, len(products), '\r\n'.join(outOfStock)))
save_str += layer_name + delimiter + str(input_shape) + delimiter + str(
    output_shape) + delimiter + str(params) + delimiter + str(
        params_in_bytes) + delimiter + str(
            trainable_param) + delimiter + str(
                trainable_param_in_bytes) + "\n"
# print(layer, str(input_shape), str(output_shape), str(params))

import termtables as tt

header = [
    "Layer", "Input Shape", "Output Shape", "Num of Params",
    "Params (MB)", "Num of Trainable Params", "Trainable Params (MB)"
]
# data = [
#     [1, 2, 3], [613.23236243236, 613.23236243236, 613.23236243236]
# ]
print(row_data)
tt.print(row_data,
         header=header,
         style=tt.styles.ascii_thin,
         padding=(1, 1, 1, 1),
         alignment="ccccccc")

print("Total Param Memory : {} MB, Total Trainable Param Memory {} MB".format(
    total * 4 / (1024**2), trainable_params * 4 / (1024**2)))

with open("stats/" + network_name + ".info", "w") as fp:
    fp.write(save_str)
        item_['mix'],
        item_['sources'],
        approach=APPROACH,
        mask_type='soft',
        **KWARGS)
    estimates = separator()

    evaluator = evaluation.BSSEvalScale(
        list(item_['sources'].values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    output_path = os.path.join(RESULTS_DIR, f"{item_['mix'].file_name}.json")
    with open(output_path, 'w') as f:
        json.dump(scores, f)

pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
    if i == 0:
        separate_and_evaluate(item)
    else:
        pool.submit(separate_and_evaluate, item)
pool.shutdown(wait=True)

json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()

headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T

data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
def log_stats(self):
    # Calculate run time
    m = 60
    h = m * 60
    d = h * 24
    w = d * 7
    run_time = time.time() - self.ios[0].stime
    self.logger.info('\n\nUpdate interval saturated.')
    date_now = dt.now().strftime("%d/%m/%Y %H:%M")
    print(150 * '─')
    print('DATA COLLECTION STATISTICS')
    print(date_now)
    print("Running time: {0:.0f} hours {1:.0f} minutes. Iteration length {2} minutes."
          .format(run_time // h, (run_time % h) / m, self.update_interval))
    self._update_iter_filesizes()
    for i, io, curr, last in zip(range(len(self.ios)), self.ios,
                                 self.curr_filesizes, self.last_filesizes):
        if io.c_saved > 0:
            is_seq = len(self.iter_sizes_mb[i]) > 1
            upint_s = self.update_interval * 60
            iter_stats = self._calculate_iter_stats(i, io, curr, last, is_seq)
            self.logger.info("\n" + 120 * '=')
            print("\n// IO {0}: (STATS) ".format(io.ID))
            if iter_stats['total_size_gb'] < 1.0:
                daily = "\nDAILY {0:>12,.0f} tweets / {1:<7.1f} MB ".format(
                    io.daily_c_saved, iter_stats['total_size_gb'] * 1024)
            else:
                daily = "\nDAILY {0:>12,.0f} tweets / {1:<7.1f} GB".format(
                    io.daily_c_saved, iter_stats['total_size_gb'])
            daily += " | Min / Max daily iter (all time): {0:,.0f} / {1:,.0f} ".format(
                min(self.daily_extvals[i]['tweets_gained']),
                max(self.daily_extvals[i]['tweets_gained']))
            daily += " ( {0:,.0f} / {1:,.0f} ) tweets".format(
                min(self.extvals[i]['tweets_gained']),
                max(self.extvals[i]['tweets_gained']))
            daily += " | {0:,.1f} / {1:,.1f} ".format(
                min(self.daily_extvals[i]['size_gained_mb']),
                max(self.daily_extvals[i]['size_gained_mb']))
            daily += " ( {0:,.1f} / {1:,.1f} ) MB".format(
                min(self.extvals[i]['size_gained_mb']),
                max(self.extvals[i]['size_gained_mb']))
            print(daily)
            print("ITERATION: {0}".format(len(self.iter_sizes_mb[i])))
            l_base = self._format_base_stats(iter_stats, io, is_seq)
            l_base.append([27 * "=" for L in range(len(l_base[0]))])
            l_iter = self._format_iter_stats(iter_stats, run_time, upint_s,
                                             m, h, d, w)
            l_dev = None
            if is_seq:
                l_dev = self._format_deviation(iter_stats)
            l_agg = self._format_agg_stats(iter_stats, run_time, io, i,
                                           upint_s, m, h, d, w)
            for L in l_iter:
                l_base.append(L)
            if is_seq:
                for L in l_dev:
                    l_base.append(L)
            for L in l_agg:
                l_base.append(L)
            tt.print(np.array(l_base))
            self.logger.debug("STATSMODULE: IO {0}, is_ filter {1}".format(
                io.ID, io.is_filter))
            if io.is_filter:
                self._plot_cities(io)
        else:
            self.logger.info('No tweets recorded yet.')
    if len(self.iter_sizes_mb[0]) > 5:
        print("\nVolume of tweets as a function of time (minutes):")
        for i, _ in enumerate(self.ios):
            self._plot_volume(i)
def generate_summary(self):
    _CONVERSION_VALUE = _BYTE_TO_KILO_BYTE_RATIO
    if self._converter == 'MB':
        _CONVERSION_VALUE = _BYTE_TO_MEGA_BYTE_RATIO
    if self._converter == 'GB':
        _CONVERSION_VALUE = _BYTE_TO_GIGA_BYTE_RATIO
    if self._input_size is None or self._model is None:
        raise Exception("Input Size {} or Model {} is not defined".format(
            self._input_size, self._model))
    sm1 = self.network_summary()
    total = 0
    trainable_params = 0
    total_output = 0
    layers = []
    input_shapes = []
    output_shapes = []
    param_items = []
    param_items_bytes = []
    trainable_param_items_bytes = []
    trainable_params_list = []
    # append for table print
    row_data = []
    save_str = ""
    '''
    Generate the header of the stats file.
    Use the default delimiter or any symbol other than "[", "(" or ",".
    "," is used within internal data structures; it could complicate
    file reading with Excel, LibreOffice Calc, etc.
    '''
    if self._delimiter == ",":
        raise Exception(
            " ',' Is used within internal data structures, it could complicate file reading with Excel, LibCalc, etc"
        )
    if self._save:
        for col_id, col_name in enumerate(self._header):
            save_str += col_name + self._delimiter
        save_str += "\n"

    ################################################################################################################
    ########################################## Iterate Through Layers ##############################################
    ################################################################################################################
    for layer in sm1:
        layer_name = layer
        input_shape = str(sm1[layer]["input_shape"])
        output_value = sm1[layer]["output_shape"]
        output_shape = str(output_value)
        params = sm1[layer]["nb_params"]
        layers.append(layer_name)
        input_shapes.append(input_shape)
        output_shapes.append(output_shape)
        trainable_param = 0
        if isinstance(params, th.Tensor):
            params = params.item()
        param_items.append(params)
        params_in_bytes = params * self._bytes_per_param / _CONVERSION_VALUE
        param_items_bytes.append(params_in_bytes)
        if "trainable" in sm1[layer]:
            if sm1[layer]["trainable"] == True:
                trainable_param = params
                trainable_params += trainable_param
        trainable_param_items_bytes.append(trainable_param)
        trainable_param_in_bytes = trainable_param * self._bytes_per_param / _CONVERSION_VALUE
        total += params
        total_output += th.prod(th.Tensor(output_value)).item()
        row_data.append([
            layer_name, input_shape, output_shape, params, params_in_bytes,
            trainable_param, trainable_param_in_bytes
        ])
        if self._save:
            save_str += layer_name + self._delimiter + str(input_shape) + self._delimiter + str(
                output_shape) + self._delimiter + str(
                    params) + self._delimiter + str(
                        params_in_bytes) + self._delimiter + str(trainable_param) + self._delimiter + str(
                            trainable_param_in_bytes) \
                + "\n"
    alignment = ""

    ################################################################################################################
    ################################################################################################################
    ################################################################################################################
    # total number of parameters
    self._total_params = total
    # total memory of parameters
    self._total_params_memory = total * self._bytes_per_param / _CONVERSION_VALUE
    # total number of trainable parameters
    self._total_trainable_params = trainable_params
    # total memory of trainable parameters
    self._total_trainable_params_memory = trainable_params * self._bytes_per_param / _CONVERSION_VALUE
    # total number of forward parameters
    self._forward_params = total_output
    # total memory of forward parameters
    self._forward_params_memory = total_output * self._bytes_per_param / _CONVERSION_VALUE
    # backward and forward params are equal
    self._backward_params = self._forward_params
    self._backward_params_memory = self._forward_params_memory
    self._total_activation_parameters = self._forward_params + self._backward_params
    self._total_activation_memory = self._forward_params_memory + self._backward_params_memory
    self._total_input_params = th.prod(th.Tensor(
        self._input_size)).item() * self._batch_size
    self._total_input_memory = self._total_input_params * self._bytes_per_param / (
        _CONVERSION_VALUE)

    row_data.append([
        'Total Values', '', '',
        str(self._total_params),
        str(self._total_params_memory),
        str(self._total_trainable_params),
        str(self._total_trainable_params_memory)
    ])
    if self._save:
        save_str += 'Total Values' + self._delimiter + '' + self._delimiter + '' + self._delimiter \
            + str(self._total_params) + self._delimiter + str(self._total_params_memory) + self._delimiter \
            + str(self._total_trainable_params) + self._delimiter + str(
                self._total_trainable_params_memory) + "\n"

    if self._console:
        for _, _ in enumerate(self._header):
            alignment += "c"
        tt.print(row_data,
                 header=self._header,
                 style=tt.styles.ascii_thin,
                 padding=(1, 1, 1, 1),
                 alignment=alignment)

    summary_statement = "" + "\n"
    if self._console or self._save:
        self._delimiter = " = "
        summary_statement += "MiniBatch Size" + self._delimiter + str(
            self._batch_size) + "\n"
        summary_statement += "Total Input Parameters " + self._delimiter + str(
            self._total_input_params) + "\n"
        summary_statement += "Total Input Memory " + self._delimiter + str(
            self._total_input_memory) + " " + self._converter + "\n"
        summary_statement += 'Forward Parameters' + self._delimiter + str(
            self._forward_params) + "\n"
        summary_statement += 'Forward Parameters Memory' + self._delimiter + str(
            self._forward_params_memory) + " " + self._converter + "\n"
        summary_statement += 'Backward Parameters' + self._delimiter + str(
            self._backward_params) + "\n"
        summary_statement += 'Backward Parameters Memory' + self._delimiter + str(
            self._backward_params_memory) + " " + self._converter + "\n"
        summary_statement += 'Total Activation Parameters' + self._delimiter + str(
            self._forward_params + self._backward_params) + "\n"
        summary_statement += 'Total Activation Parameters Memory' + self._delimiter + str(
            (self._forward_params_memory + self._backward_params_memory)) + " " + self._converter + "\n"
        width, _ = click.get_terminal_size()
        click.echo('*' * width)
        print(summary_statement)
        click.echo('*' * width)
        save_str += summary_statement

    if self._save_path:
        with open(self._save_path, "w") as fp:
            fp.write(save_str)
    else:
        raise Exception("Save Path not specified")
"page": 1, "start": 1, "userIds": list(range(1, 10000)) }, timeout=20)["d"] print("Scanning users for matches...") results = [] for user in users: user["dist"] = distance(searchName, user["n"]) if (searchName in user["n"].lower()): user["dist"] = user["dist"] - 10 if (user["n"].lower().startswith(searchName)): user["dist"] = user["dist"] - 5 results.append(user) results = sorted(results, key=lambda k: k["dist"])[0:10] userResults = [] for result in results: userId = result["id"] userName = result["n"] userResults.append([userId, userName]) print("10 most relevant results") tt.print(userResults, header=["User ID", "Full Name"], style=tt.styles.rounded_thick)
def plot(self) -> None:
    """Plot the series and the parameters."""
    flag = 1
    while flag:
        plt.close()
        plt.ion()
        plt.plot(self.bins, self.z2n, label='Z2n Power', linewidth=2)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Power')
        plt.legend(loc='best')
        plt.tight_layout()
        try:
            stats.error(self)
            header = ["", "Z2N POWER", "GAUSSIAN FIT"]
            data = [
                ["Power", f"{self.power}", f"{self.gauss.power}"],
                ["Frequency", f"{self.frequency} Hz",
                 f"{self.gauss.frequency} Hz"],
                ["Frequency error", "_", f"+/- {self.gauss.errorf} Hz"],
                ["Period", f"{self.period} s", f"{self.gauss.period} s"],
                ["Period error", "_", f"+/- {self.gauss.errorp} s"],
                ["Pulsed Fraction", f"{self.pulsed * 100} %",
                 f"{self.gauss.pulsed * 100} %"],
            ]
            termtables.print(data, header)
            plt.close()
            plt.ion()
            plt.plot(self.bins, self.z2n, label='Z2n Power', linewidth=2)
            plt.plot(self.gauss.bins, self.gauss.z2n, color='tab:red',
                     label='Gaussian Fit', linewidth=1)
            plt.xlabel('Frequency (Hz)')
            plt.ylabel('Power')
            plt.legend(loc='best')
            plt.tight_layout()
        except IndexError:
            click.secho("Error on the selection.", fg='red')
        else:
            if not click.confirm("Select another region for the fit"):
                self.save_file()
                flag = 0
                click.secho("Save the results on a log file.", fg='yellow')
                default = "z2n_" + pathlib.Path(self.input).stem
                flag2 = 1
                while flag2:
                    log = click.prompt("\nName of the file", default,
                                       type=click.Path())
                    if pathlib.Path(f"{log}.log").is_file():
                        click.secho("File already exists.", fg='red')
                    else:
                        flag2 = 0
                with open(f"{log}.log", "w+") as logfile:
                    sys.stdout = logfile
                    self.get_input()
                    self.get_output()
                    self.get_format()
                    self.get_time()
                    self.get_exposure()
                    self.get_sampling()
                    self.get_nyquist()
                    self.get_fmin()
                    self.get_fmax()
                    self.get_delta()
                    self.get_bins()
                    self.get_harmonics()
                    click.secho("Periodogram values.", fg='yellow')
                    self.get_power()
                    self.get_frequency()
                    self.get_period()
                    self.get_pfraction()
                    click.secho("Gaussian values.", fg='yellow')
                    self.gauss.get_power()
                    self.gauss.get_frequency()
                    self.gauss.get_errorf()
                    self.gauss.get_period()
                    self.gauss.get_errorp()
                    self.gauss.get_pfraction()
                    sys.stdout = sys.__stdout__
                click.secho(f"Saved the results at {log}.log", fg='green')
                ])
            else:
                # Future expansion: add explanation of operation-key
                o365_data.append([
                    creationtime, clientip, operation, resultstatus, userid,
                    objectid
                ])
        except:
            # print(auditdata)
            failed_line_count += 1
            continue
        line_count += 1

# Print details
tt.print(o365_data, header=o365_header)

# Print Client IPs
tt_data = []
tt_header = ["Client IP", "Count"]
o365_client_ip = sorted(o365_client_ip.items(),
                        key=lambda x: x[1],
                        reverse=True)
for el in o365_client_ip:
    tt_data.append([el[0], el[1]])
tt.print(tt_data, header=tt_header)

# Print Operations
tt_data = []
tt_header = ["Operation", "Count"]
o365_unique_operations = sorted(o365_unique_operations.items(),