def create_table(self):
    """Print an ASCII comparison table of the two parsed output files.

    Chooses the first-column header and table title from
    ``self.comparison_type`` ('event' or 'metric'), then renders every
    entry of ``self.results_list`` with right-justified value columns.

    Fix: the original used Python-2-only ``print`` statements; they are
    now ``print()`` calls (identical output, runs on Python 3).
    """
    title = ""
    print("\nComparing file names:")
    print("File 1 = %s\nFile 2 = %s" % (self.file_names[0], self.file_names[1]))
    print("\nNOTE:\nA raise in number of all elements represent")
    print("a decrease in the performance of the application.")
    print("Therefore, the smallest the percentage, the better")
    print("the application performance.\n")
    elem_name = ''
    if self.comparison_type == 'event':
        elem_name = 'Event Name'
        title = "----- Comparison Table for Events"
    elif self.comparison_type == 'metric':
        elem_name = 'Metric Name'
        title = "----- Comparison Table for Metrics"
    table_data = [[elem_name, 'File 1', 'File 2', 'Percentage']]
    for entry in self.results_list:
        # if float, convert to string so to display two decimal points
        if isinstance(entry[3], float):
            entry[3] = str("%.2f" % entry[3])
        table_data.append(entry)
    compare_table = AsciiTable(table_data, title)
    # column 0 is the element name; all value columns right-align
    compare_table.justify_columns = {1: 'right', 2: 'right', 3: 'right'}
    print(compare_table.table)
def get_exposure_matrix(rosters, exclude=()):
    """Build a symmetric co-occurrence ("exposure") matrix for players.

    Counts, for every pair of player short names, how many rosters
    contain both players, and renders the counts as an ASCII table with
    row/column name labels.

    :param rosters: iterable of roster objects exposing ``.players`` and
        supporting ``short_name in roster`` membership tests.
    :param exclude: players to omit entirely (fix: the original used a
        mutable default ``[]``; an empty tuple is read-only and safe).
    :return: the rendered table string.
    """
    players = set()
    for r in rosters:
        for p in r.players:
            if p in exclude:
                continue
            players.add(p)
    sorted_names = sorted([p.short_name for p in players])
    player_matrix = np.zeros((len(players), len(players)), dtype=int)
    for r in rosters:
        for i, p1 in enumerate(sorted_names):
            # hoisted: membership of p1 does not depend on the inner loop
            if p1 not in r:
                continue
            for j, p2 in enumerate(sorted_names):
                if p2 in r:
                    player_matrix[i, j] += 1
    rows = [[''] + sorted_names]
    for i, p in enumerate(sorted_names):
        rows.append([p] + list(player_matrix[i, :]))
    table = AsciiTable(rows)
    table.inner_row_border = True
    table.justify_columns = {i + 1: 'center' for i in range(len(sorted_names))}
    return table.table
def __report_summary_labels(self, cumulative):
    """Log a per-label statistics table built from *cumulative* results."""
    rows = [("label", "status", "succ", "avg_rt", "error")]
    for label in sorted(cumulative.keys()):
        if label != "":
            rows.append(self.__get_sample_element(cumulative[label], label))
    # fancy box-drawing table on a TTY, plain ASCII otherwise
    if sys.stdout.isatty():
        table = SingleTable(rows)
    else:
        table = AsciiTable(rows)
    table.justify_columns = {0: "left", 1: "center", 2: "right", 3: "right", 4: "left"}
    self.log.info("Request label stats:\n%s", table.table)
def __report_summary_labels(self, cumulative):
    """Emit request-label statistics as a table via the instance logger."""
    header = ("label", "status", "succ", "avg_rt", "error")
    body = [
        self.__get_sample_element(cumulative[name], name)
        for name in sorted(cumulative.keys())
        if name != ""
    ]
    rows = [header] + body
    table = SingleTable(rows) if sys.stdout.isatty() else AsciiTable(rows)
    table.justify_columns = {0: "left", 1: "center", 2: "right", 3: "right", 4: "left"}
    self.log.info("Request label stats:\n%s", table.table)
def print_table(self):
    """Pretty print the table with metric, value and percentage."""
    rows = [['Metric', 'Value', 'Percentage']]
    rows.extend(self.metrics_values)
    table = AsciiTable(rows)
    # numeric columns read better right-aligned
    table.justify_columns = {1: 'right', 2: 'right'}
    print(table.table)
def test_multi_line():
    """Test multi-line tables.

    One cell carries an embedded newline, so its logical row occupies two
    physical lines; the test checks default rendering, then
    ``inner_row_border``, then right-justification of column 1.
    """
    table_data = [
        ['Show', 'Characters'],
        [
            'Rugrats',
            dedent(
                'Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\n'
                'Susie Carmichael, Dil Pickles, Kimi Finster, Spike')
        ],
        [
            'South Park',
            'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick'
        ]
    ]
    table = AsciiTable(table_data)
    # Default: the two physical lines of the Rugrats row share one logical
    # row, with no separator before South Park.
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       | Characters                                                                          |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            | Susie Carmichael, Dil Pickles, Kimi Finster, Spike                                  |
        | South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
    # With inner row borders a separator appears between logical rows.
    table.inner_row_border = True
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       | Characters                                                                          |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            | Susie Carmichael, Dil Pickles, Kimi Finster, Spike                                  |
        +------------+-------------------------------------------------------------------------------------+
        | South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
    # Right-justifying column 1 also right-aligns the header cell.
    table.justify_columns = {1: 'right'}
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       |                                                                          Characters |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            |                                  Susie Carmichael, Dil Pickles, Kimi Finster, Spike |
        +------------+-------------------------------------------------------------------------------------+
        | South Park |                          Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
def echo(self, data):
    """Render *data* as a borderless ASCII table and echo it via click.

    All borders are disabled so the output reads as aligned columns;
    ``self.style``, when set, supplies the ``justify_columns`` mapping.

    Fix: replaced the non-idiomatic ``not self.style is None`` with
    ``self.style is not None`` and removed dead commented-out code.
    """
    table = AsciiTable(data)
    table.inner_footing_row_border = False
    table.inner_row_border = False
    table.inner_column_border = False
    table.outer_border = False
    if self.style is not None:
        table.justify_columns = self.style
    click.echo(table.table)
def list_users(self):
    """Print a Room/User table of every connected nickname per room.

    Walks the module-level ``rooms`` mapping (room -> list of connection
    dicts keyed by nickname).

    Fix: ``dict.iteritems()`` and the ``print`` statement are
    Python-2-only; replaced with ``items()`` and ``print()`` so the
    method also runs on Python 3 (output unchanged).
    """
    users_table = [['Room', 'User']]
    for room, members in rooms.items():
        for conn in members:
            for nickname, sock_conn in conn.items():
                users_table.append([room, nickname])
    users_table_instance = AsciiTable(users_table)
    users_table_instance.inner_heading_row_border = True
    users_table_instance.inner_row_border = False
    users_table_instance.justify_columns = {0: 'left', 1: 'left'}
    print(users_table_instance.table)
def print(self, data, **kwargs):
    """Echo *data* as a borderless ASCII table.

    Recognized kwargs: ``inner_heading_row_border`` (bool) and ``style``
    (a ``justify_columns`` mapping); both are applied only when present.
    """
    table = AsciiTable(data)
    # strip every border so output reads as plain aligned columns
    for flag in ("inner_footing_row_border", "inner_row_border",
                 "inner_column_border", "outer_border"):
        setattr(table, flag, False)
    if "inner_heading_row_border" in kwargs:
        table.inner_heading_row_border = kwargs["inner_heading_row_border"]
    if "style" in kwargs:
        table.justify_columns = kwargs["style"]
    click.echo(table.table)
def __get_table(self, header, data, title=""):
    """Build an ASCII table from header metadata plus row data.

    *header* carries ``headers`` and ``descriptions`` lists; the private
    helpers produce the rendered header row, the justification map, and
    the body rows. Returns the table as a list of lines.
    """
    names = header["headers"]
    descriptions = header["descriptions"]
    first_row, justify = self.__set_header(names, descriptions)
    rows = [first_row]
    rows.extend(self.__set_table_data(data, names))
    table = AsciiTable(rows, title)
    table.justify_columns = justify
    return table.table.splitlines()
def test_multi_line():
    """Test multi-lined cells.

    The Rugrats cell contains an explicit ``\\n``; checks default output,
    ``inner_row_border``, and right-justification of column 1.
    """
    table_data = [
        ['Show', 'Characters'],
        ['Rugrats', 'Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\nDil Pickles'],
        ['South Park', 'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick']
    ]
    table = AsciiTable(table_data)
    # Test defaults.
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       | Characters                                                                          |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            | Dil Pickles                                                                         |\n'
        '| South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected
    # Test inner row border.
    table.inner_row_border = True
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       | Characters                                                                          |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            | Dil Pickles                                                                         |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected
    # Justify right.
    table.justify_columns = {1: 'right'}
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       |                                                                          Characters |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            |                                                                         Dil Pickles |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| South Park |                          Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected
def search(self, criteria: str = '', limit: int = 10, format: str = 'table'):
    """Search books by title and print the matches.

    :param criteria: SQL ``LIKE`` pattern matched against the title
        (passed through verbatim — wildcards are the caller's job).
    :param limit: maximum number of rows fetched.
    :param format: 'table' prints an ASCII table, 'json' dumps the rows;
        any other value just returns the data silently.
    :return: list of row lists [date, pages, isbn13, title, url].
    """
    total_in_db = self.session.query(BooksTable.uid).count()
    # newest publications first, capped at *limit*
    r = self.session.query(BooksTable.title, BooksTable.date_published, BooksTable.pages, BooksTable.url, BooksTable.isbn13)\
        .filter(BooksTable.title.like(criteria))\
        .order_by(desc(BooksTable.date_published))\
        .limit(limit)
    data = []
    # print(self.__default__orm)
    header = [
        colored('Date', "cyan", attrs=['bold']),
        colored('Pages', "cyan", attrs=['bold']),
        colored('ISBN13', "cyan", attrs=['bold']),
        colored('Title', "cyan", attrs=['bold']),
        colored('Url', "cyan", attrs=['bold'])
    ]
    for book in r:
        # wrap long titles/urls so the table stays within terminal width
        data.append([
            str(book.date_published), book.pages, book.isbn13,
            textwrap.fill(book.title, 90),
            textwrap.fill(book.url, 100)
        ])
    if format == 'table':
        if len(data) == 0:
            tt.print([[f"No results for: {criteria}"]], style=tt.styles.ascii_thin)
        else:
            h = [header]
            h.extend(data)
            title = "---| " + colored("Results for:", "yellow") + colored(f" {criteria} ", "green") + \
                ", Total DB: " + colored(number_format(total_in_db), "green") + \
                ", ORM: " + \
                colored(self.__default__orm, "green") + " |"
            t = AsciiTable(h, title=title)
            t.inner_row_border = True
            # rounded corners for the outer border
            t.CHAR_OUTER_TOP_LEFT = "╭"
            t.CHAR_OUTER_BOTTOM_LEFT = "╰"
            t.CHAR_OUTER_BOTTOM_RIGHT = "╯"
            t.CHAR_OUTER_TOP_RIGHT = "╮"
            t.padding_left = 2
            t.justify_columns = {0: 'left', 1: 'left', 2: 'left'}
            print("\n")
            print(t.table)
            #tt.print(data, header=header, padding=(0, 1), style=tt.styles.ascii_thin, alignment='lll')
    elif format == 'json':
        print(json.dumps(data))
    return data
def _print_scores(live_feeds):
    """Print a Match/Status/Summary table for each live feed.

    Feeds are separated by an empty row; prints a notice and returns
    early when there are no live matches.
    """
    if len(live_feeds) == 0:
        print('No live matches at this time')
        return
    rows = []
    last_feed = live_feeds[-1]
    for feed in live_feeds:
        rows.append(['Match', feed.description])
        rows.append(['Status', feed.status()])
        rows.append(['Summary', feed.summary()])
        # blank separator row between feeds, but not after the last one
        if feed != last_feed:
            rows.append([])
    table = AsciiTable(rows)
    table.inner_row_border = True
    table.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
    print(table.table)
def _stat_project(self):
    """Print overall project statistics: problem count per check kind.

    Rows are sorted ascending by (amount, kind).

    Fix: the original used a Python-2-only tuple-unpacking lambda
    ``lambda (k, v): (v, k)`` and a ``print`` statement; both are now
    Python-3-compatible with identical ordering and output.
    """
    title = "Statistics - Project"
    self._print_logo(title)
    # amount of problems per check kind
    counts = {kind: len(problems) for kind, problems in self.problems.items()}
    table_data = [["Problem", "Amount"]]
    for kind, value in sorted(counts.items(), key=lambda kv: (kv[1], kv[0])):
        table_data.append([kind, str(value)])
    stat_table = AsciiTable(table_data)
    stat_table.justify_columns = {0: 'left', 1: 'center'}
    print(stat_table.table)
def print_measurements(measurements, title=None):
    """Print a timing comparison table; ' *' marks the fastest entry.

    Each measurement must expose ``name`` and ``value`` (seconds); the
    last column shows the time as a percentage of the best result.
    """
    if title:
        print_title(title)
    best = min(measurements, key=lambda m: m.value)
    rows = [('Collection type', 'Time, s', '% of best')]
    for m in measurements:
        marker = ' *' if m.name == best.name else ''
        rows.append((
            m.name,
            f'{m.value:.6g}{marker}',
            f'{(m.value / best.value) * 100:.2f}',
        ))
    table = AsciiTable(rows)
    table.justify_columns = {2: 'right'}
    print(table.table)
    print('* - best time')
    print()
def test_multi_line():
    """Test multi-line tables.

    Same data as the sibling tests: an embedded newline makes the
    Rugrats row span two physical lines; verifies default rendering,
    ``inner_row_border``, and right-justified column 1.
    """
    table_data = [
        ['Show', 'Characters'],
        ['Rugrats', dedent('Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\n'
                           'Susie Carmichael, Dil Pickles, Kimi Finster, Spike')],
        ['South Park', 'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick']
    ]
    table = AsciiTable(table_data)
    # Default rendering: no separator between logical rows.
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       | Characters                                                                          |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            | Susie Carmichael, Dil Pickles, Kimi Finster, Spike                                  |
        | South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
    # Inner row borders add a separator between logical rows.
    table.inner_row_border = True
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       | Characters                                                                          |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            | Susie Carmichael, Dil Pickles, Kimi Finster, Spike                                  |
        +------------+-------------------------------------------------------------------------------------+
        | South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
    # Right-justification also applies to the header cell of column 1.
    table.justify_columns = {1: 'right'}
    expected = dedent("""\
        +------------+-------------------------------------------------------------------------------------+
        | Show       |                                                                          Characters |
        +------------+-------------------------------------------------------------------------------------+
        | Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |
        |            |                                  Susie Carmichael, Dil Pickles, Kimi Finster, Spike |
        +------------+-------------------------------------------------------------------------------------+
        | South Park |                          Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick |
        +------------+-------------------------------------------------------------------------------------+""")
    assert expected == table.table
def print_results(results, use_color, outfile):
    """ Print the given scan results """
    table = AsciiTable([["File", "Type", "Code", "Line Number"]] + results)
    table.justify_columns = {0: "center", 1: "center", 2: "left", 3: "center"}
    table.inner_row_border = True
    table.inner_footing_row_border = True
    table.inner_heading_row_border = True
    table.outer_border = True
    lines = table.table.split("\n")
    # emphasize the heading separator by swapping '-' for '='
    lines[2] = lines[2].replace("-", "=")
    report = "\n".join(lines) + "\n"
    print(report)
    if outfile:
        with open(outfile, "w") as f:
            f.write(report)
    my_print("[+] Analysis complete: %d suspicious code fragments found" % len(results),
             "blue", use_color=use_color)
def display_pull_stats(reviewers, reviewer_workload):
    """Print a per-reviewer assignment-count table with a TOTAL footer."""
    rows = [
        ['Assignee', '# assigned'],
    ]
    total = 0
    for reviewer in reviewers:
        count = reviewer_workload[reviewer]
        total += count
        rows.append([reviewer, six.text_type(count)])
    rows.append(['TOTAL: ', six.text_type(total)])
    table = AsciiTable(rows)
    # footer border separates the TOTAL row from the data
    table.inner_footing_row_border = True
    table.justify_columns = {1: 'center'}
    print(table.table)
def print_mdn(self):
    """Print one ASCII table per predicted point, with one MDN mixture
    component per row (pi, mu1, sig1, mu2, sig2, corr).

    NOTE(review): assumes ``preprocess_current_mdn()`` returns arrays
    indexed as [point, component] — confirm against its definition.
    """
    def _(x):
        # fixed 4-decimal cell formatting
        return "{0:.4f}".format(x)
    pi, mu1, mu2, sig1, sig2, corr = self.preprocess_current_mdn()
    # one table per point with one distribution per line
    # creating the tables
    for i in range(len(self.rel_pred_paths[0])):
        # flag points whose relative prediction magnitude exceeds 0.2 ("jump")
        if abs(np.max(self.rel_pred_paths[self.count, i])) > 0.2:
            name = 'Point ' + str(i) + ' (jump)'
        else:
            name = 'Point ' + str(i)
        table = [[name, 'pi', 'mu1', 'sig1', 'mu2', 'sig2', 'corr']]
        component = self.component_choice[self.count, i]
        print()
        print(self.rel_pred_paths[self.count, i])
        # creating one table for a single point prediction
        for j in range(len(pi[0])):
            # '*j*' marks high-weight (> 0.3) components, '!' the chosen one
            if pi[i, j] > 0.3:
                number = '*' + str(j) + '*'
            else:
                number = str(j)
            if j == component:
                number += '!'
            table.append([
                number,
                _(pi[i, j]),
                _(mu1[i, j]),
                _(sig1[i, j]),
                _(mu2[i, j]),
                _(sig2[i, j]),
                _(corr[i, j])
            ])
        t = AsciiTable(table)
        t.justify_columns = {0: 'center'}
        print(t.table)
def get_grid(self, astype='table'):
    r"""Return the geometry-by-phase grid of physics names for this project.

    :param astype: output format — 'pandas' (DataFrame), 'dict',
        'table' (rendered ASCII table, the default), or 'grid'
        (a ``ProjectGrid`` instance).
    :return: the grid in the requested representation; cells hold the
        physics object's name or '---' when no physics is found.

    Fix: cell assignment used chained indexing (``grid.loc[r][c] = x``),
    which writes to a temporary row Series and may silently not update
    the DataFrame; replaced with the single-step ``grid.loc[r, c] = x``.
    """
    from pandas import DataFrame as df
    geoms = self.geometries().keys()
    # mixtures are excluded from the phase columns
    phases = [
        p.name for p in self.phases().values() if not hasattr(p, 'mixture')
    ]
    grid = df(index=geoms, columns=phases)
    for r in grid.index:
        for c in grid.columns:
            phys = self.find_physics(phase=self[c], geometry=self[r])
            if phys is not None:
                grid.loc[r, c] = phys.name
            else:
                grid.loc[r, c] = '---'
    if astype == 'pandas':
        pass
    elif astype == 'dict':
        grid = grid.to_dict()
    elif astype == 'table':
        from terminaltables import AsciiTable
        headings = [self.network.name] + list(grid.keys())
        g = [headings]
        for row in list(grid.index):
            g.append([row] + list(grid.loc[row]))
        grid = AsciiTable(g)
        grid.title = 'Project: ' + self.name
        grid.padding_left = 3
        grid.padding_right = 3
        grid.justify_columns = {
            col: 'center' for col in range(len(headings))
        }
    elif astype == 'grid':
        grid = ProjectGrid()
    return grid
def _stat_file(self):
    """Print per-file statistics: total problem count and a per-check
    breakdown for every file that has at least one problem.

    Fixes: the Python-2-only ``print`` statement is now a ``print()``
    call, and the append that sat in a no-op ``for ... else`` clause
    (the loop has no ``break``, so the else always ran) is now a plain
    statement after the loop — behavior is unchanged, intent is clear.
    """
    title = "Statistics - Per File"
    self._print_logo(title)
    # Get all files that have problems (order of first appearance)
    files = []
    for problems in self.problems.values():
        for problem in problems:
            if problem.file_name not in files:
                files.append(problem.file_name)
    # Calculate the amount of problems of each kind in each file
    data_dict = {}
    for _file in files:
        problem_dict = {}
        for kind, problems in self.problems.items():
            for problem in problems:
                if problem.file_name == _file:
                    problem_dict[kind] = problem_dict.get(kind, 0) + 1
        data_dict[_file] = problem_dict
    # Create table data
    table_data = [["File", "Total Amount", "Problems"]]
    for file_name, problems_dict in data_dict.items():
        total_amount = 0
        problem = ""
        for kind, amount in problems_dict.items():
            total_amount += amount
            problem += str(amount) + " " + kind + "\n"
        table_data.append([file_name, total_amount, problem.strip()])
    stat_table = AsciiTable(table_data)
    stat_table.inner_row_border = True
    stat_table.justify_columns = {0: 'left', 1: 'center', 2: 'left'}
    print(stat_table.table)
def ascii_table(dicts=None, columns=None, title=None, fill="-", justify_columns=None): if dicts: sorted_dicts = unify_dicts(dicts, key_order=columns, fill=fill) row_values = [d.values() for d in sorted_dicts] table = AsciiTable([[str(c) for c in columns]] + row_values, title=title) if justify_columns: justifications = { i: justify_columns.get(c, "left") for i, c in enumerate(columns) } table.justify_columns = justifications return table.table else: return AsciiTable([["no results to display"]], title=title or "message").table
def print_table(data, columns):
    """Print usage values grouped by type and cpcode, with per-type
    subtotals and a grand Total row; value columns are right-aligned.
    """
    grouped = OrderedDict()
    rows = [[''] + columns]
    # bucket values as grouped[record_type][cpcode] -> per-period Decimals
    for record_type, period_idx, cpcode, value in data:
        if record_type not in grouped:
            grouped[record_type] = OrderedDict()
        if cpcode not in grouped[record_type]:
            grouped[record_type][cpcode] = [Decimal('0.00')] * len(columns)
        grouped[record_type][cpcode][period_idx] = Decimal(value)

    def to_title(s):
        return s.replace('-', ' ').title()

    total_values_list = []
    for record_type, by_cpcode in grouped.items():
        type_values_list = []
        for cpcode, values in by_cpcode.items():
            # call str as terminaltables can only print strings
            rows.append([cpcode] + [f(x) for x in values])
            type_values_list.append(values)
        type_values = [sum(x) for x in zip(*type_values_list)]
        rows.append([])
        rows.append([to_title(record_type)] + [f(x) for x in type_values])
        rows.append([])
        total_values_list.append(type_values)
    total_values = [sum(x) for x in zip(*total_values_list)]
    rows.append(['Total'] + [f(x) for x in total_values])
    table = AsciiTable(rows)
    table.justify_columns = {i + 1: 'right' for i in range(len(columns))}
    print(table.table)
def build_summary_table(dates, repos_views, repos_clones, show_views, show_clones, reverse):
    """Build an ASCII summary table of per-repo view/clone traffic.

    Each data row is [repo name, all-time totals cell, one cell per date];
    cells render as "uniques/count" lines for views and/or clones
    depending on *show_views* / *show_clones*. Rows are sorted by the
    per-date (clones, views) counts, repos with zero total traffic are
    dropped, and the date-label header is repeated as a footer.

    NOTE(review): assumes repos_views/repos_clones entries are aligned
    pairwise and that "breakdown" lists match *dates* — confirm at call
    site.
    """
    labels = [["Name", "All"] + [d.strftime("%m/%d\n%a") for d in dates]]
    # row = [name, totals-cell, per-date cells...], each cell a
    # {"views": {...}, "clones": {...}} dict
    data_rows = [[
        repo_views["name"],
        {
            "views": {
                "uniques": repo_views["uniques"],
                "count": repo_views["count"]
            },
            "clones": {
                "uniques": repo_clones["uniques"],
                "count": repo_clones["count"]
            }
        }
    ] + [{
        "views": {
            "uniques": views_breakdown["uniques"],
            "count": views_breakdown["count"]
        },
        "clones": {
            "uniques": clones_breakdown["uniques"],
            "count": clones_breakdown["count"]
        }
    } for views_breakdown, clones_breakdown in zip(repo_views["breakdown"], repo_clones["breakdown"])]
        for (repo_views, repo_clones) in zip(repos_views, repos_clones)]

    def sort_key(r):
        # sort by per-date (clone count, view count) tuples (skips totals cell)
        return [(c["clones"]["count"], c["views"]["count"]) for c in r[2:]]

    def filter_func(r):
        # keep rows with any all-time traffic
        return bool(r[1]["views"]["count"] + r[1]["clones"]["count"])

    def fmt_cell(c):
        # empty string for zero-traffic cells; otherwise "uniques/count" lines
        if not (c["views"]["count"] + c["clones"]["count"]):
            return ""
        line_str = "{uniques}/{count}"
        lines = []
        if show_views:
            lines.append(line_str.format(**c["views"]))
        if show_clones:
            lines.append(line_str.format(**c["clones"]))
        return "\n".join(lines)

    data_rows = sorted(data_rows, key=sort_key, reverse=reverse)
    data_rows = filter(filter_func, data_rows)
    data_rows = [[r[0]] + list(map(fmt_cell, r[1:])) for r in data_rows]
    # header labels repeated at the bottom for long tables
    table_rows = labels + data_rows + labels
    table = AsciiTable(table_rows, "Summary")
    table.inner_row_border = True
    table.justify_columns = {i: "center" for i in range(1, len(dates) + 2)}
    return table.table
def test_schema():
    """Validate every migration SQL file's schema formatting.

    For each non-excluded SQL file, checks (per table found in it):
    column comments, embedded JSON validity/completeness, and grants.
    Failures are collected into a colored report table plus a summary
    table; exits with status 1 when any hard error was found.

    NOTE(review): nesting below is reconstructed from a
    whitespace-mangled source — the per-title checks are assumed to sit
    inside the ``for title in Schema[1]`` loop; confirm against the
    original repository.
    """
    # EXCLUSION_LIST env var holds entries like '["V1__bootstrap.sql"]'
    exclusions = os.getenv('EXCLUSION_LIST', '["V1__bootstrap.sql"]')
    exclusions = exclusions.split(',')
    for item in exclusions:
        # strip the surrounding ["..."] decoration from each entry
        i = re.search('\[\"(.+?)\"\]', item).group(1)
        exclusions[exclusions.index(item)] = i
    file_list, file_path = OF.findSQL()
    file_list, file_path = OF.orderSQL(file_list, file_path)
    # drop excluded files (parallel lists, so pop both)
    for ex in exclusions:
        if ex in file_list:
            i = file_list.index(ex)
            file_list.pop(i)
            file_path.pop(i)
    table_data = [[
        Color('{autogreen}File Name{/autogreen}'), 'Create Table', 'Comments',
        'Valid JSON', 'Grants',
        Color('{red}Errors{/red}')
    ]]
    Pass_Total, Errors_Total, Flags_Total = 0, 0, 0
    ErrorFile, FlagFile = [], []
    for i in range(len(file_list)):
        print(file_list[i])
        Pass = True
        filePath = file_path[i]
        Schema = CT.find_table(filePath)
        T = ''
        JSONcommands = ''
        Grants = ''
        Errors = ''
        if Schema[0] == True:
            for title in Schema[1]:
                T = '{green}Pass{/green}'
                columns = CT.find_columns(filePath, title)
                # --- column-comment check ---
                comments = CT.check_correct_comments(filePath, title)
                if comments[0] == True:
                    Com = '{green}Pass{/green}'
                if comments[0] == False:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    Com = '{red}Fail{/red}\n'
                    Err = '{red}Comments missing{/red}'
                    for key in comments[1].keys():
                        if comments[1][key][0] == False:
                            Com += str(key) + '\n'
                    Errors += Err + '\n'
                # --- embedded-JSON check: J = (parsed_ok, error_lines,
                #     complete_ok, missing_labels, missing_descriptions) ---
                J = CJ.json_file_read(filePath, title)
                if J[0] == True:
                    if J[2] == True:
                        JSONcommands = '{green}Pass{/green}'
                if J[0] == False:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    JSONcommands = '{red}Fail:{/red}\n'
                    Err = '{red}JSON Error line(s){/red}'
                    for e in J[1]:
                        JSONcommands += str(e) + '\n'
                    Errors += Err + '\n'
                if J[2] == False:
                    # incomplete JSON is a soft flag, not a hard error
                    Pass = False
                    Flags_Total += 1
                    FlagFile.append(file_list[i])
                    Err = '{yellow}Missing infomation{/yellow}'
                    JSONcommands += '{yellow}Warning:{/yellow}\n'
                    if len(J[3]) != 0:
                        JSONcommands += 'Missing label(s)\n'
                        for e in J[3]:
                            JSONcommands += str(e) + '\n'
                    if len(J[4]) != 0:
                        JSONcommands += 'Missing description(s)\n'
                        for e in J[4]:
                            JSONcommands += str(e) + '\n'
                # --- grants check: G = (grant->present map, expected count) ---
                G = CG.which_grants(filePath, title)
                if sum((v == True for v in G[0].values())) != G[1]:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    Gpass = False
                    Grants = '{red}Fail{/red}\n'
                    Err = '{red}Access Expected{/red}\n'
                    for key in G[0].keys():
                        if G[0][key] == False:
                            Grants += '{blue}' + str(key) + '{/blue}\n'
                        if G[0][key] == True:
                            Grants += '{green}' + str(key) + '{/green}\n'
                    Errors += Err + '\n'
                else:
                    Gpass = True
                    Grants = '{green}Pass{/green}'
        if Schema[0] == False:
            # no CREATE TABLE found at all: flag the file
            T, Com, JSONcommands, Grants, Errors = '---', '---', '---', '---', '---'
            Pass = False
            Flags_Total += 1
            FlagFile.append(file_list[i])
        if Pass == False:
            table_data.append([
                file_list[i], Color(T), Color(Com), Color(JSONcommands),
                Color(Grants), Color(Errors)
            ])
        else:
            Pass_Total += 1
    # table style selectable via TABLE_STYLE env var ('ascii' default)
    table_style = os.environ.get('TABLE_STYLE')
    if table_style is None:
        table_style = 'ascii'
    reportTable = None
    if table_style == 'ascii':
        reportTable = AsciiTable(table_data)
    else:
        reportTable = SingleTable(table_data)
    reportTable.inner_row_border = True
    reportTable.justify_columns = {
        0: 'center', 1: 'center', 2: 'center', 3: 'center', 4: 'center',
        5: 'center'
    }
    Passes = '{green}Files Passed: ' + str(Pass_Total) + '{/green}'
    Err = '{red}Errors Found: ' + str(Errors_Total) + '{/red}'
    Flags = '{yellow}Flags Found: ' + str(Flags_Total) + '{/yellow}'
    EF = ''
    FF = ''
    for f in ErrorFile:
        if len(EF) == 0:
            EF += str(f)
        else:
            EF += '\n' + str(f)
    ## Option to add a "Flags found" row to the report table,
    for f in FlagFile:
        if len(FF) == 0:
            FF += str(f)
        else:
            FF += '\n' + str(f)
    table_instance = None
    if table_style == 'ascii':
        table_instance = AsciiTable(
            [[Color(Passes), ''], [Color(Err), EF], [Color(Flags), FF]],
            ' Formatting Summary ')
    else:
        table_instance = SingleTable(
            [[Color(Passes), ''], [Color(Err), EF], [Color(Flags), FF]],
            ' Formatting Summary ')
    table_instance.inner_row_border = True
    print(reportTable.table)
    print(table_instance.table)
    if Errors_Total != 0:
        print('Schema validation failed')
        exit(1)