def __init__(self):
    """Show a table in the console with the output of the linear
    regression parameters (B0, B1, r, r^2, P) computed from
    ``history.txt``.
    """
    calculate = Calculate("history.txt")
    values = calculate.get_regression_linear()
    table = Texttable()
    table.set_cols_align(["c", "c", "c", "c", "c", "c"])
    table.set_cols_valign(["m", "m", "m", "m", "m", "m"])
    table.set_cols_dtype(['t', 'f', 'f', 'f', 'f', 'f'])
    table.set_cols_width([10, 15, 10, 10, 10, 10])
    head = [
        color(bcolors.GREEN, "Test"),
        color(bcolors.GREEN, "Expected Values"), '', '', '', ''
    ]
    parameters = [
        '',
        color(bcolors.GREEN, "B0"),
        color(bcolors.GREEN, "B1"),
        color(bcolors.GREEN, "r"),
        color(bcolors.GREEN, "r^2"),
        color(bcolors.GREEN, "P")
    ]
    rows = [head, parameters]
    # extend() instead of a list comprehension used for its side effect
    rows.extend(values)
    table.add_rows(rows)
    print(table.draw() + "\n")
def print_table(data, header, title):
    """Print ``data`` as an ASCII table with a centered title above it.

    Does nothing when ``data`` is empty.

    :param data: iterable of rows (each row an iterable of cells)
    :param header: table header names; length must match the row width
    :param title: title text, centered over the table and suffixed with ':'
    """
    data = list(data)
    if not data:
        return
    cols = len(data[0])
    assert len(header) == cols
    table = Texttable()
    table.set_cols_align(["c"] * cols)
    table.set_cols_valign(["m"] * cols)
    table.add_rows([header] + data)
    table_str = table.draw()
    # Width of the first rendered line. splitlines() is robust even when a
    # line contains whitespace; the old str.split()[0] only worked because
    # the default border line happens to contain no spaces.
    table_width = len(table_str.splitlines()[0])
    title_str = f"{title}:".center(table_width)
    print()
    print(title_str)
    print()
    print(table_str)
    print()
def render_schemas_as_table(schemas, display_heading=True):
    """Return an ASCII table rendering of ``schemas``.

    :param schemas: the schemas result set to render
        (:class:`mytardisclient.models.resultset.ResultSet`)
    :param display_heading: when True, prefix the table with a heading
        summarizing the query meta data (URL, total count, limit, offset),
        which can reveal pagination truncation
    """
    if display_heading:
        heading = "\n" \
            "Model: Schema\n" \
            "Query: %s\n" \
            "Total Count: %s\n" \
            "Limit: %s\n" \
            "Offset: %s\n\n" \
            % (schemas.url, schemas.total_count, schemas.limit,
               schemas.offset)
    else:
        heading = ""
    table = Texttable(max_width=0)
    table.set_cols_align(["r"] + ["l"] * 6)
    table.set_cols_valign(["m"] * 7)
    table.header(["ID", "Name", "Namespace", "Type", "Subtype",
                  "Immutable", "Hidden"])
    for schema in schemas:
        row = [schema.id, schema.name, schema.namespace, schema.type,
               schema.subtype or '', str(bool(schema.immutable)),
               str(bool(schema.hidden))]
        table.add_row(row)
    return heading + table.draw() + "\n"
def print_llc():
    """Clear the screen and print a table of last-level-cache (LLC) usage
    per instance, with a final 'Total' row.

    Reads the module-level ``dom_info`` mapping; each value is expected to
    expose ``name``, ``llc`` and ``avg`` attributes.
    """
    total_llc_used = 0
    total_llc_avg = 0
    table = Texttable()
    table.set_cols_align(["l", "r", "c", "c"])
    table.set_cols_valign(["t", "m", "b", "b"])
    rows = [[
        "Seq",
        get_color_string(bcolors.GREEN, "Instance-name"),
        "LLC / KB",
        "LLC_AVG / KB"
    ]]
    # enumerate() replaces the manual counter; only the dict values were
    # used, so iterate values() directly (same order as items()).
    for seq, dom in enumerate(dom_info.values(), start=1):
        rows.append([
            seq,
            get_color_string(bcolors.BLUE, dom.name),
            dom.llc,
            dom.avg
        ])
        total_llc_used += dom.llc
        total_llc_avg += dom.avg
    rows.append([
        'all',
        get_color_string(bcolors.BLUE, "Total"),
        total_llc_used,
        total_llc_avg
    ])
    table.add_rows(rows)
    os.system('cls' if os.name == 'nt' else 'clear')
    print(table.draw() + '\n')
def get_two_column_table_head():
    """Build and return an empty two-column Texttable: left-aligned,
    top-valigned, unlimited width, with header/vertical-line/border
    decoration enabled."""
    head = Texttable()
    head.set_cols_align(["l"] * 2)
    head.set_cols_valign(["t"] * 2)
    head.set_deco(Texttable.HEADER | Texttable.VLINES | Texttable.BORDER)
    head.set_max_width(0)
    return head
class PredictionTable:
    """Fixed-width (80 columns with borders) text table of hourly
    pressure / altitude predictions."""

    __slots__ = ["table"]

    def __init__(self):
        from texttable import Texttable
        self.table = Texttable()
        self.table.set_deco(Texttable.HEADER | Texttable.HLINES)
        self.table.set_cols_dtype(["t", "t", "t", "t", "t"])
        self.table.set_cols_align(["c", "r", "r", "r", "l"])
        self.table.set_cols_valign(["t", "t", "t", "t", "m"])
        self.table.header([_("H"), _("PRESSURE"), _("ALT"), _("ALT/hr"), ""])
        self.table.set_cols_width([5, 11, 7, 6, 38])  # total width = 80 (with added borders)

    def _add(self, hour: str, pressure: str, alt: str = "", alt_h: str = "",
             times: str = ""):
        """Append one pre-formatted row to the table."""
        return self.table.add_row([hour, pressure, alt, alt_h, times])

    def add_start(self, hour: int, minute: int, pressure: float, times: iter):
        """Add the initial row: 'HhMM', starting pressure and times."""
        _h = "{:d}h{:02d}".format(int(hour), int(minute))
        _p = "{:.2f} hPa".format(float(pressure))
        _t = ", ".join(times)
        return self._add(hour=_h, pressure=_p, times=_t)

    def add(self, hour: int, pressure: float, alt: float, alt_h: float,
            times: iter) -> int:
        """Add an hourly row; non-float numeric fields render as empty
        cells."""
        _h = "{:d}h".format(int(hour))
        _p = "{:.2f} hPa".format(pressure) if isinstance(pressure, float) else ""
        _a = "{:.1f}m".format(alt) if isinstance(alt, float) else ""
        # BUG FIX: the guard previously tested ``alt`` instead of ``alt_h``,
        # so a float ``alt`` with a non-float ``alt_h`` crashed formatting.
        _ah = "{:.1f}m".format(alt_h) if isinstance(alt_h, float) else ""
        _t = ", ".join(times)
        return self._add(hour=_h, pressure=_p, alt=_a, alt_h=_ah, times=_t)

    def display_table(self):
        """Return the drawn table as a string."""
        return self.table.draw()
def print_steps(self, show_result=True):
    """Print the evaluation history as a table when history was saved,
    otherwise print the step count; optionally print the final term.

    :param show_result: when True, also print ``repr(self.term)``.
    """
    # Longest line of the multi-line string form of ``s``.
    def max_len_of_list_of_str(s):
        return max(len(line) for line in str(s).split('\n'))

    # Column widths = widest cell per column over all rows.
    def autodetect_width(d):
        widths = [0] * len(d[0])
        for line in d:
            for _i in range(len(line)):
                widths[_i] = max(widths[_i], max_len_of_list_of_str(line[_i]))
        return widths

    if self.save_history:
        if self.errors:
            # Drop the last (failed) history entry before rendering.
            self.history = self.history[:-1]
        t = Texttable()
        # NOTE(review): in parallel mode the 'Stack' column is omitted and
        # the last element of each history item is sliced off — presumably
        # that element is the stack; confirm against the evaluator.
        header = ['№', 'Term', 'Code'
                  ] if self.parallel else ['№', 'Term', 'Code', 'Stack']
        data = [header] + [[repr(i) for i in item][:-1]
                           if self.parallel else [repr(i) for i in item]
                           for item in self.history]
        t.add_rows(data)
        t.set_cols_align(['l'] + ['r'] * (len(header) - 1))
        t.set_cols_valign(['m'] + ['m'] * (len(header) - 1))
        t.set_cols_width(autodetect_width(data))
        print t.draw()  # Python 2 print statement
    else:
        if not self.errors:
            print ' Steps: %10s' % self.iteration
    if show_result:
        print 'Result: %10s' % repr(self.term)
def render_datasets_as_table(datasets, display_heading=True):
    """Return an ASCII table rendering of ``datasets``.

    :param datasets: the datasets result set to render
        (:class:`mtclient.models.resultset.ResultSet`)
    :param display_heading: when True, prefix the table with a heading
        summarizing the query meta data (URL, total count, limit, offset),
        which can reveal pagination truncation
    """
    heading = ""
    if display_heading:
        heading = "\n" \
            "Model: Dataset\n" \
            "Query: %s\n" \
            "Total Count: %s\n" \
            "Limit: %s\n" \
            "Offset: %s\n\n" \
            % (datasets.url, datasets.total_count, datasets.limit,
               datasets.offset)
    table = Texttable(max_width=0)
    table.set_cols_align(["r", "l", "l", "l"])
    table.set_cols_valign(["m"] * 4)
    table.header(["Dataset ID", "Experiment(s)", "Description", "Instrument"])
    for dataset in datasets:
        row = [
            dataset.id,
            "\n".join(dataset.experiments),
            dataset.description,
            dataset.instrument,
        ]
        table.add_row(row)
    return heading + table.draw() + "\n"
def __init__(self):
    """Print all calculated requirement values as a three-column table:
    test name, parameter label and expected value."""
    calculate = Requeriments()
    values = calculate.calculate_all_values()
    labels = ["r", "r^2", "Significance", "B0", "B1", "P", "Range",
              "UPI(70%)", "LIP(70%)"]
    table = Texttable()
    table.set_cols_align(["c"] * 3)
    table.set_cols_valign(["m"] * 3)
    table.set_cols_dtype(['t', 't', 'f'])
    table.set_cols_width([15] * 3)
    table.set_precision(9)
    rows = [[
        color(bcolors.GREEN, "Test"),
        color(bcolors.GREEN, "Parameter"),
        color(bcolors.GREEN, "Expected Value"),
    ]]
    # One row per (test, parameter) pair, nine parameters per test.
    for number, item in enumerate(values, start=1):
        test_name = 'Test%d' % number
        for pos in range(9):
            rows.append([test_name, labels[pos], item[pos]])
    table.add_rows(rows)
    print(table.draw() + "\n")
def __init__(self):
    """Show a table of the total probability p, obtained by numerically
    integrating the Student's t-distribution pdf from -t to t using
    Simpson's rule (p = area under the pdf over [-t, t]).
    """
    calculate = Calculate('history.txt')
    values = calculate.get_total_probability_p()
    table = Texttable()
    table.set_cols_align(["l", "c", "c"])
    table.set_cols_valign(["m", "m", "m"])
    table.set_cols_dtype(['t', 'i', 'f'])
    table.set_precision(5)
    label = [
        color(bcolors.GREEN, "Test"), '',
        color(bcolors.GREEN, "Expected Values")]
    head = [
        color(bcolors.GREEN, "t"),
        color(bcolors.GREEN, "dof"),
        color(bcolors.GREEN, "p")]
    rows = [label, head]
    # extend() instead of a list comprehension used for its side effect
    rows.extend(values)
    table.add_rows(rows)
    print(table.draw() + "\n")
def test_colored():
    """Texttable must pad colored cells by their *visible* width, so ANSI
    escape sequences do not distort the layout.

    NOTE(review): the expected literal embeds raw ANSI escape codes
    ([92m/[94m/[91m … [0m); its exact in-cell padding comes from the
    source literal — verify against the checked-in file.
    """
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([[
        get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"
    ], ["Mr\nXavier\nHuon", 32, "Xav'"], [
        get_color_string(bcolors.BLUE, "Mr\nBaptiste\nClement"), 1,
        get_color_string(bcolors.RED, "Baby")
    ]])
    # Reference drawing; leading newline and trailing newline are stripped.
    expected_output = dedent("""
        +----------------+-----+----------+
        | [92mName Of Person[0m | Age | Nickname |
        +================+=====+==========+
        | Mr             |     |          |
        | Xavier         |  32 |          |
        | Huon           |     |   Xav'   |
        +----------------+-----+----------+
        | [94mMr[0m             |     |          |
        | [94mBaptiste[0m       |   1 |          |
        | [94mClement[0m        |     |   [91mBaby[0m   |
        +----------------+-----+----------+
        """).strip('\n')
    assert table.draw() == expected_output
def test_chaining():
    """Every Texttable mutator must return self, so one fluent chain has to
    produce exactly the same drawing as the equivalent step-by-step calls."""
    # Step-by-step configuration.
    table = Texttable()
    table.reset()
    table.set_max_width(50)
    table.set_chars(list('-|+='))
    table.set_deco(Texttable.BORDER)
    table.set_header_align(list('lll'))
    table.set_cols_align(list('lll'))
    table.set_cols_valign(list('mmm'))
    table.set_cols_dtype(list('ttt'))
    table.set_cols_width([3, 3, 3])
    table.set_precision(3)
    table.header(list('abc'))
    table.add_row(list('def'))
    table.add_rows([list('ghi')], False)
    s1 = table.draw()
    # Identical configuration, expressed as a single chained expression.
    s2 = (Texttable()
          .reset()
          .set_max_width(50)
          .set_chars(list('-|+='))
          .set_deco(Texttable.BORDER)
          .set_header_align(list('lll'))
          .set_cols_align(list('lll'))
          .set_cols_valign(list('mmm'))
          .set_cols_dtype(list('ttt'))
          .set_cols_width([3, 3, 3])
          .set_precision(3)
          .header(list('abc'))
          .add_row(list('def'))
          .add_rows([list('ghi')], False)
          .draw())
    assert s1 == s2
def print_steps(self, show_result=True):
    """Print the saved evaluation history as a table, or just the step
    count when no history was kept; optionally print the final term.

    :param show_result: when True, also print ``repr(self.term)``.
    """
    # Longest line of the multi-line string form of ``s``.
    def max_len_of_list_of_str(s):
        return max(len(line) for line in str(s).split('\n'))

    # Column widths = widest cell per column over all rows.
    def autodetect_width(d):
        widths = [0] * len(d[0])
        for line in d:
            for _i in range(len(line)):
                widths[_i] = max(widths[_i], max_len_of_list_of_str(line[_i]))
        return widths

    if self.save_history:
        if self.errors:
            # Drop the last (failed) history entry before rendering.
            self.history = self.history[:-1]
        t = Texttable()
        # NOTE(review): parallel mode drops the 'Stack' column and slices
        # the last element off each history item — confirm that element is
        # the stack.
        header = ['№', 'Term', 'Code'] if self.parallel else ['№', 'Term', 'Code', 'Stack']
        data = [header] + [
            [repr(i) for i in item][:-1]
            if self.parallel else [repr(i) for i in item]
            for item in self.history]
        t.add_rows(data)
        t.set_cols_align(['l'] + ['r'] * (len(header) - 1))
        t.set_cols_valign(['m'] + ['m'] * (len(header) - 1))
        t.set_cols_width(autodetect_width(data))
        print t.draw()  # Python 2 print statement
    else:
        if not self.errors:
            print ' Steps: %10s' % self.iteration
    if show_result:
        print 'Result: %10s' % repr(self.term)
def summary(self, deco: int = Texttable.BORDER,
            cols_align: Optional[List[str]] = None,
            cols_valign: Optional[List[str]] = None) -> CollateFunction:
    """
    Print a table summarizing this collate function's schema.

    Args:
        deco (int): border decoration flags for the texttable
        cols_align (List[str], optional): columns' horizontal alignment
            (defaults to left for all three columns)
        cols_valign (List[str], optional): columns' vertical alignment
            (defaults to top for all three columns)

    Returns:
        torecsys.data.dataloader.CollateFunction: self
    """
    cols_align = ['l', 'l', 'l'] if cols_align is None else cols_align
    cols_valign = ['t', 't', 't'] if cols_valign is None else cols_valign
    summary_table = Texttable()
    summary_table.set_deco(deco)
    summary_table.set_cols_align(cols_align)
    summary_table.set_cols_valign(cols_valign)
    # Header row plus one row per schema entry.
    rows = [['Field Name: ', 'Field Type: ', 'Arguments: ']]
    for field_name, field_type in self.schema.items():
        arg_names = ', '.join(self.kwargs.get(field_name, {}).keys())
        rows.append([field_name, field_type, arg_names])
    summary_table.add_rows(rows)
    print(summary_table.draw())
    return self
def view(self, key=None):
    """Nice stack data view.

    Use colorama module to highlight key if passed and texttable for data
    visualisation.

    :param key: optional substring; when given, rows whose key names
        contain it are logged after the table with the match highlighted.
    """
    # Log every row whose dict has a key name containing ``key``,
    # highlighting the class name, key and value in red.
    def __print_select():
        for idx, row in enumerate(self):
            for i in row:
                if key in i:
                    console_logger.info("select('%s').key(%s)\n" % (Fore.RED + row['class'] + Style.RESET_ALL, Fore.RED + i + Style.RESET_ALL))
                    console_logger.info("Value of '%s' is %s\n" % (i, Fore.RED + str(row[i]) + Style.RESET_ALL))
    console_logger.info("\nStack size: %s\n" % self.size())
    table = Texttable()
    table.set_cols_align(["c", "c"])
    table.set_cols_valign(["t", "m"])
    table.set_cols_width([8, 150])
    # First added row serves as the header.
    table.add_row(["Current Index", "Entry"])
    for idx, row in enumerate(self):
        table.add_row([idx, row])
    console_logger.info(table.draw() + "\n")
    if key:
        __print_select()
def test_texttable():
    """Smoke test: a three-column table with multi-line cells must match
    the reference drawing after ``clean()``.

    NOTE(review): the exact in-cell padding of the expected literal comes
    from the source file — verify against the checked-in version.
    """
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([
        ["Name", "Age", "Nickname"],
        ["Mr\nXavier\nHuon", 32, "Xav'"],
        ["Mr\nBaptiste\nClement", 1, "Baby"],
        ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"],
    ])
    assert clean(table.draw()) == dedent('''\
        +----------+-----+----------+
        |   Name   | Age | Nickname |
        +==========+=====+==========+
        | Mr       |     |          |
        | Xavier   |  32 |          |
        | Huon     |     |   Xav'   |
        +----------+-----+----------+
        | Mr       |     |          |
        | Baptiste |   1 |          |
        | Clement  |     |   Baby   |
        +----------+-----+----------+
        | Mme      |     |   Lou    |
        | Louise   |  28 |          |
        | Bourgeau |     |   Loue   |
        +----------+-----+----------+
        ''')
def render_instruments_as_table(instruments, display_heading=True):
    """Return an ASCII table rendering of ``instruments``.

    :param instruments: the instruments result set to render
        (:class:`mytardisclient.models.resultset.ResultSet`)
    :param display_heading: when True, prefix the table with a heading
        summarizing the query meta data (URL, total count, limit, offset),
        which can reveal pagination truncation
    """
    heading = ""
    if display_heading:
        heading = "\n" \
            "Model: Instrument\n" \
            "Query: %s\n" \
            "Total Count: %s\n" \
            "Limit: %s\n" \
            "Offset: %s\n\n" \
            % (instruments.url, instruments.total_count,
               instruments.limit, instruments.offset)
    table = Texttable(max_width=0)
    table.set_cols_align(["r", "l", "l"])
    table.set_cols_valign(["m"] * 3)
    table.header(["ID", "Name", "Facility"])
    for instrument in instruments:
        table.add_row([instrument.id, instrument.name, instrument.facility])
    return heading + table.draw() + "\n"
def summary(
        self,
        deco: int = Texttable.BORDER,
        cols_align: List[str] = None,
        cols_valign: List[str] = None) -> TypeVar("DataloaderCollator"):
    r"""Get summary of trainer.

    Args:
        deco (int): Border of texttable
        cols_align (List[str], optional): List of string of columns' align,
            defaults to ``["l", "l", "l"]``
        cols_valign (List[str], optional): List of string of columns' valign,
            defaults to ``["t", "t", "t"]``

    Returns:
        torecsys.data.dataloader.DataloaderCollator: self
    """
    # Avoid mutable default arguments (shared between calls); resolve the
    # defaults here instead — consistent with CollateFunction.summary.
    if cols_align is None:
        cols_align = ["l", "l", "l"]
    if cols_valign is None:
        cols_valign = ["t", "t", "t"]

    # create and configure text table
    t = Texttable()
    t.set_deco(deco)
    t.set_cols_align(cols_align)
    t.set_cols_valign(cols_valign)

    # append data to text table
    t.add_rows(
        [["Field Name: ", "Field Type: ", "Arguments: "]] +
        [[k, v, ", ".join(self.kwargs.get(k, {}).keys())]
         for k, v in self.schema.items()]
    )

    # Print summary with text table
    print(t.draw())
    return self
def load_fs(): t = Texttable() # headers/columns columns = ['Fset#'] for k, h in sorted(features_map.items(), key=lambda kv: kv[1][1]): columns.append(h[1]) matrix_length = len(columns) t.set_cols_width([5] * matrix_length) t.set_cols_align(['c'] * matrix_length) t.set_cols_valign(['m'] * matrix_length) t.set_cols_dtype(['t'] * matrix_length) root_path = os.path.dirname(os.path.realpath(__file__)) fs_dir = os.path.join(root_path, 'config/general_config/') for fs_filename in sorted(os.listdir(fs_dir)): fs_dict = {} if fs_filename.startswith('featureset0') and \ fs_filename.endswith('.yml'): with open(os.path.join(fs_dir, fs_filename)) as fs_file: fs_dict = yaml.load(fs_file, yaml.SafeLoader) datarow = get_data_from_yaml(fs_dict, fs_filename) t.add_rows([columns, datarow]) fs_list.append(fs_filename[10:13]) print(t.draw()) print('\n')
def generate_table(region):
    """Print a table of EC2 instances in ``region``: instance id, Name tag,
    state, and the volume ids / device names of attached EBS volumes.

    :param region: AWS region name, e.g. 'us-east-1'
    """
    client = boto3.client(service_name='ec2', region_name=region)
    response = client.describe_instances()
    table = Texttable()
    table.set_cols_align(["c", "c", "c", "c", "c"])
    table.set_cols_valign(["m", "m", "m", "m", "m"])
    if response['Reservations']:
        table.header(["Instance ID", "Name", "State", "VolumeId",
                      "DeviceName"])
        # One resource client for all instances (was re-created per instance).
        resource = boto3.resource('ec2', region)
        for r in response['Reservations']:
            for i in r['Instances']:
                instanceid = i['InstanceId']
                instance = resource.Instance(instanceid)
                # BUG FIX: the old loop reset name to " " on every non-Name
                # tag, so a Name tag that wasn't the *last* tag was lost,
                # and name could be stale/unbound for untagged instances.
                name = " "
                if instance.tags:
                    for tag in instance.tags:
                        if tag['Key'] == 'Name':
                            name = tag['Value']
                            break
                state = i['State']['Name']
                vs = ''
                dn = ''
                for d in i['BlockDeviceMappings']:
                    vs = vs + d['Ebs']['VolumeId'] + '\n'
                    dn = dn + d['DeviceName'] + '\n'
                table.add_row([instanceid, name, state, vs, dn])
        print(table.draw() + "\n")
    else:
        print(' No instances found in this region\n')
def tab_text(df, columns_width=None, columns=None, vline=True):
    """Render a pandas DataFrame as an ASCII table string.

    :param df: the DataFrame to render; a warning is printed and None
        returned for any other type
    :param columns_width: optional list of per-column widths
        (defaults to 10 for every column); '' is returned if it is set
        but not a list
    :param columns: optional list of header names (defaults to df.columns)
    :param vline: when False, only the header separator is drawn
    :return: the drawn table as a string
    """
    if not isinstance(df, pd.DataFrame):
        print('Warning:\n', type(df), '\n', df)
        print('input data is not DataFrame!')
        return
    colnum = len(df.columns)
    table = Texttable()
    if not vline:
        table.set_deco(Texttable.HEADER)
    table.set_cols_align(["l"] * colnum)
    table.set_cols_valign(["m"] * colnum)
    table.set_chars(["-", "|", "+", "="])
    table.set_cols_dtype(['t'] * colnum)
    if not columns_width:
        columns_width = []
    elif not isinstance(columns_width, list):
        # BUG FIX: the old message said "colWidth is not dict type!" even
        # though a *list* is what this function accepts.
        print('columns_width is not list type!')
        return ''
    # Fall back to a uniform width of 10 when no widths were supplied.
    defaultwidth = columns_width if columns_width else [10] * colnum
    table.set_cols_width(defaultwidth)
    headnames = columns if columns else [s for s in df.columns]
    rowall = [headnames] + [list(df.values[i]) for i in range(len(df))]
    table.add_rows(rowall)
    return table.draw()
def render_datasets_as_table(datasets, display_heading=True):
    """Return an ASCII table rendering of ``datasets``.

    :param datasets: the datasets result set to render
        (:class:`mytardisclient.models.resultset.ResultSet`)
    :param display_heading: when True, prefix the table with a heading
        summarizing the query meta data (URL, total count, limit, offset),
        which can reveal pagination truncation
    """
    if display_heading:
        heading = "\n" \
            "Model: Dataset\n" \
            "Query: %s\n" \
            "Total Count: %s\n" \
            "Limit: %s\n" \
            "Offset: %s\n\n" \
            % (datasets.url, datasets.total_count, datasets.limit,
               datasets.offset)
    else:
        heading = ""
    table = Texttable(max_width=0)
    table.set_cols_align(["r"] + ["l"] * 3)
    table.set_cols_valign(["m", "m", "m", "m"])
    table.header(["Dataset ID", "Experiment(s)", "Description", "Instrument"])
    for dataset in datasets:
        experiments = "\n".join(dataset.experiments)
        table.add_row([dataset.id, experiments, dataset.description,
                       dataset.instrument])
    return heading + table.draw() + "\n"
def build_table(data):
    """Render a character sheet as a two-column (label/value) table.

    Level 3+ characters get divine GP/XP and ASL rows; lower levels get
    RP and Arena progress rows instead.
    """
    level = int(data['Level'])
    rows = [
        ['Name', data['Name']],
        ['Class', data['Class']],
        ['Faction', data['Faction']],
        ['Level', data['Level']],
        ['Wealth', data['Total GP']],
        ['Experience', data['Total XP']],
    ]
    if level >= 3:
        rows.append(['Div GP', data['Div GP'] + '/' + data['GP Max']])
        rows.append(['Div XP', data['Div XP'] + '/' + data['XP Max']])
        rows.append(['ASL Mod', data['ASL Mod']])
    else:
        # Level 1 needs one arena/RP; level 2 needs two, split over two
        # half-counters.
        needed_arena = 1 if level == 1 else 2
        needed_rp = 1 if level == 1 else 2
        if level == 1:
            num_arena = int(data['L1 Arena'])
            num_rp = int(data['L1 RP'])
        else:
            num_arena = int(data['L2 Arena 1/2']) + int(data['L2 Arena 2/2'])
            num_rp = int(data['L2 RP 1/2']) + int(data['L2 RP 2/2'])
        rows.append(['RP', str(num_rp) + '/' + str(needed_rp)])
        rows.append(['Arena', str(num_arena) + '/' + str(needed_arena)])
    table = Texttable()
    table.set_cols_align(["l", "r"])
    table.set_cols_valign(["m", "m"])
    table.add_rows(rows)
    return table.draw()
def test_colored():
    """Texttable must pad colored cells by their *visible* width so ANSI
    escape sequences do not distort the layout.

    NOTE(review): the expected literal embeds raw ANSI escape codes
    ([92m/[94m/[91m … [0m); exact in-cell padding comes from the source
    literal — verify against the checked-in file.
    """
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([
        [get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"],
        ["Mr\nXavier\nHuon", 32, "Xav'"],
        [get_color_string(bcolors.BLUE,"Mr\nBaptiste\nClement"), 1,
         get_color_string(bcolors.RED,"Baby")]
    ])
    # Reference drawing; surrounding newlines are stripped.
    expected_output = dedent("""
        +----------------+-----+----------+
        | [92mName Of Person[0m | Age | Nickname |
        +================+=====+==========+
        | Mr             |     |          |
        | Xavier         |  32 |          |
        | Huon           |     |   Xav'   |
        +----------------+-----+----------+
        | [94mMr[0m             |     |          |
        | [94mBaptiste[0m       |   1 |          |
        | [94mClement[0m        |     |   [91mBaby[0m   |
        +----------------+-----+----------+
        """).strip('\n')
    assert table.draw() == expected_output
def showJson(data):
    """Print a list of JSON objects (dicts) as a 15-column bordered table.

    NOTE(review): for every record this adds *two* rows — the dict itself
    (which Texttable iterates as its keys) and its values — so the key row
    repeats per record, presumably as a per-record header; confirm this is
    the intended rendering.

    :param data: list of dicts, each with up to 15 entries.
    """
    table = Texttable()
    table.set_deco(Texttable.BORDER)
    table.set_cols_align([
        "l", "l", "l", "l", "l", "l", "l", "l", "l", "l", "l", "l", "l",
        "l", "l"
    ])  # require three columns
    table.set_cols_valign([
        "m", "m", "m", "m", "m", "m", "m", "m", "m", "m", "m", "m", "m",
        "m", "m"
    ])
    table.set_cols_width(
        [15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15])
    print(data)  # debug output of the raw input
    for i in range(len(data)):
        l = {}
        # print(type(l))
        # Shallow copy of record i's key/value pairs.
        for j in data[i]:
            #print((data[i][j]))
            #print(data[i][j])
            l[j] = data[i][j]
            #print(j, data[i][j])
        print(l.values())  # debug output of the record's values
        table.add_rows([l, l.values()])
    print(table.draw() + "\n")
def get_text_table(nbColumn):
    """Create a Texttable sized to the console width, with ``nbColumn``
    centered, middle-valigned, text-typed columns.

    :param nbColumn: number of columns to configure
    :return: the configured Texttable
    """
    tbl = Texttable()
    tbl.set_max_width(get_console_window_width())
    tbl.set_cols_align(nbColumn * ['c'])
    tbl.set_cols_valign(nbColumn * ['m'])
    tbl.set_cols_dtype(nbColumn * ['t'])
    return tbl
def build_table(matches, result_map, headers):
    """Build a code-fenced ASCII table for one or more matches.

    Multiple matches: one row per match with ``headers`` across the top.
    Single match: transposed into label/value pairs, with the match name
    as the second header cell.
    """
    table = Texttable()
    if len(matches) > 1:
        table.set_deco(Texttable.HEADER | Texttable.HLINES | Texttable.VLINES)
        table.set_cols_align(len(headers) * ['c'])
        table.set_cols_valign(len(headers) * ['c'])
        table.header(headers)
        for match in matches:
            print(f'match: {match}: {result_map[match]}')
            row = [match] + list(result_map[match][0:])
            table.add_row(row)
        output = '```' + table.draw() + '```'
    else:
        table.set_cols_align(["l", "r"])
        table.set_cols_valign(["m", "m"])
        table.set_cols_width([10, 20])
        table.header([headers[0], matches[0]])
        pairs = list(zip(headers[1:], (result_map[matches[0]])[0:]))
        table.add_rows(pairs, header=False)
        output = '`' + table.draw() + '`'
    return output
def scrape_score(self):
    """
    Scrape web page, retrieve necessary data, format it and return to
    the user as a code-fenced ASCII table of upcoming (unscored) matches.

    :return: backtick-wrapped table string with Home/Away team columns.
    """
    page = requests.get(self.url)
    parsed_markup = BeautifulSoup(page.text, "html.parser")
    # final version of the table to send to the user
    scores = Texttable()
    # settings for table
    scores.set_cols_width([10, 1, 10])
    scores.set_cols_align(['l', 'c', 'r'])  # c - center align (horizontal), l - left, r - right
    scores.set_cols_valign(['m', 'm', 'm'])  # m - middle align (vertical)
    scores.set_chars(['—', '|', '+', '='])  # replace dash with em dash
    scores.header(["Home Team", "", "Away Team"])
    # scrape needed data from the parsed markup
    for element in parsed_markup.find_all("div", "row-gray"):
        match_name_element = element.find(attrs={"class": "scorelink"})
        # Only rows that have a score link and an unknown score ("?")
        # are upcoming matches — keep those.
        if match_name_element is not None and element.find("div", "sco").get_text().split("-")[0].strip() == "?":
            # Collapse internal whitespace in team names, then shorten.
            home_team = shorten_name(' '.join(element.find("div", "tright").get_text().strip().split(" ")))
            away_team = shorten_name(' '.join(element.find(attrs={"class": "ply name"}).get_text().strip().split(" ")))
            scores.add_row([home_team, "-", away_team])
    return '`' + scores.draw() + '`'
def test_table_update_deaths_down_multi(table_updater_service, stub_bno_dataframe):
    """Updates where multiple countries' death counts decreased must render
    the new counts with '(-n)' deltas in the table message."""
    columns = ["Location", "Cases", "Deaths", "Serious", "Critical", "Recovered", "Notes"]
    new_data = [
        ["Australia", "2", "0", "0", "0", "0", "1 serious"],
        ["Sweden", "5", "1", "0", "0", "0", ""],
    ]
    data_after = pd.DataFrame(new_data, columns=columns)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    joined_data = pd.concat([stub_bno_dataframe, data_after])
    messages = table_updater_service._make_update_message(joined_data)
    table = Texttable()
    table.set_cols_align(["c", "c", "c", "c", "c", "c", "c"])
    table.set_cols_valign(["m", "m", "m", "m", "m", "m", "m"])
    expected_rows = [
        ["Location", "Cases", "Deaths", "Serious", "Critical", "Recovered", "Notes"],
        ["Australia", "2", "0 (-1)", "0", "0", "0", "1 serious"],
        ["Sweden", "5", "1 (-1)", "0", "0", "0", ""],
    ]
    table.add_rows(expected_rows)
    expected_table = [f"```{table.draw()}```"]
    assert expected_table == messages
def render_instruments_as_table(instruments, display_heading=True):
    """Return an ASCII table rendering of ``instruments``.

    :param instruments: the instruments result set to render
        (:class:`mtclient.models.resultset.ResultSet`)
    :param display_heading: when True, prefix the table with a heading
        summarizing the query meta data (URL, total count, limit, offset),
        which can reveal pagination truncation
    """
    if display_heading:
        heading = "\n" \
            "Model: Instrument\n" \
            "Query: %s\n" \
            "Total Count: %s\n" \
            "Limit: %s\n" \
            "Offset: %s\n\n" \
            % (instruments.url, instruments.total_count,
               instruments.limit, instruments.offset)
    else:
        heading = ""
    table = Texttable(max_width=0)
    table.set_cols_align(["r", "l", "l"])
    table.set_cols_valign(["m", "m", "m"])
    table.header(["ID", "Name", "Facility"])
    for inst in instruments:
        table.add_row([inst.id, inst.name, inst.facility])
    return heading + table.draw() + "\n"
def check(f, f_grad, x0, name, verbose=False): """ Checks whether gradients of function ``f`` at point x0 is same as the gradients provided by ``f_grad``. ``error`` is the difference between numerical and provided gradients. '%error' = abs(error) / numerical gradient. Parameters ---------- f : callable input function to check gradients against f_grad : callable input function which provides gradients x0 : ndarray the point at which gradients should be calculated name : list a vector with the size of the number of parameters, which provides name for each parameter. This name will be used when generating output table verbose : boolean whether to print output for each parameter separately Returns ------- avg : float average of the percentage error over all the parameters, i.e., mean(%error) """ g = f_grad(x0) if len(g) != len(x0): raise Exception('dimensions mismatch') table = Texttable() table.set_cols_align(["l", "r", "c", "c", "c"]) table.set_cols_valign(["t", "m", "b" , "r", "c"]) rows = [] rows += [["Name ", "analytical ", "numerical ", "error ", "% error "]] if verbose: print 'dimensions:', len(x0) aver_error = 0 for i in range(len(x0)): def f_i(x): return f((concatenate((x0[:i], x, x0[(i+1):])))) t = get_d1(f_i, [x0[i]]) p_errro=None if t != 0: p_errro = abs(t-g[i]) / abs(t) rows += [[name[i], g[i], t, abs(t-g[i]), p_errro]] if abs(g[i]) <1e-4 and abs(t) < 1e-4: pass else: aver_error += abs(t-g[i]) / abs(t) if verbose: print 'element:', i table.add_rows(rows) if verbose: print(table.draw()) return aver_error / len(x0)
def _make_table_update(self, data):
    """Build Discord-sized table messages showing case/death deltas.

    Diffs before/after values via ``self._collect_differences`` and renders
    each changed location as a row with '(+n)'/'(-n)' annotations, then
    splits the drawn table into code-fenced chunks of at most ~1700
    characters (Discord's message limit headroom).

    :param data: combined before/after table data (passed through to
        ``_collect_differences``; its columns provide the header row)
    :return: list of code-fenced message strings
    """
    table = Texttable()
    table.set_cols_align(["c"] * len(data.columns))
    table.set_cols_valign(["m"] * len(data.columns))
    parsed_data = self._collect_differences(data)
    new_data = [data.columns.tolist()]
    for index, row in parsed_data.iterrows():
        (
            location,
            cases_before,
            cases_after,
            deaths_before,
            deaths_after,
            notes,
        ) = row
        cases_diff = int(cases_after) - int(cases_before)
        deaths_diff = int(deaths_after) - int(deaths_before)
        # Annotate the new value with its signed delta (omit when 0).
        if cases_diff > 0:
            cases = "".join([cases_after, " (+", str(cases_diff), ")"])
        elif cases_diff < 0:
            cases = "".join([cases_after, " (", str(cases_diff), ")"])
        else:
            cases = cases_after
        if deaths_diff > 0:
            deaths = "".join([deaths_after, " (+", str(deaths_diff), ")"])
        elif deaths_diff < 0:
            deaths = "".join([deaths_after, " (", str(deaths_diff), ")"])
        else:
            deaths = deaths_after
        # TODO: Fix the new columns that were added in with issue #3
        new_data.append([location, cases, deaths, "0", "0", "0", notes])
    table.add_rows(new_data)
    # Split the drawn table line-by-line into <=1700-char fenced messages.
    message_cache = []
    all_messages = []
    table_split = table.draw().split("\n")
    while table_split:
        message = table_split.pop(0) + "\n"
        message_cache.append([message])
        total_message = "".join(m[0] for m in message_cache)
        if len(total_message) > 1700 or not table_split:
            # [:-1] drops the trailing newline inside the code fence.
            all_messages.append(f"```{total_message[:-1]}```")
            message_cache = []
    return all_messages
def main():
    """Index inodes under the current directory, then print a table with
    each inode's file names, inode number and hard-link count.

    Relies on ``getInodes`` filling the module-level ``inodes`` and
    ``symlinks`` mappings (inode -> list of names).
    """
    getInodes(".")
    table = Texttable()
    table.set_cols_align(["l", "c", "c"])
    table.set_cols_valign(["t", "m", "m"])
    rows = [["File Names", "Inode", "Links Amount"]]
    # Python 2 dict views: union of inode keys from both mappings;
    # the link count only counts hard links (not symlinks).
    rows.extend([
        ("\n".join(inodes[inode] + symlinks[inode]), inode,
         len(inodes[inode]))
        for inode in inodes.viewkeys() | symlinks.viewkeys()])
    table.add_rows(rows)
    print (table.draw())
def _get_lboard_table(lboard, page):
    """Get the leaderboard as a text table."""
    board = Texttable()
    board.set_cols_align(3 * ['c'])
    board.set_cols_valign(3 * ['m'])
    board.add_rows(leaderboard_list(lboard, page))
    return board.draw()
def create_table(data_arr, halign, valign):
    """Print a 2-D array as a table with uniform cell alignment.

    :param data_arr: 2-D array-like exposing ``.shape`` and row slicing —
        presumably a numpy array; TODO confirm at call sites.
    :param halign: horizontal alignment code applied to every column
        ('l'/'c'/'r')
    :param valign: vertical alignment code applied to every column
        ('t'/'m'/'b')
    """
    table_ptr = Texttable()
    table_ptr.set_cols_align([halign] * data_arr.shape[1])
    table_ptr.set_cols_valign([valign] * data_arr.shape[1])
    # Python 2: xrange / print statement.
    for indx in xrange(data_arr.shape[0]):
        table_ptr.add_row(data_arr[indx, :])
    print
    print(table_ptr.draw())
    print
def show_lcs_table(memo_table):
    """Print an LCS memoization table: two centered columns, unlimited
    width, with borders and vertical lines only."""
    tbl = Texttable()
    tbl.set_cols_align(2 * ["c"])
    tbl.set_cols_valign(2 * ["t"])
    tbl.set_deco(Texttable.VLINES | Texttable.BORDER)
    tbl.set_max_width(0)
    for memo_line in memo_table:
        tbl.add_row(memo_line)
    print(tbl.draw())
def draw_object(self, obj, cols_keys, cols_names):
    """Print selected attributes of ``obj`` as a Key/Value table.

    :param obj: object whose ``__dict__`` supplies the values
    :param cols_keys: attribute names to display, in order
    :param cols_names: mapping from attribute name to display label
    """
    pairs = [['Key', 'Value']]
    pairs += [[cols_names[key], obj.__dict__[key]] for key in cols_keys]
    table = Texttable()
    table.set_cols_align(["l", "l"])
    table.set_cols_valign(["t", "t"])
    table.add_rows(pairs)
    print(table.draw())
def func9():
    """Table handling demo (texttable): a basic aligned table with
    multi-line cells, then a header-only table with per-column dtypes.

    (Docstring translated from Chinese: "table processing". A large dead
    triple-quoted code block that duplicated this demo with colored cells
    was removed — it had no runtime effect.)
    """
    from texttable import Texttable
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([["Name", "Age", "Nickname"],
                    ["Mr\nXavier\nHuon", 32, "Xav'"],
                    ["Mr\nBaptiste\nClement", 1, "Baby"],
                    ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
    print(table.draw() + "\n")

    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype([
        't',  # text
        'f',  # float (decimal)
        'e',  # float (exponent)
        'i',  # integer
        'a'
    ])  # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows(
        [["text", "float", "exp", "int", "auto"],
         ["abcd", "67", 654, 89, 128.001],
         ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
         ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
         ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
def list_apps(apps):
    """Print applications into a pretty table.

    :param apps: iterable of app dicts, each with 'name' and 'id' keys
    :return: True (always)
    """
    table = Texttable()
    table.set_cols_align(["r", "l", "l"])
    table.set_cols_valign(["m", "m", "m"])
    table.add_rows([
        ['App #', 'App name', 'App ID #'],
    ], header = True)
    # enumerate() replaces the manual counter.
    for c, webapp in enumerate(apps, start=1):
        table.add_row([c, webapp['name'], webapp['id']])
    # Print table.
    print (table.draw() + '\n')
    return True
def connectionsTable(connections, output_format):
    """Render *connections* as an ASCII table and return the drawn string.

    NOTE(review): ``output_format`` is accepted but never consulted in this
    function — presumably handled by the caller; confirm.
    """
    header = ['#', 'Station', 'Platform', 'Date', 'Time',
              'Duration', 'Chg.', 'With', 'Occupancy']
    table = Texttable(max_width=0)
    # One alignment entry per header column.
    table.set_cols_valign(['m', 't', 't', 't', 't', 't', 'm', 't', 't'])
    table.set_cols_align(['l', 'l', 'c', 'l', 'l', 'c', 'c', 'l', 'l'])
    table.add_row(header)
    # One data row per connection, built by the shared row helper.
    for index, connection in enumerate(connections):
        table.add_row(_get_connection_row(index, connection))
    return table.draw()
def autodoc_class(cls, model_cls):
    """Build reST autodoc text for a mixin declaration.

    Lists the inherited models/mixins and, when the model declares SQL
    fields, appends a field-name/description table.
    """
    lines = [":Declaration type: Mixin", ':Inherit model or mixin:', '']
    for base in model_cls.__anyblok_bases__:
        lines.append(' * ' + str(base))
    lines += ['', '']
    if has_sql_fields([model_cls]):
        field_rows = [['field name', 'Description']]
        for field_name, field in get_fields(model_cls).items():
            field_rows.append([field_name, field.autodoc()])
        table = Texttable()
        table.set_cols_valign(["m", "t"])
        table.add_rows(field_rows)
        lines += [table.draw(), '', '']
    return '\n'.join(lines)
def connectionsTable(connections, output_format):
    """Draw an ASCII table of *connections* and return it as a string.

    NOTE(review): ``output_format`` is unused inside this function —
    verify whether callers rely on it.
    """
    table = Texttable(max_width=0)
    # Per-column vertical and horizontal alignment.
    table.set_cols_valign(["m", "t", "t", "t", "t", "t", "m", "t", "t"])
    table.set_cols_align(["l", "l", "c", "l", "l", "c", "c", "l", "l"])
    table.add_row(["#", "Station", "Platform", "Date", "Time",
                   "Duration", "Chg.", "With", "Occupancy"])
    for row_index, conn in enumerate(connections):
        table.add_row(_getConnectionRow(row_index, conn))
    return table.draw()
def render_instrument_as_table(instrument):
    """
    Returns ASCII table view of instrument.

    :param instrument: The instrument to be rendered.
    :type instrument: :class:`mytardisclient.models.instrument.Instrument`
    """
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Instrument field", "Value"])
    # Field label / value pairs, in display order.
    for label, value in (("ID", instrument.id),
                         ("Name", instrument.name),
                         ("Facility", instrument.facility)):
        table.add_row([label, value])
    return table.draw() + "\n"
def print_table(difftable, name1, name2, detailed=False):
    """Print a side-by-side counter comparison table.

    :param difftable: mapping of counter-group name ->
        {counter name: [value1, value2]}.
        NOTE: each value list is mutated in place — a missing second value
        is padded with 0 and the delta is appended as a third element
        (same behavior as the original).
    :param name1: column label for the first value set
    :param name2: column label for the second value set
    :param detailed: when True, show fully-qualified group names and
        include the task-specific *_INPUT_*/*_OUTPUT_* counter groups
    """
    table = Texttable(max_width=0)
    table.set_cols_align(["l", "l", "l", "l", "l"])
    table.set_cols_valign(["m", "m", "m", "m", "m"])
    table.add_row(["Counter Group", "Counter Name", name1, name2, "delta"])
    for group in sorted(difftable):
        # Ignore task-specific counters in the default output.
        if not detailed and ("_INPUT_" in group or "_OUTPUT_" in group):
            continue
        counters = difftable[group]
        # Pad missing second values and compute deltas in place.
        for values in counters.values():
            if len(values) == 1:
                values.append(0)
            values.append(values[0] - values[1])
        table.add_row([
            # Counter group: short name unless a detailed listing was asked.
            group if detailed else group.split(".")[-1],
            "\n".join(counters.keys()),
            "\n".join(str(v[0]) for v in counters.values()),
            "\n".join(str(v[1]) for v in counters.values()),
            "\n".join(str(v[2]) for v in counters.values()),
        ])
    # print() (was a Python 2 print statement with a stray semicolon;
    # the rest of this file already uses the function form).
    print(table.draw() + "\n")
def autodoc_fields(declaration_cls, model_cls):
    """Produce the autodocumentation table for a model's fields.

    Exposed as a standalone function so it can be reused by a simple
    export, e.g., from anyblok.mixin.

    :return: the drawn table followed by a blank line, or '' when the
        model declares no SQL fields
    """
    if not has_sql_fields([model_cls]):
        return ''
    field_rows = [['Fields', '']]
    for field_name, field in get_fields(model_cls).items():
        field_rows.append([field_name, field.autodoc()])
    table = Texttable(max_width=0)
    table.set_cols_valign(["m", "t"])
    table.add_rows(field_rows)
    return table.draw() + '\n\n'
def json_dict_to_txt_table(dict_yaml_file):
    """Render each top-level field of a YAML dictionary file as a
    reST-style heading followed by a Sub-key/Type/Meaning table.

    :param dict_yaml_file: path to the YAML file to load
    """
    with open(dict_yaml_file, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects from untrusted input; prefer yaml.safe_load if
        # this file can come from outside — confirm before changing.
        yaml_stream = yaml.load(f)
    for main_field, sub_key in yaml_stream.items():
        # Heading text with a '~' underline of matching length.
        title = main_field + ' field'
        print(title + '\n' + '~' * len(title) + '\n')
        field_data_rows = handle_data_items(sub_key)
        table = Texttable(max_width=120)
        table.set_cols_align(["l", "c", "l"])
        table.set_cols_valign(["t", "m", "m"])
        # Create table's header row.
        table.add_rows([["Sub-key", "Type", "Meaning"]])
        table.add_rows(field_data_rows, header=False)
        print(table.draw() + "\n")
def render_api_schema_as_table(api_schema):
    """
    Returns ASCII table view of API schema.

    :param api_schema: The API schema model to be displayed.
    :type api_schema: :class:`mytardisclient.models.api.ApiSchema`
    """
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(["t", "t"])
    table.header(["API Schema field", "Value"])
    table.add_row(["Model", api_schema.model])
    # sorted() accepts any iterable directly; the original wrapped the
    # fields in a redundant list comprehension.
    table.add_row(["Fields", "\n".join(sorted(api_schema.fields))])
    table.add_row(["Filtering", json.dumps(api_schema.filtering,
                                           indent=2, sort_keys=True)])
    table.add_row(["Ordering", json.dumps(api_schema.ordering,
                                          indent=2, sort_keys=True)])
    return table.draw() + "\n"
def render_datafile_as_table(datafile):
    """
    Returns ASCII table view of datafile.

    :param datafile: The datafile to be rendered.
    :type datafile: :class:`mytardisclient.models.datafile.DataFile`
    """
    replicas = datafile.replicas
    locations = [replica.location for replica in replicas]
    uris = [replica.uri for replica in replicas]

    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["DataFile field", "Value"])
    # Field label / value pairs, in display order.
    for label, value in [("ID", datafile.id),
                         ("Dataset", datafile.dataset),
                         ("Storage Box", "\n".join(locations)),
                         ("Directory", datafile.directory),
                         ("Filename", datafile.filename),
                         ("URI", "\n".join(uris)),
                         ("Verified", str(datafile.verified)),
                         ("Size", human_readable_size_string(datafile.size)),
                         ("MD5 Sum", datafile.md5sum)]:
        table.add_row([label, value])
    rendered = table.draw() + "\n"

    # Append one table per parameter set, each preceded by a blank line.
    for param_set in datafile.parameter_sets:
        rendered += "\n"
        table = Texttable(max_width=0)
        table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
        table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
        table.header(["DataFileParameter ID", "Schema", "Parameter Name",
                      "String Value", "Numerical Value", "Datetime Value",
                      "Link ID"])
        for param in param_set.parameters:
            table.add_row([param.id, param.name.schema, param.name,
                           param.string_value,
                           param.numerical_value or '',
                           param.datetime_value or '',
                           param.link_id or ''])
        rendered += table.draw() + "\n"
    return rendered
def render_schema_as_table(schema):
    """
    Returns ASCII table view of schema.

    :param schema: The schema to be rendered.
    :type schema: :class:`mytardisclient.models.schema.Schema`
    """
    output = ""
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Schema field", "Value"])
    for label, value in [("ID", schema.id),
                         ("Name", schema.name),
                         ("Namespace", schema.namespace),
                         ("Type", schema.type),
                         ("Subtype", schema.subtype),
                         ("Immutable", str(bool(schema.immutable))),
                         ("Hidden", str(bool(schema.hidden)))]:
        table.add_row([label, value])
    output += table.draw() + "\n"
    output += "\n"

    # Second table: one row per parameter name in the schema.
    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l', 'l', 'l', 'l'])
    table.set_cols_valign(['m'] * 10)
    table.header(["ParameterName ID", "Full Name", "Name", "Data Type",
                  "Units", "Immutable", "Is Searchable", "Order", "Choices",
                  "Comparison Type"])
    for pname in schema.parameter_names:
        # .encode(..., 'ignore') mirrors the original handling of
        # non-ASCII full names and units.
        table.add_row([pname.id,
                       pname.full_name.encode('utf8', 'ignore'),
                       pname.name,
                       pname.data_type,
                       pname.units.encode('utf8', 'ignore'),
                       str(bool(pname.immutable)),
                       str(bool(pname.is_searchable)),
                       pname.order,
                       pname.choices,
                       pname.comparison_type])
    output += table.draw() + "\n"
    return output
def render_facility_as_table(facility, display_heading=True):
    """
    Returns ASCII table view of facility.

    :param facility: The facility to be rendered.
    :type facility: :class:`mytardisclient.models.facility.Facility`
    :param display_heading: When True, a "Model: Facility" heading is
        prepended to the table.
    """
    heading = "\nModel: Facility\n\n" if display_heading else ""
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Facility field", "Value"])
    for label, value in (("ID", facility.id),
                         ("Name", facility.name),
                         ("Manager Group", facility.manager_group)):
        table.add_row([label, value])
    return heading + table.draw() + "\n"
def info(self):
    """Print a summary table of the engine geometry parameters.

    Reads the instance geometry (crank radius, conrod length, bore,
    piston pin offset) and the volume/angle helpers V(), TDC(), BDC().
    """
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',   # text
                          'f',   # float (decimal)
                          't',   # text
                          't'])  # text
    table.set_cols_align(["l", "r", "l", "l"])
    table.set_cols_valign(["m", "m", "m", "m"])
    table.header(["Variable", "Value", "Unit", "Description"])
    # (variable, value, unit, description) rows in display order.
    rows = [
        ("cr", self._cr, "m", "crankshaft radius"),
        ("cl", self._cl, "m", "conrod length"),
        ("bo", self._bo, "m", "bore"),
        ("pp", self._pp, "m", "piston pin offset"),
        ("cv", self.V(self.TDC()) * 1e6, "cm3", "clearance volume at TDC"),
        ("TDC", np.degrees(self.TDC()), "deg", "angle of piston TDC"),
        ("BDC", np.degrees(self.BDC()), "deg", "angle of piston BDC"),
        ("Max V", self.V(self.BDC()) * 1e6, "cm3", "volume at BDC"),
        ("Min V", self.V(self.TDC()) * 1e6, "cm3", "volume at TDC"),
    ]
    for row in rows:
        table.add_row(list(row))
    print("\n" + table.draw() + "\n")
def status(self):
    """Print the current status of the transaction managers in this bot.

    Shows an overall balance/price table, then one row per running
    transaction manager (ID, status, balance, strategy percentage).
    """
    btcprice = price()
    table = Texttable()
    table.set_cols_align(['c', 'c'])
    table.set_cols_valign(['m', 'm'])
    table.header(['Current Balance', 'BTC Price'])
    table.add_row(['$' + str((self.value(btcprice))[0]), str(btcprice)])
    # print() (was a Python 2 print statement; parts of this file already
    # use the function form).
    print(table.draw())

    detail = Texttable()
    detail.set_cols_align(['c', 'c', 'c', 'c'])
    detail.set_cols_valign(['m', 'm', 'm', 'm'])
    detail.header(['TM ID', 'Status', 'Balance', 'Strategy'])
    for transactor in self.runningTMS:
        detail.add_row([
            str(transactor.identifier),
            str(transactor.status(btcprice)),
            '$' + str(transactor.value(btcprice)),
            # Strategy rendered as a percentage, e.g. '12.5%'.
            str(float(transactor.currstrategy() * 100)) + '%',
        ])
    print(detail.draw())
def render_experiment_as_table(experiment):
    """
    Returns ASCII table view of experiment.

    :param experiment: The experiment to be rendered.
    :type experiment: :class:`mytardisclient.models.experiment.Experiment`
    """
    rendered = ""
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Experiment field", "Value"])
    for label, value in [("ID", experiment.id),
                         ("Institution", experiment.institution_name),
                         ("Title", experiment.title),
                         ("Description", experiment.description)]:
        table.add_row([label, value])
    rendered += table.draw() + "\n"

    # One additional table per parameter set, preceded by a blank line.
    for param_set in experiment.parameter_sets:
        rendered += "\n"
        table = Texttable(max_width=0)
        table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
        table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
        table.header(["ExperimentParameter ID", "Schema", "Parameter Name",
                      "String Value", "Numerical Value", "Datetime Value",
                      "Link ID"])
        for param in param_set.parameters:
            table.add_row([param.id, param.name.schema, param.name,
                           param.string_value,
                           param.numerical_value or '',
                           param.datetime_value or '',
                           param.link_id or ''])
        rendered += table.draw() + "\n"
    return rendered
def render_api_endpoints_as_table(api_endpoints, display_heading=True):
    """
    Returns ASCII table view of api_endpoints.

    :param api_endpoints: The API endpoints to be rendered.
    :type api_endpoints: :class:`mytardisclient.models.api.ApiEndpoints`
    :param display_heading: When True, an "API Endpoints" heading is
        prepended to the table.
    """
    heading = "\nAPI Endpoints\n" if display_heading else ""
    table = Texttable(max_width=0)
    table.set_cols_align(["l", 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm'])
    table.header(["Model", "List Endpoint", "Schema"])
    for endpoint in api_endpoints:
        table.add_row([endpoint.model,
                       endpoint.list_endpoint,
                       endpoint.schema])
    return heading + table.draw() + "\n"
def render_dataset_as_table(dataset):
    """
    Returns ASCII table view of dataset.

    :param dataset: The dataset to be rendered.
    :type dataset: :class:`mytardisclient.models.dataset.Dataset`
    """
    table = Texttable()
    table.set_cols_align(['l', 'l'])
    table.set_cols_valign(['m', 'm'])
    table.header(["Dataset field", "Value"])
    for label, value in [("ID", dataset.id),
                         ("Experiment(s)", "\n".join(dataset.experiments)),
                         ("Description", dataset.description),
                         ("Instrument", dataset.instrument)]:
        table.add_row([label, value])
    rendered = table.draw() + "\n"

    # One additional table per parameter set, preceded by a blank line.
    for param_set in dataset.parameter_sets:
        rendered += "\n"
        table = Texttable(max_width=0)
        table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
        table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
        table.header(["DatasetParameter ID", "Schema", "Parameter Name",
                      "String Value", "Numerical Value", "Datetime Value",
                      "Link ID"])
        for param in param_set.parameters:
            table.add_row([param.id, param.name.schema, param.name,
                           param.string_value,
                           param.numerical_value or '',
                           param.datetime_value or '',
                           param.link_id or ''])
        rendered += table.draw() + "\n"
    return rendered
def render_datafiles_as_table(datafiles, display_heading=True):
    """
    Returns ASCII table view of datafiles.

    :param datafiles: The datafiles to be rendered.
    :type datafiles: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: When True, the query's meta information (URL,
        total count, limit, offset) is summarized in a heading before the
        table, which can be used to detect pagination truncation.
    """
    if display_heading:
        heading = ("\n"
                   "Model: DataFile\n"
                   "Query: %s\n"
                   "Total Count: %s\n"
                   "Limit: %s\n"
                   "Offset: %s\n\n"
                   % (datafiles.url, datafiles.total_count,
                      datafiles.limit, datafiles.offset))
    else:
        heading = ""
    table = Texttable(max_width=0)
    table.set_cols_align(["r", 'l', 'l', 'l', 'l', 'l', 'l'])
    table.set_cols_valign(['m', 'm', 'm', 'm', 'm', 'm', 'm'])
    table.header(["DataFile ID", "Filename", "Storage Box", "URI",
                  "Verified", "Size", "MD5 Sum"])
    for datafile in datafiles:
        replicas = datafile.replicas
        table.add_row([datafile.id,
                       datafile.filename,
                       "\n".join(replica.location for replica in replicas),
                       "\n".join(replica.uri for replica in replicas),
                       str(datafile.verified),
                       human_readable_size_string(datafile.size),
                       datafile.md5sum])
    return heading + table.draw() + "\n"
def render_storage_box_as_table(storage_box):
    """
    Returns ASCII table view of storage_box.

    Renders the storage box fields, then its options and its attributes
    as two further key/value tables.
    """
    main_table = Texttable(max_width=0)
    main_table.set_cols_align(['l', 'l'])
    main_table.set_cols_valign(['m', 'm'])
    main_table.header(["StorageBox field", "Value"])
    for label, value in [("ID", storage_box.id),
                         ("Name", storage_box.name),
                         ("Description", storage_box.description),
                         ("Django Storage Class",
                          storage_box.django_storage_class),
                         ("Max Size", storage_box.max_size),
                         ("Status", storage_box.status)]:
        main_table.add_row([label, value])
    rendered = main_table.draw() + "\n"

    # Options and attributes share the same key/value layout, so render
    # both through one loop.
    for prefix, pairs in [("StorageBoxOption", storage_box.options),
                          ("StorageBoxAttribute", storage_box.attributes)]:
        rendered += "\n"
        table = Texttable(max_width=0)
        table.set_cols_align(["r", 'l'])
        table.set_cols_valign(['m', 'm'])
        table.header([prefix + " Key", prefix + " Value"])
        for pair in pairs:
            table.add_row([pair.key, pair.value])
        rendered += table.draw() + "\n"
    return rendered
else: return str(pool[x]) p = subprocess.check_output('ceph osd dump -f json-pretty', shell=True) pools = json.loads(p)['pools'] pools_table = Texttable() header = [ "Id", "Pool", "Size", "Min_size", "Pg_num", "Pgp_num", "Crush","Quota (MB)", "Quota (obj)" ] keys = [ "pool", "pool_name", "size", "min_size", "pg_num", "pg_placement_num", "crush_ruleset","quota_max_bytes","quota_max_objects" ] pools_table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header)) for pool in pools: pools_table.add_row(map(f, keys)) table = Texttable() table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES) table.set_cols_align( [ "l", "l", "l", "l", "l", "l", "l" ]) table.set_cols_valign([ "m", "m", "m", "m", "m", "m", "m" ]) table.set_cols_width([ "20", "20", "8","8","20","8","8"]) header = [ "Pool", "Image", "Size(Mb)", "Features", "Lockers", "Str_size", "Str_cnt" ] keys = [ "features", "list_lockers", "stripe_unit", "stripe_count" ] table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header)) with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster: pool_list = cluster.list_pools() for pool in pool_list: table.add_row([ get_color_string(bcolors.GREEN, pool) , "", "", "", "", "", "" ]) with cluster.open_ioctx(pool) as ioctx: rbd_inst = rbd.RBD() image_list = rbd_inst.list(ioctx) for image_name in image_list: with rbd.Image(ioctx, image_name) as image: image_size = str(image.size()/1024**2)
#now_mem = now_mem - 131072 now_mem = now_mem - 262144 #now_mem = now_mem - 524288 set = subprocess.Popen(['virsh', 'setmem', ' %s'%(domainID), '%s'%(now_mem)], stdout=subprocess.PIPE) return else: return if __name__ == '__main__': conn = connect() while(1): #time.sleep(1) domainIDs = get_active_domainIDs(conn) t = Texttable() t.set_cols_align(['c','c','c','c','c','c','c']) t.set_cols_valign(['m','m','m','m','m','m','m']) t.set_cols_width([20,17,20,15,20,13,13]) for domainID in domainIDs: domain_name = get_active_domain_name(conn,domainID) domain_memory = get_current_memory_size(domainID) domain_max_mem = get_active_domain_max_mem(conn,domainID) domain_max_cpu = get_active_domain_max_cpu(conn,domain_name) domain_current_cpu = get_current_cpu_count(domainID) domain_ip = get_active_domain_ip(domainID) domain_online_cpu = get_vm_vcpu_count(domainID) domain_cpu_usage = get_vm_cpu_usage(domainID) domain_mem_usage = get_vm_mem_usage(domainID) domain_disk_total = get_vm_disk_total(domainID) domain_disk_usage = get_vm_disk_usage(domainID) control_vm_cpu(domainID,domain_max_cpu,domain_online_cpu) control_vm_memory(domainID,domain_memory,domain_max_mem)