def tabulateConcentrations(r, fmt='psql'):
    '''Display the current floating-species concentrations in a neat table.

    Args:
        r (roadrunner instance): RoadRunner variable
        fmt (str): any tabulate table format (default ``'psql'``)

    Example:
        .. code-block:: text

            >>> print (teUtils.prettyTabular.tabulateConcentrations (r))
            +------+-----------------+
            | Id   |   Concentration |
            |------+-----------------|
            | S1   |        8.51577  |
            | S2   |        2.92542  |
            +------+-----------------+
    '''
    concentrations = r.getFloatingSpeciesConcentrations()
    # BUGFIX: getFloatingSpeciesConcentrations() returns one value per
    # *floating* species, so the ids must come from getFloatingSpeciesIds().
    # The previous getIndependentFloatingSpeciesIds() call returns only the
    # independent subset; with conserved moieties, zip() silently dropped
    # and mislabelled rows.
    ids = r.getFloatingSpeciesIds()
    rows = [[species_id, value] for species_id, value in zip(ids, concentrations)]
    print(_tabulate(rows, ['Id', 'Concentration'], tablefmt=fmt))
def tabulateFluxes(r, fmt='psql'):
    '''Display the current reaction rates in a neat table.

    Args:
        r (roadrunner instance): RoadRunner variable
        fmt (str): any tabulate table format (default ``'psql'``)

    Example:
        .. code-block:: text

            >>> print (teUtils.prettyTabular.tabulateFluxes (r))
            +------+------------------+
            | Id   |   Reaction Rates |
            |------+------------------|
            | J1   |         0.7859   |
            | J2   |         0.785893 |
            +------+------------------+
    '''
    rates = r.getReactionRates()
    reaction_ids = r.getReactionIds()
    # Pair each reaction id with its current rate, one table row per reaction.
    rows = [[reaction_id, rate] for reaction_id, rate in zip(reaction_ids, rates)]
    print(_tabulate(rows, ['Id', 'Reaction Rates'], tablefmt=fmt))
def full(returns, benchmark=None, rf=0., grayscale=False, figsize=(8, 5),
         display=True, compounded=True, trading_year_days=252):
    """Render the complete tearsheet: metrics table, 5 worst drawdowns, plots.

    Output goes to rich display cells when running inside a notebook,
    otherwise to stdout; the matplotlib plots are produced in both cases.
    """
    drawdown_series = _stats.to_drawdown_series(returns)
    worst_dd = _stats.drawdown_details(drawdown_series).sort_values(
        by='max drawdown', ascending=True)[:5]

    if not worst_dd.empty:
        # Re-number rows 1..5 and title-case the column labels for display.
        worst_dd.index = range(1, min(6, len(worst_dd) + 1))
        worst_dd.columns = map(lambda x: str(x).title(), worst_dd.columns)

    if _utils._in_notebook():
        # Notebook: use IPython rich display.
        iDisplay(iHTML('<h4>Performance Metrics</h4>'))
        iDisplay(metrics(returns=returns, benchmark=benchmark, rf=rf,
                         display=display, mode='full', compounded=compounded))
        iDisplay(iHTML('<h4>5 Worst Drawdowns</h4>'))
        if not worst_dd.empty:
            iDisplay(worst_dd)
        else:
            iDisplay(iHTML("<p>(no drawdowns)</p>"))
        iDisplay(iHTML('<h4>Strategy Visualization</h4>'))
    else:
        # Console: plain-text sections.
        print('[Performance Metrics]\n')
        metrics(returns=returns, benchmark=benchmark, rf=rf,
                display=display, mode='full', compounded=compounded)
        print('\n\n')
        print('[5 Worst Drawdowns]\n')
        if not worst_dd.empty:
            print(_tabulate(worst_dd, headers="keys",
                            tablefmt='simple', floatfmt=".2f"))
        else:
            print("(no drawdowns)")
        print('\n\n')
        print('[Strategy Visualization]\nvia Matplotlib')

    plots(returns=returns, benchmark=benchmark, grayscale=grayscale,
          figsize=figsize, mode='full', trading_year_days=trading_year_days)
def tabulate(grid, *args, pick=None, headers=None, label='', verbose=False,
             tablefmt='psql', disable_numparse=True, **kwargs):
    '''Tabulate presentation wrapper.

    Extends tabulate to be able to pick column numbers to view and supports
    a table label printed with the caller's line number.

    Args:
        grid: list of rows (each row a list of cells).
        *args: forwarded to the underlying tabulate call.
        pick (list[int] | None): column indices to display; None/empty shows all.
        headers (list | None): column headers; must match the displayed width.
        label (str): optional label printed with the caller's line number.
        verbose (bool): print the caller's line number even without a label.
        tablefmt (str): tabulate table format.
        disable_numparse (bool): forwarded to tabulate.

    Returns:
        str: the rendered table.

    Raises:
        ValueError: if pick is wider than headers, a picked column is out of
            range, or headers do not match the grid width.
    '''
    # Avoid the mutable-default-argument pitfall: None is the real default
    # and is normalized here, so behavior for existing callers is unchanged.
    pick = [] if pick is None else pick
    headers = [] if headers is None else headers
    width = 0 if not grid else len(grid[0])
    if verbose or label:
        # Report the caller's source line so tables can be located in output.
        print((currentframe().f_back).f_lineno, " :", label)
    if pick and headers and (len(pick) > len(headers)):
        raise ValueError(
            f"number of cols in pick {len(pick)} exceeds headers {len(headers)}"
        )
    if pick:
        _s = sorted(pick)
        _min, _max = _s[0], _s[-1]
        # Negative indices are allowed, python-style, down to -width.
        if (1 + _max > width) or (_min < -width):
            raise ValueError(
                f"column pick exceeds the width of grid. Value must be between {-width} and {width - 1}"
            )
        _grid = [[row[i] for i in pick] for row in grid]
        return _tabulate(_grid, *args, tablefmt=tablefmt, headers=headers,
                         disable_numparse=disable_numparse, **kwargs)
    if headers and width != len(headers):
        raise ValueError(
            "headers do not match the width of grid. Did you forget to set a pick range?"
        )
    return _tabulate(grid, *args, tablefmt=tablefmt, headers=headers,
                     disable_numparse=disable_numparse, **kwargs)
def makeTable(items):
    """Render *items* as a 'pretty' table with serial-number, Name and Path
    columns; a falsy *items* (e.g. None) is treated as an empty table."""
    rows = items if items else []
    return _tabulate(
        rows,
        headers=["S.No.", "Name", "Path"],
        tablefmt="pretty",
        showindex=True,
    )
def tabulate(*args, **kwargs):
    """Render a table as Markdown by default.

    If the caller supplies an explicit ``tablefmt``, defer entirely to the
    underlying tabulate call; otherwise apply pipe-format defaults and wrap
    the result in a Markdown object.
    """
    if "tablefmt" in kwargs:
        return _tabulate(*args, **kwargs)
    # Defaults for the Markdown path; caller kwargs win on any overlap.
    options = {
        "tablefmt": "pipe",
        "headers": "keys",
        "showindex": "always",
    }
    options.update(kwargs)
    return Markdown(_tabulate(*args, **options))
def _html_table(obj, showindex="default"):
    """Render *obj* as compact HTML: tabulate output with the inline
    text-align styles removed and cell padding stripped."""
    html = _tabulate(obj, headers="keys", tablefmt='html',
                     floatfmt=".2f", showindex=showindex)
    # Drop the inline alignment attributes tabulate emits.
    for styled in (' style="text-align: right;"',
                   ' style="text-align: left;"',
                   ' style="text-align: center;"'):
        html = html.replace(styled, '')
    # Strip the padding spaces inside <td>/<th> cells.
    for padded, bare in (('<td> +', '<td>'),
                         (' +</td>', '</td>'),
                         ('<th> +', '<th>'),
                         (' +</th>', '</th>')):
        html = _regex.sub(padded, bare, html)
    return html
def get_calculated_parameter_table(self):
    """Return a Markdown (pipe-format) table of the derived jet parameters,
    labelled with their LaTeX symbols."""
    rows = [
        [r'$\Omega$', self.omega],
        [r'$v_{jet}$', self.v_jet],
        [r'$L_1$', self.L_1],
        [r'$L_2$', self.L_2],
        [r'$L_{1a}$', self.L_1a],
        [r'$L_{1b}$', self.L_1b],
        [r'$L_{1c}$', self.L_1c],
    ]
    return _tabulate(rows,
                     headers=['Calculated Jet Parameter', 'Value'],
                     tablefmt='pipe')
def tabulate(ret_dict, keys_colors=None, add_num=True):
    """Make a table out of the `dict` returned by the api.

    :param ret_dict: `dict` returned by the api
    :param keys_colors: `list` of `tuples` or `str` which contains info on
        how the table should be ordered and colored. Ex:
        keys_colors=[['rank', {'fg': 'yellow', 'bold': True}],
                     ['username', {'fg': 'blue', 'bold': True}],
                     'country', 'totalScore']
        will color rank with foreground yellow and username with
        foreground blue.
    :param add_num: `bool` Whether to add a Number column or not
    """
    rows = []
    for position, entry in enumerate(ret_dict, start=1):
        cells = [position] if add_num else []
        for key in keys_colors:
            if isinstance(key, list):
                # Styled column: [name, click-style kwargs].
                # NOTE: Test on python 3.3
                cells.append(click.style(str(entry[key[0]]), **key[1]))
            else:
                cells.append(entry[key])
        rows.append(cells)
    headers = ['No'] if add_num else []
    for key in keys_colors:
        if isinstance(key, list):
            headers.append(click.style(key[0], **key[1]))
        else:
            headers.append(key)
    return _tabulate(rows, headers, tablefmt='psql')
def metrics(returns, benchmark=None, rf=0., display=True, mode='basic', sep=False, **kwargs):
    """Build (and optionally print) a table of strategy performance metrics.

    Args:
        returns: strategy returns (pandas Series; multi-column DataFrame rejected).
        benchmark: optional benchmark returns to compare against.
        rf (float): risk-free rate forwarded to the ratio/CAGR calculations.
        display (bool): if True, print the table and return None; otherwise
            return the metrics DataFrame.
        mode (str): 'basic' or 'full' (adds volatility, VaR, win-rate, ... rows).
        sep (bool): when returning the frame, keep the blank separator rows.
        **kwargs: presence of the key 'internal' switches on display-style
            (stringified, '%'-suffixed) formatting even when display is False.

    Returns:
        pandas.DataFrame or None
    """
    if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
        raise ValueError("`returns` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")
    if benchmark is not None:
        # NOTE(review): this re-checks `returns`, not `benchmark` — looks like
        # a copy/paste slip, preserved as-is.
        if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
            raise ValueError("`benchmark` must be a pandas Series, "
                             "but a multi-column DataFrame was passed")
    # `blank` fills separator rows: one cell per output column.
    blank = ['']
    df = _pd.DataFrame({"returns": _utils._prepare_returns(returns, rf)})
    if benchmark is not None:
        blank = ['', '']
        df["benchmark"] = _utils._prepare_benchmark(benchmark, returns.index, rf)
    df = df.dropna()

    # pct multiplier: values are shown as percentages when displaying.
    pct = 100 if display or "internal" in kwargs else 1

    # return df
    dd = _calc_dd(df, display=(display or "internal" in kwargs))

    # Metrics are accumulated as COLUMNS, then transposed at the end so each
    # metric becomes a row. Column names containing '~' are blank visual
    # separators; a trailing '%' is stripped later, so '%%' keeps one literal
    # '%' in the final label.
    metrics = _pd.DataFrame()
    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}
    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf
    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-free rate %'] = _pd.Series(s_rf)
    metrics['Exposure %%'] = _stats.exposure(df) * pct
    metrics['~'] = blank
    metrics['Cumulative Return %'] = _stats.comp(df) * pct
    metrics['CAGR%%'] = _stats.cagr(df, rf) * pct
    metrics['Sharpe'] = _stats.sharpe(df, rf)
    metrics['Sortino'] = _stats.sortino(df, rf)
    # Placeholders: filled from the drawdown frame `dd` further below.
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(df['returns']) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(df['benchmark']) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'])
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]
        metrics['Calmar'] = _stats.calmar(df)
        metrics['Skew'] = _stats.skew(df)
        metrics['Kurtosis'] = _stats.kurtosis(df)

    if mode.lower() == 'full':
        metrics['~~~~~~~~~~'] = blank
        metrics['Expected Daily %%'] = _stats.expected_return(df) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M') * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A') * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(df) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df)
        metrics['Daily Value-at-Risk %'] = -abs(_stats.var(df) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(_stats.cvar(df) * pct)

    metrics['~~~~~~'] = blank
    metrics['Payoff Ratio'] = _stats.payoff_ratio(df)
    metrics['Profit Factor'] = _stats.profit_factor(df)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(df)
    metrics['CPC Index'] = _stats.cpc_index(df)
    metrics['Tail Ratio'] = _stats.tail_ratio(df)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(df)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(df)

    # returns — trailing-window performance, anchored at today's date.
    metrics['~~'] = blank
    today = _dt.today()
    metrics['MTD %'] = _stats.comp(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct
    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    metrics['YTD %'] = _stats.comp(df[df.index >= _dt(today.year, 1, 1)]) * pct
    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 3, today.month, today.day)]) * pct
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 5, today.month, today.day)]) * pct
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 10, today.month, today.day)]) * pct
    metrics['All-time (ann.) %'] = _stats.cagr(df) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df) * pct
        metrics['Worst Day %'] = _stats.worst(df) * pct
        metrics['Best Month %'] = _stats.best(df, aggregate='M') * pct
        metrics['Worst Month %'] = _stats.worst(df, aggregate='M') * pct
        metrics['Best Year %'] = _stats.best(df, aggregate='A') * pct
        metrics['Worst Year %'] = _stats.worst(df, aggregate='A') * pct

    # dd — splice the drawdown summary rows (incl. 'Max Drawdown %' /
    # 'Longest DD Days' placeholders declared above) into the table.
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(df, aggregate='M') * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(df, aggregate='M') * pct
        metrics['Win Days %%'] = _stats.win_rate(df) * pct
        metrics['Win Month %%'] = _stats.win_rate(df, aggregate='M') * pct
        metrics['Win Quarter %%'] = _stats.win_rate(df, aggregate='Q') * pct
        metrics['Win Year %%'] = _stats.win_rate(df, aggregate='A') * pct

    if mode.lower() == "full" and "benchmark" in df:
        metrics['~~~~~~~'] = blank
        greeks = _stats.greeks(df['returns'], df['benchmark'])
        metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
        metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display: round numerics, then stringify and '%'-suffix
    # when printing (or when 'internal' formatting was requested).
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass  # non-numeric columns (dates, separators) stay as-is
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'
    metrics['Longest DD Days'] = _pd.to_numeric(
        metrics['Longest DD Days']).astype('int')
    metrics['Avg. Drawdown Days'] = _pd.to_numeric(
        metrics['Avg. Drawdown Days']).astype('int')
    if display or "internal" in kwargs:
        metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
        metrics['Avg. Drawdown Days'] = metrics[
            'Avg. Drawdown Days'].astype(str)

    # Blank out separator labels and strip the trailing '%' marker from names.
    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T
    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
def tabulate(headers: List[str], rows: List[List[str]]):
    """Print *rows* as a plain-text ('simple' format) table under *headers*."""
    rendered = _tabulate(rows, headers=headers, tablefmt='simple')
    print(rendered)
def metrics(returns, benchmark=None, rf=0., display=True, mode='basic', sep=False,
            compounded=True, periods_per_year=252, prepare_returns=True,
            match_dates=False, **kwargs):
    """Build (and optionally print) a table of strategy performance metrics.

    Args:
        returns: strategy returns (Series or one-column DataFrame).
        benchmark: optional benchmark returns to compare against.
        rf (float): risk-free rate used by the ratio/CAGR calculations.
        display (bool): if True, print the table and return None; otherwise
            return the metrics DataFrame.
        mode (str): 'basic' or 'full' (adds smart ratios, VaR, win-rate, ...).
        sep (bool): when returning the frame, keep the blank separator rows.
        compounded (bool): compound (vs. sum) returns for cumulative figures.
        periods_per_year (int): trading periods per year (annualization).
        prepare_returns (bool): run returns through _utils._prepare_returns.
        match_dates (bool): align returns and benchmark to common dates.
        **kwargs: 'internal' forces display-style formatting; 'as_pct'
            forces the percentage multiplier.

    Returns:
        pandas.DataFrame or None
    """
    win_year, _ = _get_trading_periods(periods_per_year)

    if benchmark is not None \
            and isinstance(benchmark, _pd.DataFrame) and len(benchmark.columns) > 1:
        raise ValueError("`benchmark` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    # `blank` fills separator rows: one cell per output column.
    blank = ['']

    if isinstance(returns, _pd.DataFrame):
        if len(returns.columns) > 1:
            raise ValueError(
                "`returns` needs to be a Pandas Series or one column DataFrame. multi colums DataFrame was passed"
            )
        returns = returns[returns.columns[0]]

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    df = _pd.DataFrame({"returns": returns})

    if benchmark is not None:
        blank = ['', '']
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        if match_dates is True:
            returns, benchmark = _match_dates(returns, benchmark)
        # NOTE(review): indentation of these two assignments reconstructed
        # from a collapsed source line — verify against upstream.
        df["returns"] = returns
        df["benchmark"] = benchmark

    df = df.fillna(0)

    # pct multiplier: values are shown as percentages when displaying.
    pct = 100 if display or "internal" in kwargs else 1
    if kwargs.get("as_pct", False):
        pct = 100

    # return df
    dd = _calc_dd(df, display=(display or "internal" in kwargs),
                  as_pct=kwargs.get("as_pct", False))

    # Metrics accumulate as COLUMNS then get transposed at the end. Names
    # with '~' are blank separators; a trailing '%' is stripped later, so
    # '%%' keeps one literal '%' in the final label.
    metrics = _pd.DataFrame()
    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
    metrics['Time in Market %'] = _stats.exposure(df, prepare_returns=False) * pct

    metrics['~'] = blank

    if compounded:
        metrics['Cumulative Return %'] = (_stats.comp(df) * pct).map(
            '{:,.2f}'.format)
    else:
        metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)

    metrics['CAGR﹪%'] = _stats.cagr(df, rf, compounded) * pct

    metrics['~~~~~~~~~~~~~~'] = blank

    metrics['Sharpe'] = _stats.sharpe(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sharpe'] = _stats.smart_sharpe(df, rf, win_year, True)
    metrics['Sortino'] = _stats.sortino(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sortino'] = _stats.smart_sortino(df, rf, win_year, True)
    metrics['Sortino/√2'] = metrics['Sortino'] / _sqrt(2)
    if mode.lower() == 'full':
        metrics['Smart Sortino/√2'] = metrics['Smart Sortino'] / _sqrt(2)
    metrics['Omega'] = _stats.omega(df, rf, 0., win_year)

    metrics['~~~~~~~~'] = blank
    # Placeholders: filled from the drawdown frame `dd` further below.
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(
            df['returns'], win_year, True, prepare_returns=False) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(
                df['benchmark'], win_year, True, prepare_returns=False) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'],
                                              prepare_returns=False)
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]
        metrics['Calmar'] = _stats.calmar(df, prepare_returns=False)
        metrics['Skew'] = _stats.skew(df, prepare_returns=False)
        metrics['Kurtosis'] = _stats.kurtosis(df, prepare_returns=False)

        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(
            df, prepare_returns=False) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(
            df, prepare_returns=False) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df, prepare_returns=False)
        metrics['Daily Value-at-Risk %'] = -abs(
            _stats.var(df, prepare_returns=False) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(
            _stats.cvar(df, prepare_returns=False) * pct)

    metrics['~~~~~~'] = blank

    metrics['Gain/Pain Ratio'] = _stats.gain_to_pain_ratio(df, rf)
    metrics['Gain/Pain (1M)'] = _stats.gain_to_pain_ratio(df, rf, "M")
    # if mode.lower() == 'full':
    #     metrics['GPR (3M)'] = _stats.gain_to_pain_ratio(df, rf, "Q")
    #     metrics['GPR (6M)'] = _stats.gain_to_pain_ratio(df, rf, "2Q")
    #     metrics['GPR (1Y)'] = _stats.gain_to_pain_ratio(df, rf, "A")

    metrics['~~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df, prepare_returns=False)
    metrics['Profit Factor'] = _stats.profit_factor(df, prepare_returns=False)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(
        df, prepare_returns=False)
    metrics['CPC Index'] = _stats.cpc_index(df, prepare_returns=False)
    metrics['Tail Ratio'] = _stats.tail_ratio(df, prepare_returns=False)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(
        df, prepare_returns=False)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(
        df, prepare_returns=False)

    # returns — trailing windows anchored at the last data date (not today).
    metrics['~~'] = blank
    comp_func = _stats.comp if compounded else _np.sum

    today = df.index[-1]  # _dt.today()
    metrics['MTD %'] = comp_func(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = comp_func(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(3 * 365)
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    d = today - _td(5 * 365)
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    d = today - _td(10 * 365)
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    metrics['All-time (ann.) %'] = _stats.cagr(df, 0., compounded) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df, prepare_returns=False) * pct
        metrics['Worst Day %'] = _stats.worst(df, prepare_returns=False) * pct
        metrics['Best Month %'] = _stats.best(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Worst Month %'] = _stats.worst(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Best Year %'] = _stats.best(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Worst Year %'] = _stats.worst(
            df, aggregate='A', prepare_returns=False) * pct

    # dd — splice the drawdown summary rows into the table.
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df)
    metrics['Serenity Index'] = _stats.serenity_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Days %%'] = _stats.win_rate(df, prepare_returns=False) * pct
        metrics['Win Month %%'] = _stats.win_rate(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Quarter %%'] = _stats.win_rate(
            df, aggregate='Q', prepare_returns=False) * pct
        metrics['Win Year %%'] = _stats.win_rate(
            df, aggregate='A', prepare_returns=False) * pct

    # NOTE(review): placement of this benchmark block (inside vs. outside the
    # mode=='full' branch) is ambiguous in the collapsed source — verify.
    if "benchmark" in df:
        metrics['~~~~~~~'] = blank
        greeks = _stats.greeks(df['returns'], df['benchmark'], win_year,
                               prepare_returns=False)
        metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
        metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display: round numerics, then stringify and '%'-suffix
    # when printing (or when 'internal' formatting was requested).
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass  # non-numeric columns (dates, separators) stay as-is
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'

    try:
        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')

        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)
    except Exception:
        # Fallback when the drawdown columns are not numeric (e.g. empty).
        metrics['Longest DD Days'] = '-'
        metrics['Avg. Drawdown Days'] = '-'
        # NOTE(review): the conditional below re-assigns the same '-' values
        # and is redundant; preserved as-is.
        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = '-'
            metrics['Avg. Drawdown Days'] = '-'

    # Blank out separator labels and strip the trailing '%' marker from names.
    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return None

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
def option_help(mode='string', qtype=None, tablefmt='simple'):
    """Print a string to stdout displaying information on all options.

    Args:
        mode (str):    string: Return a formatted string
                       print: Print the string to stdout
                       list: Return a simple list of keywords
                       table: Return a table of lists
                       merged_table: Combine all keywords into a single table
        qtype (str):   If provided only return info on that queue type.
        tablefmt (str): A tabulate-style table format, one of::

            'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst',
            'mediawiki', 'latex', 'latex_booktabs'

    Returns:
        str: A formatted string
    """
    # hlp maps section name -> {'summary': str, 'help': {option: info}}.
    hlp = OrderedDict()

    # Explicitly get the function call help out of core to treat separately
    common = COMMON.copy()
    impts = common.pop('imports')

    hlp['common'] = {
        'summary': 'Used in every mode',
        'help': common,
    }

    hlp['func'] = {
        'summary': 'Used for function calls',
        'help': OrderedDict([('imports', impts)]),
    }

    hlp['local'] = {
        'summary': 'Used only in local mode',
        'help': NORMAL,
    }

    # Include all cluster options in one
    cluster = CLUSTER_CORE.copy()
    cluster.update(CLUSTER_OPTS)
    hlp['cluster'] = {
        'summary': 'Options that work in both slurm and torque',
        'help': cluster,
    }

    if TORQUE:
        hlp['torque'] = {
            'summary': "Used for torque only",
            'help': TORQUE,
        }
    if SLURM:
        hlp['slurm'] = {
            'summary': "Used for slurm only",
            'help': SLURM,
        }

    # Filter the sections down to the requested queue type, if any.
    if qtype:
        if qtype == 'local':
            hlp.pop('cluster')
            hlp.pop('torque')
            hlp.pop('slurm')
        elif qtype == 'slurm':
            hlp.pop('torque')
        elif qtype == 'torque':
            hlp.pop('slurm')
        else:
            raise ClusterError('qtype must be "torque", "slurm", or "local"')

    if mode == 'print' or mode == 'string':
        # Free-form text: one section per pass, options with wrapped help
        # text and a Type/Default line indented to 15 columns.
        outstr = ''
        for hlp_info in hlp.values():
            tmpstr = ''
            for option, inf in hlp_info['help'].items():
                default = inf['default'] if 'default' in inf else None
                typ = inf['type']
                helpitems = _wrap(inf['help'])
                helpstr = helpitems[0]
                if len(helpitems) > 1:
                    # Continuation lines align under the first help line.
                    hstr = '\n' + ' ' * 15
                    helpstr += hstr
                    helpstr += hstr.join(helpitems[1:])
                if isinstance(typ, (tuple, list, set)):
                    typ = [t.__name__ for t in typ]
                else:
                    typ = typ.__name__
                tmpstr += (
                    '{o:<15}{h}\n{s:<15}Type: {t}; Default: {d}\n'.format(
                        o=option + ':', h=helpstr, s=' ', t=typ, d=default))
            outstr += '{}::\n{}\n'.format(hlp_info['summary'], tmpstr)
        outstr = outstr.rstrip() + '\n'
        if mode == 'print':
            sys.stdout.write(outstr)
        else:
            return outstr

    elif mode == 'table':
        # One tabulated table per section, plus a trailing Synonyms table.
        tables = OrderedDict()
        for sect, ddct in hlp.items():
            summary = '{}: {}'.format(sect.title(), ddct['summary'])
            outtable = [['Option', 'Description', 'Type', 'Default']]
            dct = ddct['help']
            for opt, inf in dct.items():
                if isinstance(inf['type'], (tuple, list, set)):
                    typ = [t.__name__ for t in inf['type']]
                else:
                    typ = inf['type'].__name__
                outtable.append([opt, inf['help'], typ, str(inf['default'])])
            tables[summary] = outtable

        tables['Synonyms'] = [['Synonym', 'Option']
                              ] + [list(i) for i in SYNONYMS.items()]

        out_string = ''
        for section, table in tables.items():
            # Section header underlined with dashes, rst-style.
            out_string += '\n' + section + '\n'
            out_string += '-' * len(section) + '\n\n'
            out_string += _tabulate(
                table, headers='firstrow', tablefmt=tablefmt) + '\n\n'
        return out_string

    elif mode == 'merged_table':
        # Single table of all options with a Section column, plus Synonyms.
        table = []
        headers = ['Option', 'Description', 'Type', 'Default', 'Section']
        for sect, ddct in hlp.items():
            dct = ddct['help']
            for opt, inf in dct.items():
                if isinstance(inf['type'], (tuple, list, set)):
                    typ = [t.__name__ for t in inf['type']]
                else:
                    typ = inf['type'].__name__
                table.append(
                    [opt, inf['help'], typ, str(inf['default']), sect])

        out_string = _tabulate(table, headers=headers,
                               tablefmt=tablefmt) + '\n\n'
        out_string += 'Synonyms\n'
        out_string += '-' * 8 + '\n\n'
        out_string += _tabulate([list(i) for i in SYNONYMS.items()],
                                headers=['Synonym', 'Option'],
                                tablefmt=tablefmt)
        return out_string

    elif mode == 'list':
        # Bare keyword list, all sections concatenated.
        return '\n'.join(['\n'.join(i['help'].keys()) for i in hlp.values()])

    else:
        # NOTE(review): the message omits 'list'/'merged_table', which the
        # branches above do accept; preserved as-is.
        raise ClusterError('mode must be "print", "string", or "table"')