Code example #1
    def _print_results(self, show_passed=False):
        """Print linting results to the command line.

        Uses the ``rich`` library to print a set of formatted tables to the command line
        summarising the linting results.
        """

        log.debug("Printing final results")
        console = Console(force_terminal=nf_core.utils.rich_force_colors())

        # Helper function to format test links nicely
        def format_result(test_results, table):
            """
            Given an list of error message IDs and the message texts, return a nicely formatted
            string for the terminal with appropriate ASCII colours.
            """
            for eid, msg in test_results:
                table.add_row(
                    Markdown(
                        "[{0}](https://nf-co.re/tools-docs/lint_tests/{0}.html): {1}"
                        .format(eid, msg)))
            return table

        def _s(some_list):
            if len(some_list) != 1:
                return "s"
            return ""

        # Table of passed tests
        if len(self.passed) > 0 and show_passed:
            table = Table(style="green", box=rich.box.ROUNDED)
            table.add_column(r"[✔] {} Test{} Passed".format(
                len(self.passed), _s(self.passed)),
                             no_wrap=True)
            table = format_result(self.passed, table)
            console.print(table)

        # Table of fixed tests
        if len(self.fixed) > 0:
            table = Table(style="bright_blue", box=rich.box.ROUNDED)
            table.add_column(r"[?] {} Test{} Fixed".format(
                len(self.fixed), _s(self.fixed)),
                             no_wrap=True)
            table = format_result(self.fixed, table)
            console.print(table)

        # Table of ignored tests
        if len(self.ignored) > 0:
            table = Table(style="grey58", box=rich.box.ROUNDED)
            table.add_column(r"[?] {} Test{} Ignored".format(
                len(self.ignored), _s(self.ignored)),
                             no_wrap=True)
            table = format_result(self.ignored, table)
            console.print(table)

        # Table of warning tests
        if len(self.warned) > 0:
            table = Table(style="yellow", box=rich.box.ROUNDED)
            table.add_column(r"[!] {} Test Warning{}".format(
                len(self.warned), _s(self.warned)),
                             no_wrap=True)
            table = format_result(self.warned, table)
            console.print(table)

        # Table of failing tests
        if len(self.failed) > 0:
            table = Table(style="red", box=rich.box.ROUNDED)
            table.add_column(r"[✗] {} Test{} Failed".format(
                len(self.failed), _s(self.failed)),
                             no_wrap=True)
            table = format_result(self.failed, table)
            console.print(table)

        # Summary table
        summary_colour = "red" if len(self.failed) > 0 else "green"
        table = Table(box=rich.box.ROUNDED, style=summary_colour)
        table.add_column(f"LINT RESULTS SUMMARY".format(len(self.passed)),
                         no_wrap=True)
        table.add_row(r"[green][✔] {:>3} Test{} Passed".format(
            len(self.passed), _s(self.passed)))
        if len(self.fix):
            table.add_row(r"[bright blue][?] {:>3} Test{} Fixed".format(
                len(self.fixed), _s(self.fixed)))
        table.add_row(r"[grey58][?] {:>3} Test{} Ignored".format(
            len(self.ignored), _s(self.ignored)))
        table.add_row(r"[yellow][!] {:>3} Test Warning{}".format(
            len(self.warned), _s(self.warned)))
        table.add_row(r"[red][✗] {:>3} Test{} Failed".format(
            len(self.failed), _s(self.failed)))
        console.print(table)

        if len(self.could_fix):
            fix_cmd = "nf-core lint {} --fix {}".format(
                self.wf_path, " --fix ".join(self.could_fix))
            console.print(
                f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue]    {fix_cmd}\n"
            )
        if len(self.fix):
            console.print(
                "Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'."
            )
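
A note on the pattern above: each row is a Markdown renderable rather than a plain string, which is what turns the test ID into a link in terminals that support hyperlinks. A minimal, self-contained sketch of that idea (the test IDs, messages and docs URL below are placeholders, not real nf-core lint tests):

import rich.box
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table

# Placeholder lint results as (test_id, message) pairs
results = [("files_exist", "`main.nf` is present"), ("readme", "badge found")]

table = Table(style="green", box=rich.box.ROUNDED)
table.add_column(f"[✔] {len(results)} Tests Passed", no_wrap=True)
for eid, msg in results:
    # A Markdown cell renders [text](url) as a clickable hyperlink where supported
    table.add_row(Markdown(f"[{eid}](https://example.org/lint_tests/{eid}.html): {msg}"))

Console().print(table)
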
Code example #2
                        help="Set number of processes",
                        type=int,
                        default=8)
    console.print("!!!!! Time Aggregation is in milliseconds !!!!! ")
    args = parser.parse_args()

    # Handle the directory of pcaps
    directory_p = args.directory
    pcap_app = recursive_files(directory_p)
    if (pcap_app == -1):
        raise NameError("Inserted file not valid")

    # Set number of processes as number of pcaps
    n_process = set_n_process(pcap_app, args.process)
    table = Table(show_header=True,
                  header_style="bold magenta",
                  box=box.HORIZONTALS,
                  show_footer=True)
    table.add_column(
        "Pcap(s) to elaborate:",
        justify="center",
        footer=
        f"[bold magenta]N. worker:[/] [cornflower_blue bold]{n_process}[/], [bold magenta]PID main:[/bold magenta] [cornflower_blue bold]{os.getpid()}[/]"
    )
    for i in pcap_app:
        table.add_row(i, style="cornflower_blue bold")
    console.print(table)

    # For each .pcap in the folders, do the process
    manager = multiprocessing.Manager()
    result_list = manager.list()
    # creation of general log
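
The detail worth noting in this fragment is the column footer: the table is created with show_footer=True and the footer text is supplied per column via add_column(footer=...). A minimal sketch of just that pattern (the pcap names and worker count are invented):

from rich import box
from rich.console import Console
from rich.table import Table

console = Console()
pcaps = ["capture_a.pcap", "capture_b.pcap"]  # invented file names

table = Table(show_header=True, header_style="bold magenta",
              box=box.HORIZONTALS, show_footer=True)
table.add_column(
    "Pcap(s) to elaborate:", justify="center",
    footer=f"[bold magenta]N. worker:[/] [cornflower_blue bold]{len(pcaps)}[/]")
for name in pcaps:
    table.add_row(name, style="cornflower_blue bold")
console.print(table)
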
Code example #3
                            border_style)
                    yield new_line

        if _box and show_edge:
            yield _Segment(_box.get_bottom(widths), border_style)
            yield new_line


if __name__ == "__main__":  # pragma: no cover
    from rich.console import Console
    from rich.highlighter import ReprHighlighter
    from rich.table import Table

    table = Table(
        title="Star Wars Movies",
        caption="Rich example table",
        caption_justify="right",
    )

    table.add_column("Released",
                     header_style="bright_cyan",
                     style="cyan",
                     no_wrap=True)
    table.add_column("Title", style="magenta")
    table.add_column("Box Office", justify="right", style="green")

    table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker",
                  "$952,110,690")
    table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
    table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi",
                  "$1,332,539,889")
Code example #4
File: __main__.py  Project: sAksham-Ar/cricketpy
def cricpy():
    c = Cricbuzz()
    f = Figlet(font='slant')
    console = Console()

    print(f.renderText('CRICPY'))
    while 1:
        matches = c.livescore()
        print_scores(matches)
        choice = input()
        if choice == 'q':
            os.system('clear')
            exit()
        elif choice == 'r':
            continue
        else:
            while 1:
                os.system('clear')
                matches = c.livescore()
                match = matches[int(choice) - 1]
                single_score_table = Table(show_header=False,
                                           show_lines=True,
                                           expand=True)
                single_score_table.add_column()
                single_score_table.add_row(get_score_row(match))
                console.print(single_score_table)
                commentary = c.commentary(match['id'])

                if len(commentary['batsman']):
                    batsmens = commentary['batsman']
                    current_batsmen_table = Table("Batsman",
                                                  "R",
                                                  "B",
                                                  "4s",
                                                  "6s",
                                                  "SR",
                                                  expand=True)
                    for batsmen in batsmens:
                        current_batsmen_table = get_batsmen_row(
                            batsmen, current_batsmen_table)
                    console.print(current_batsmen_table)
                    current_bowler_table = Table("Bowler",
                                                 "O",
                                                 "M",
                                                 "R",
                                                 "W",
                                                 "ER",
                                                 expand=True)
                    bowler = commentary['bowler'][0]
                    current_bowler_table = get_bowler_row(
                        bowler, current_bowler_table)
                    console.print(current_bowler_table)

                commentary = commentary['comm']
                commentary_table = Table(show_header=False,
                                         padding=(1, 0, 0, 0),
                                         expand=True)
                for comment in commentary:
                    commentary_table = get_commentary_row(
                        comment, commentary_table)
                commentary_table.add_row('s:scorecard,b:back,r:refresh,q:quit')
                console.print(commentary_table)

                ch = input()
                if ch == 'r':
                    continue
                elif ch == 'b':
                    break
                elif ch == 'q':
                    os.system('clear')
                    exit()
                elif ch == 's':
                    scorecard = c.scorecard(match['id'])
                    inning = scorecard[0]
                    while 1:
                        os.system('clear')
                        batsmens = inning['batcard']

                        batsmen_table = Table("Batsman",
                                              "Dismissal",
                                              "R",
                                              "B",
                                              "4s",
                                              "6s",
                                              "SR",
                                              expand=True)
                        for batsmen in batsmens:
                            batsmen_table = get_batsmen_row(
                                batsmen, batsmen_table)
                        console.print(batsmen_table)

                        bowler_table = Table("Bowler",
                                             "O",
                                             "M",
                                             "R",
                                             "W",
                                             "ER",
                                             expand=True)
                        bowlers = inning['bowlcard']
                        for bowl in bowlers:
                            bowler_table = get_bowler_row(bowl, bowler_table)
                        console.print(bowler_table)

                        fall_wickets_table = Table("Fall of Wickets",
                                                   "Score",
                                                   "Over",
                                                   expand=True)
                        fall_wickets = inning['fall_wickets']
                        for fall_wicket in fall_wickets:
                            name = fall_wicket['name']
                            wicket = fall_wicket['wkt_num']
                            score = fall_wicket['score']
                            overs = fall_wicket['overs']
                            score = score + '-' + wicket
                            fall_wickets_table.add_row(name, score, overs)
                        console.print(fall_wickets_table)
                        print(
                            'inning number:inning scorecard,b:back,r:refresh,q:quit'
                        )

                        chh = input()
                        if chh == 'b':
                            break
                        elif chh == 'r':
                            continue
                        elif chh == 'q':
                            os.system('clear')
                            exit()
                        else:
                            try:
                                inning = scorecard[int(chh) - 1]
                            except:
                                print("wrong innings")
                            continue
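
Several of the tables above pass their column headers positionally to the Table constructor instead of calling add_column for each one. A minimal sketch of that shorthand, with expand=True stretching the table to the terminal width (the figures are made up):

from rich.console import Console
from rich.table import Table

# Positional string headers create the columns in one call
table = Table("Batsman", "R", "B", "4s", "6s", "SR", expand=True)
table.add_row("A. Batter", "57", "43", "6", "2", "132.56")
Console().print(table)
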
Code example #5
File: default_styles.py  Project: sthagen/python-rich
    Style(color="magenta"),
    "iso8601.timezone":
    Style(color="yellow"),
}

if __name__ == "__main__":  # pragma: no cover
    import argparse
    import io

    from rich.console import Console
    from rich.table import Table
    from rich.text import Text

    parser = argparse.ArgumentParser()
    parser.add_argument("--html",
                        action="store_true",
                        help="Export as HTML table")
    args = parser.parse_args()
    html: bool = args.html
    console = Console(record=True, width=70,
                      file=io.StringIO()) if html else Console()

    table = Table("Name", "Styling")

    for style_name, style in DEFAULT_STYLES.items():
        table.add_row(Text(style_name, style=style), str(style))

    console.print(table)
    if html:
        print(console.export_html(inline_styles=True))
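
The --html branch works because the Console is created with record=True and writes to a throw-away StringIO, so nothing reaches the terminal and export_html can replay everything that was printed. A minimal sketch of that record-and-export pattern:

import io

from rich.console import Console
from rich.table import Table

console = Console(record=True, width=70, file=io.StringIO())
table = Table("Name", "Styling")
table.add_row("bold magenta", "[bold magenta]sample text[/]")
console.print(table)

# Replays the recorded output as a self-contained HTML document
print(console.export_html(inline_styles=True))
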
Code example #6
File: beets_buddy.py  Project: stvnrlly/dotfiles
console = Console()

console.print('gathering data...', justify='center')

while True:

    # populated by beets_stats.sh
    stats = Path('/Users/stvnrlly/dev/dotfiles/data/stats').read_text().splitlines()
    flac_stats = Path('/Users/stvnrlly/dev/dotfiles/data/flac_stats').read_text().splitlines()
    mp3_stats = Path('/Users/stvnrlly/dev/dotfiles/data/mp3_stats').read_text().splitlines()
    missing = Path('/Users/stvnrlly/dev/dotfiles/data/missing').read_text().splitlines()

    non_format = int(stats[4].split(':')[1].strip()) - (int(flac_stats[4].split(':')[1].strip()) + int(mp3_stats[4].split(':')[1].strip()))

    console.clear()

    table = Table(box=box.SIMPLE)
    table2 = Table(box=box.SIMPLE)

    # print(stats)
    table.add_row('Total albums', stats[4].split(':')[1].strip(), stats[2].split(':')[1].strip())
    table.add_row('FLAC albums', flac_stats[4].split(':')[1].strip(), flac_stats[2].split(':')[1].strip())
    table.add_row('MP3 albums', mp3_stats[4].split(':')[1].strip(), mp3_stats[2].split(':')[1].strip())
    table2.add_row('Incomplete albums', str(len(missing)))
    table2.add_row('Other formats', str(non_format))

    # window.select_pane(2)
    console.print(table, justify='center')
    console.print(table2, justify='center')

    time.sleep(120)
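
The script above is essentially a terminal dashboard: clear the console, rebuild the tables from fresh data, print them centred, sleep, repeat. A stripped-down sketch of that refresh loop (the album counts are placeholders):

import time

from rich import box
from rich.console import Console
from rich.table import Table

console = Console()
while True:
    console.clear()
    table = Table(box=box.SIMPLE)
    table.add_row("Total albums", "1234")  # placeholder figure
    table.add_row("FLAC albums", "567")    # placeholder figure
    console.print(table, justify="center")
    time.sleep(120)
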
Code example #7
def find_mismatched_dashes(plain_output: bool) -> int:
    """
	Entry point for `se find-mismatched-dashes`
	"""

    parser = argparse.ArgumentParser(
        description=
        "Find words with mismatched dashes in a set of XHTML files. For example, `extra-physical` in one file and `extraphysical` in another."
    )
    parser.add_argument(
        "targets",
        metavar="TARGET",
        nargs="+",
        help="an XHTML file, or a directory containing XHTML files")
    args = parser.parse_args()

    console = Console(
        highlight=False, theme=se.RICH_THEME
    )  # Syntax highlighting will do weird things when printing paths
    return_code = 0
    dashed_words: Dict[str, int] = {}  # key: word; value: count
    mismatches: Dict[str, Dict[str, Tuple[int, int]]] = {
    }  # key: base word; value: dict with key: plain word; value: (base count, plain count)
    target_filenames = se.get_target_filenames(args.targets, ".xhtml")
    files_xhtml = []

    # Read files and cache for later
    for filename in target_filenames:
        try:
            with open(filename, "r", encoding="utf-8") as file:
                xhtml = file.read()
                dom = se.easy_xml.EasyXmlTree(xhtml)

                # Save any `alt` and `title` attributes because we may be interested in their contents
                for node in dom.xpath("//*[@alt or @title]"):
                    for _, value in node.attrs.items():
                        xhtml = xhtml + f" {value} "

                # Strip tags
                xhtml = regex.sub(r"<[^>]+?>", " ", xhtml)

                files_xhtml.append(xhtml)

        except FileNotFoundError:
            se.print_error(
                f"Couldn’t open file: [path][link=file://{filename}]{filename}[/][/].",
                plain_output=plain_output)
            return_code = se.InvalidInputException.code

    # Create a list of dashed words
    for xhtml in files_xhtml:
        # This regex excludes words with three dashes like `bric-a-brac`, because removing dashes
        # may erroneously match regular words. Don't match `’` to prevent matches like `life’s-end` -> `s-end`
        for word in regex.findall(r"(?<![\-’])\b\w+\-\w+\b(?![\-’])", xhtml):
            lower_word = word.lower()

            if lower_word in dashed_words:
                dashed_words[lower_word] = dashed_words[lower_word] + 1
            else:
                dashed_words[lower_word] = 1

    # Now iterate over the list and search files for undashed versions of the words
    if dashed_words:
        for xhtml in files_xhtml:
            for dashed_word, count in dashed_words.items():
                plain_word = dashed_word.replace("-", "")

                matches = regex.findall(fr"\b{plain_word}\b",
                                        xhtml,
                                        flags=regex.IGNORECASE)

                if matches:
                    if dashed_word in mismatches:
                        if plain_word in mismatches[dashed_word]:
                            mismatches[dashed_word][plain_word] = (
                                count, mismatches[dashed_word][plain_word][1] +
                                len(matches))
                        else:
                            mismatches[dashed_word][plain_word] = (
                                count, len(matches))

                    else:
                        mismatches[dashed_word] = {}
                        mismatches[dashed_word][plain_word] = (count,
                                                               len(matches))

    # Sort and prepare the output
    lines = []

    for dashed_word, child in mismatches.items():
        for plain_word, counts in child.items():
            lines.append((dashed_word, counts[0], plain_word, counts[1]))

    lines.sort()

    if lines:
        if plain_output:
            for dashed_word, dashed_word_count, plain_word, plain_word_count in lines:
                console.print(
                    f"{dashed_word} ({dashed_word_count})\t{plain_word} ({plain_word_count})"
                )

        else:
            table = Table(show_header=False,
                          show_lines=True,
                          box=box.HORIZONTALS)
            table.add_column("Dashed word")
            table.add_column("Count", style="dim", no_wrap=True)
            table.add_column("Plain word")
            table.add_column("Count", style="dim", no_wrap=True)

            for dashed_word, dashed_word_count, plain_word, plain_word_count in lines:
                table.add_row(
                    f"[link=https://www.merriam-webster.com/dictionary/{urllib.parse.quote(dashed_word)}]{dashed_word}[/]",
                    f"({dashed_word_count})",
                    f"[link=https://www.merriam-webster.com/dictionary/{urllib.parse.quote(plain_word)}]{plain_word}[/]",
                    f"({plain_word_count})")

            console.print(table)

    return return_code
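
In the non-plain branch, each cell carries [link=...] console markup so the word becomes a clickable dictionary link in terminals that support hyperlinks. A minimal sketch of link markup inside a table cell (the word and count are placeholders):

import urllib.parse

from rich import box
from rich.console import Console
from rich.table import Table

word = "extra-physical"  # placeholder
table = Table(show_header=False, show_lines=True, box=box.HORIZONTALS)
table.add_column("Dashed word")
table.add_column("Count", style="dim", no_wrap=True)
table.add_row(
    f"[link=https://www.merriam-webster.com/dictionary/{urllib.parse.quote(word)}]{word}[/]",
    "(3)")
Console(highlight=False).print(table)
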
Code example #8
def recommend_games():
    # get print console
    console = Console()

    # === Get User's Review Data ===
    print(f'::  Getting Steam user review data and gathering game metadata...')
    # Get Data
    df = pd.read_excel(
        '~/gdrive/video_games/reviews/reviews_and_wishlist.xlsx', skiprows=2)
    df = df[df['Steam AppID'].notnull()]
    df['Steam AppID'] = df['Steam AppID'].astype(int)
    df = df.set_index('Steam AppID')

    # Table - Rated Games
    rated_games_table = Table(title="Rated Games",
                              show_header=True,
                              header_style="bold purple")
    rated_games_table.add_column("Steam AppId", style="cyan")
    rated_games_table.add_column("Game")
    rated_games_table.add_column("Score", justify="right", style="bold")
    for i, row in df.iterrows():
        # Color according to score value
        score_string = str(row.Score)
        if row.Score >= 7:
            score_string = "[green]" + score_string
        elif row.Score < 4:
            score_string = "[red]" + score_string
        rated_games_table.add_row(str(row.name), row.Game, score_string)
    console.print(rated_games_table)

    # Keep Relevant Cols from review excel
    df = df[['Game', 'Score']]

    # Add Reviews metadata cols
    df['Recent Percent'] = 0
    df['All Percent'] = 0
    for index in track(
            df.index,
            description=
            'Getting Steam Recent and All Review Percentages for Rated Games'):
        recent_p, _, all_p, _ = get_appid_reviews(index)
        df.at[index, 'Recent Percent'] = recent_p
        df.at[index, 'All Percent'] = all_p

    # Get Tags
    tags_dict = {}
    for index in track(df.index.values,
                       description='Getting Steam Tags for Rated Games'):
        tags = get_appid_tags(index)
        tags = list(
            dict.fromkeys([
                TAGS_MAP[tag] if tag in TAGS_MAP.keys() else tag
                for tag in tags
            ]))  # Map specific tags to more general ones
        tags_dict[index] = tags[:NUM_OF_TAGS]

    # Get developers
    devs_dict = {}
    for index in track(df.index.values,
                       description='Getting Steam Developers for Rated Games'):
        devs_dict[index] = get_appid_developers(index)

    # Get unique values lists
    UNIQUE_TAGS = sorted(list(set().union(*list(tags_dict.values()))))
    UNIQUE_DEVS = sorted(list(set().union(*list(devs_dict.values()))))

    # Panel - Unique Tags
    print(Panel(Columns(UNIQUE_TAGS, equal=True),
                title="Unique Training Tags"))

    # Create tag columns
    for tag in UNIQUE_TAGS:
        df[tag] = 0

    # Map tag rank to df
    for index, row in track(
            df.iterrows(),
            description='Creating Tag columns and ranking based on importance',
            total=len(df)):
        tags = tags_dict[index]
        for tag, rank in zip(tags, np.arange(len(tags), 0, -1)):
            df.at[index, tag] = int(rank)  # For Importance Ranking
            #df.at[index, tag] = 1  # Binary Has/Not Has Flag

    # Create dev columns
    for dev in UNIQUE_DEVS:
        df[dev] = 0

    # Map tag rank to df
    for index, row in track(df.iterrows(),
                            description='Creating Developer columns',
                            total=len(df)):
        devs = devs_dict[index]
        for dev in devs:
            df.at[index, dev] = 1  # Binary Has/Not Has Flag

    # === Creating training dataframes ===
    print(f'::  Creating training set dataframes...')

    ids = df['Game']
    y = df['Score']
    X = df.drop(['Game', 'Score'], axis=1)

    # == Add Content Similarity Score ==
    X_arr = X.drop(['Recent Percent', 'All Percent'], axis=1)
    tag_columns = X_arr.columns
    X_arr = X_arr.astype(float).values
    y_arr = y.astype(float).values

    # Get Pseudo Weighted Average by rating for items
    for i in range(0, len(y_arr)):
        X_arr[i] = X_arr[i] * y_arr[i]
    u_profile = X_arr.sum(axis=0)
    u_profile = (u_profile / np.sqrt(np.sum(u_profile**2))) * NUM_OF_TAGS
    u_profile = pd.Series(u_profile, index=tag_columns)

    # Get cosine sim between weighted average profile and X items
    cosine_scores = []
    for i, row in X.iterrows():
        x = row[2:].values
        sim = np.dot(u_profile.values, x) / (np.linalg.norm(u_profile.values) *
                                             np.linalg.norm(x))
        sim = np.round(sim, 4)
        cosine_scores.append(sim)
    X['User Profile Similarity'] = cosine_scores

    # === Auto Hyperparam Tuning and Model training ===
    print(f'::  Tuning hyperparameters and training recommender...')
    objective = ['reg:squarederror']
    max_depth = [int(x) for x in np.linspace(0, 15, num=16) if x != 0]
    n_estimators = [
        int(x) for x in np.linspace(start=0, stop=500, num=501) if x != 0
    ]
    random_grid = {
        'objective': objective,
        'max_depth': max_depth,
        'n_estimators': n_estimators,
    }
    model = XGBRegressor(random_state=42, verbosity=0, n_jobs=-1)
    search = GridSearchCV(estimator=model,
                          param_grid=random_grid,
                          scoring='neg_mean_squared_error',
                          cv=3,
                          verbose=1,
                          n_jobs=-1)
    search.fit(X, y)
    print(search.best_params_)

    # Fit Model
    model = XGBRegressor(**search.best_params_,
                         random_state=42,
                         verbosity=0,
                         n_jobs=-1)
    model.fit(X, y)

    # Get Cross Val Score
    scores = cross_val_score(model,
                             X,
                             y,
                             scoring='neg_mean_squared_error',
                             cv=5)
    print(f' Avg. MSE: {scores.mean():0.4f} (+/- {scores.std():0.4f})')

    # === Getting user library and wishlist games to generate recommendations ===
    print(
        f'::  Getting Steam user library and wishlist games to generate recommendations...'
    )
    # Get Test Data
    df_test = pd.concat([get_library_df(), get_wishlist_df()])
    df_test = df_test.drop([str(x) for x in df.index.values], errors='ignore')

    # Grab model tag input
    for tag in UNIQUE_TAGS:
        df_test[tag] = 0

    # Get tag ranks for tags that exist in model input
    for index in track(
            df_test.index.values,
            description='Getting test tags and mapping to train tag inputs'):
        tags = get_appid_tags(index)
        tags = list(
            dict.fromkeys([
                TAGS_MAP[tag] if tag in TAGS_MAP.keys() else tag
                for tag in tags
            ]))  # Map specific tags to more general ones
        tags = tags[:NUM_OF_TAGS]

        for tag, rank in zip(tags, np.arange(len(tags), 0, -1)):
            if tag in UNIQUE_TAGS:
                df_test.at[index, tag] = int(rank)  # For Importance Ranking
                #df_test.at[index, tag] = 1  # Binary Has/Not Has Flag
            else:
                print(f'tag "{tag}" not in input -- ignoring')
                pass

    # Grab model dev input
    for dev in UNIQUE_DEVS:
        df_test[dev] = 0

    # Get tag ranks for tags that exist in model input
    for index in track(
            df_test.index.values,
            description=
            'Getting test developers and mapping to train developer inputs'):
        devs = get_appid_developers(index)
        for dev in devs:
            if dev in UNIQUE_DEVS:
                df_test.at[index, dev] = 1  # Binary Has/Not Has Flag
            else:
                print(f'developer "{dev}" not in input -- ignoring')
                pass

    # === Recommendations ===
    print(f'::  Getting recommendations...')
    # Get X Test df
    df_test = df_test[df_test['Is DLC'] == False]  # Filter to games
    test_ids = df_test.index.values
    test_names = df_test['Game']
    test_owned = df_test['Is Owned']
    X_test = df_test.drop(['Game', 'Is Owned', 'Is DLC'], axis=1)

    # Calculate User Profile Sim Scores
    cosine_scores = []
    for i, row in X_test.iterrows():
        x = row[2:].astype(float).values
        sim = np.dot(u_profile.values, x) / (np.linalg.norm(u_profile.values) *
                                             np.linalg.norm(x))
        sim = np.round(sim, 4)
        cosine_scores.append(sim)
    X_test['User Profile Similarity'] = cosine_scores

    # Get predictions
    test_preds = model.predict(X_test)

    # Formulate Output
    output_data = {
        'Steam AppId': test_ids,
        'Game': test_names,
        'Is Owned': test_owned,
        'User Profile Similarity': X_test['User Profile Similarity'].values,
        'Predicted Score': test_preds
    }
    output_df = pd.DataFrame(output_data).sort_values('Predicted Score',
                                                      ascending=False)

    # Table - UProfile Tags
    u_profile = u_profile.sort_values(ascending=False)
    u_profile_table = Table(title="User Tags Profile",
                            show_header=True,
                            header_style="bold purple")
    u_profile_table.add_column("Tag")
    u_profile_table.add_column("Estimated Value",
                               justify="right",
                               style="bold")
    for index, value in u_profile.items():
        u_profile_table.add_row(index, f'{value:0.2f}')
    console.print(u_profile_table)

    # Table - All Games Predicted Score
    sim_mean = output_df['User Profile Similarity'].mean()
    sim_std = output_df['User Profile Similarity'].std()
    predicted_games_table = Table(
        title="Predicted Score for Steam Library and Wishlist Games",
        show_header=True,
        header_style="bold purple")
    predicted_games_table.add_column("Steam AppId", style="cyan")
    predicted_games_table.add_column("Game")
    predicted_games_table.add_column("User Profile Similarity",
                                     justify="right",
                                     style="bold")
    predicted_games_table.add_column("Predicted Score",
                                     justify="right",
                                     style="bold")
    for i, row in output_df.iterrows():
        # Color score value
        score_string = str(np.round(row['Predicted Score'], 2))
        if row['Predicted Score'] >= 7:
            score_string = "[green]" + score_string
        elif row['Predicted Score'] < 4:
            score_string = "[red]" + score_string

        # Color Similarity
        sim_string = f'{row["User Profile Similarity"]*100:0.2f}%'
        if row['User Profile Similarity'] >= (sim_mean + sim_std):
            sim_string = "[green]" + sim_string
        elif row['User Profile Similarity'] <= (sim_mean - sim_std):
            sim_string = "[red]" + sim_string

        predicted_games_table.add_row(str(row['Steam AppId']), row.Game, sim_string,
                                      score_string)
    console.print(predicted_games_table)

    return model, output_df, X_test
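
A pattern repeated throughout this function is prefixing a cell string with [green] or [red] markup depending on its value before handing it to add_row. A minimal sketch of that value-dependent colouring (the games and scores are invented):

from rich.console import Console
from rich.table import Table

table = Table(title="Rated Games", show_header=True, header_style="bold purple")
table.add_column("Game")
table.add_column("Score", justify="right", style="bold")

for game, score in [("Game A", 8.5), ("Game B", 5.0), ("Game C", 2.5)]:
    score_string = str(score)
    if score >= 7:
        score_string = "[green]" + score_string  # good scores in green
    elif score < 4:
        score_string = "[red]" + score_string    # poor scores in red
    table.add_row(game, score_string)

Console().print(table)
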
Code example #9
weight_volume_ratio = format(mass / convertUp(volume), ".2%")
# ppm = mg/kg or mg/L
ppm = convertUp(mass) / volume
data = molConvert(mass, volume, molarMass, firstMolAmount, secondMolAmount)

# Convert items to scientific notation for display purposes
data = [sciNotation(item) for item in data]

mol = data[0]
mol_L = data[1]
firstElementConc = data[2]
secondElementConc = data[3]

# Generate columns
table = Table(title="Calculations", show_lines=True)
table.add_column("Title")
table.add_column("Result", style="green")
# Generate rows and fill with data + "units"
table.add_row("Percentage Concentration", weight_volume_ratio)
table.add_row("Parts Per Million", str(ppm) + " ppm")
table.add_row("Concentration", str(mol_L) + " mol/L")
table.add_row("Concentration of " + firstElement,
              str(firstElementConc) + " mol/L")
table.add_row("Concentration of " + secondElement,
              str(secondElementConc) + " mol/L")

# Print generated table
console = Console()
console.print(table)
Code example #10
File: exegol.py  Project: 11-BANG-BANG/Exegol
def info_images():
    images = []
    logger.info("Available images")
    remote_images = {}
    logger.debug("Fetching remote image tags, digests and sizes")
    try:
        remote_images_request = requests.get(
            url="https://hub.docker.com/v2/repositories/{}/tags".format(
                IMAGE_NAME),
            timeout=(5, 10))
        remote_images_list = json.loads(remote_images_request.text)
        for image in remote_images_list["results"]:
            tag = image["name"]
            digest = image["images"][0]["digest"]
            compressed_size = readable_size(image["full_size"])
            logger.debug("└── {} → {}...".format(tag, digest[:32]))
            remote_images[digest] = {
                "tag": tag,
                "compressed_size": compressed_size
            }
        notinstalled_remote_images = dict(remote_images)  # copy, so pops below don't also mutate remote_images
        logger.debug(
            "Fetching local image tags, digests (and other attributes)")
        local_images_list = client.images.list(IMAGE_NAME,
                                               filters={"dangling": False})
        for image in local_images_list:
            id = image.attrs["Id"].split(":")[1][:12]
            if not image.attrs["RepoTags"]:
                # TODO: investigate this, print those images as "layers"
                #  these are layers for other images
                real_size = readable_size(image.attrs["Size"])
                digest = image.attrs["Id"].replace("sha256:", "")
                images.append([id, "<none>", real_size, "local layer"])
            else:
                name, tag = image.attrs["RepoTags"][0].split(':')
                real_size = readable_size(image.attrs["Size"])

                if image.attrs[
                        "RepoDigests"]:  # If true, the image was pulled instead of built
                    digest = image.attrs["RepoDigests"][0].replace(
                        "{}@".format(IMAGE_NAME), "")

                    logger.debug("└── {} → {}...".format(tag, digest[:32]))
                    if digest in remote_images.keys():
                        images.append([
                            id, tag, real_size, "remote ({}, {})".format(
                                "[green]up to date[/green]",
                                remote_images[digest]["compressed_size"])
                        ])
                        notinstalled_remote_images.pop(digest)
                    else:
                        for key in remote_images:
                            if remote_images[key]["tag"] == tag:
                                remote_digest = key
                                break
                            else:  # This means the image was pulled but it doesn't exist anymore on DockerHub
                                remote_digest = ""
                        if remote_digest:
                            compressed_size = remote_images[remote_digest][
                                "compressed_size"]
                            images.append([
                                id, tag, real_size, "remote ({}, {})".format(
                                    "[orange3]deprecated[/orange3]",
                                    compressed_size)
                            ])
                            notinstalled_remote_images.pop(remote_digest)
                        else:
                            images.append([
                                id, tag, real_size, "remote ({})".format(
                                    "[bright_black]discontinued["
                                    "/bright_black]")
                            ])
                else:
                    images.append([id, tag, real_size, "local image"])
        for uninstalled_remote_image in notinstalled_remote_images.items():
            tag = uninstalled_remote_image[1]["tag"]
            compressed_size = uninstalled_remote_image[1]["compressed_size"]
            id = uninstalled_remote_image[0].split(":")[1][:12]
            images.append([
                id, tag, "[bright_black]N/A[/bright_black]",
                "remote ({}, {})".format("[yellow3]not installed[/yellow3]",
                                         compressed_size)
            ])
        images = sorted(images, key=lambda k: k[1])
        if options.verbosity == 0:
            table = Table(show_header=True,
                          header_style="bold blue",
                          border_style="blue",
                          box=box.SIMPLE)
            table.add_column("Image tag")
            table.add_column("Real size")
            table.add_column("Type")
            for image in images:
                if image[1] != "<none>":
                    table.add_row(image[1], image[2], image[3])
        elif options.verbosity >= 1:
            table = Table(show_header=True,
                          header_style="bold blue",
                          border_style="grey35",
                          box=box.SQUARE)
            table.add_column("Id")
            table.add_column("Image tag")
            table.add_column("Real size")
            table.add_column("Type")
            for image in images:
                table.add_row(image[0], image[1], image[2], image[3])
        console.print(table)
        print()
    except requests.exceptions.ConnectionError:
        logger.warning(
            "Connection Error: you probably have no internet, skipping online queries"
        )
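
Both verbosity branches above build the same kind of table; what changes is the box constant and the border_style. A minimal sketch of a table in that style (the image data is a placeholder):

from rich import box
from rich.console import Console
from rich.table import Table

table = Table(show_header=True, header_style="bold blue",
              border_style="blue", box=box.SIMPLE)
table.add_column("Image tag")
table.add_column("Real size")
table.add_column("Type")
table.add_row("latest", "1.2 GB", "remote ([green]up to date[/green], 600 MB)")
Console().print(table)
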
Code example #11
File: exegol.py  Project: 11-BANG-BANG/Exegol
def info_containers():
    len_containers = len(
        client.containers.list(all=True, filters={"name": "exegol-"}))
    logger.info("Available local containers: {}".format(len_containers))
    containers = []
    for container in client.containers.list(all=True,
                                            filters={"name": "exegol-"}):
        id = container.attrs["Id"][:12]
        tag = container.attrs["Name"].replace('/exegol-', '')
        state = container.attrs["State"]["Status"]
        if state == "running":
            state = "[green]" + state + "[/green]"
        image = container.attrs["Config"]["Image"]
        logger.debug("Fetching details on containers creation")
        details = []
        if was_created_with_gui(container):
            details.append("--X11")
        if was_created_with_host_networking(container):
            details.append("--host-network")
        if was_created_with_device(container):
            details.append("--device {}".format(
                was_created_with_device(container)))
        if was_created_with_privileged(container):
            details.append("[orange3]--privileged[/orange3]")
        details = " ".join(details)
        logger.debug("Fetching volumes for each container")
        volumes = ""
        if container.attrs["HostConfig"]["Binds"]:
            for bind in container.attrs["HostConfig"]["Binds"]:
                volumes += bind.replace(":", " ↔ ") + "\n"
        if container.attrs["HostConfig"]["Mounts"]:
            for mount in container.attrs["HostConfig"]["Mounts"]:
                volumes += mount["VolumeOptions"]["DriverConfig"]["Options"][
                    "device"]
                volumes += " ↔ "
                volumes += mount["Target"]
                volumes += "\n"
        containers.append([id, tag, state, image, details, volumes])

    if options.verbosity == 0:
        table = Table(show_header=True,
                      header_style="bold blue",
                      border_style="blue",
                      box=box.SIMPLE)
        table.add_column("Container tag")
        table.add_column("State")
        table.add_column("Image (repo/image:tag)")
        table.add_column("Creation details")
        for container in containers:
            table.add_row(container[1], container[2], container[3],
                          container[4])
    elif options.verbosity >= 1:
        table = Table(show_header=True,
                      header_style="bold blue",
                      border_style="grey35",
                      box=box.SQUARE)
        table.add_column("Id")
        table.add_column("Container tag")
        table.add_column("State")
        table.add_column("Image (repo/image:tag)")
        table.add_column("Creation details")
        table.add_column("Binds & mounts")
        for container in containers:
            table.add_row(container[0], container[1], container[2],
                          container[3], container[4], container[5])
    console.print(table)
    print()
Code example #12
File: output.py  Project: zha0/checksec.py
    def __init__(self, libc_detected: bool = False):
        """Init Rich Console and Table"""
        super().__init__(libc_detected)
        # init ELF table
        self.table_elf = Table(title="Checksec Results: ELF", expand=True)
        self.table_elf.add_column("File", justify="left", header_style="")
        self.table_elf.add_column("NX", justify="center")
        self.table_elf.add_column("PIE", justify="center")
        self.table_elf.add_column("Canary", justify="center")
        self.table_elf.add_column("Relro", justify="center")
        self.table_elf.add_column("RPATH", justify="center")
        self.table_elf.add_column("RUNPATH", justify="center")
        self.table_elf.add_column("Symbols", justify="center")
        if self._libc_detected:
            self.table_elf.add_column("FORTIFY", justify="center")
            self.table_elf.add_column("Fortified", justify="center")
            self.table_elf.add_column("Fortifiable", justify="center")
            self.table_elf.add_column("Fortify Score", justify="center")

        # init PE table
        self.table_pe = Table(title="Checksec Results: PE", expand=True)
        self.table_pe.add_column("File", justify="left", header_style="")
        self.table_pe.add_column("NX", justify="center")
        self.table_pe.add_column("Canary", justify="center")
        self.table_pe.add_column("ASLR", justify="center")
        self.table_pe.add_column("Dynamic Base", justify="center")
        self.table_pe.add_column("High Entropy VA", justify="center")
        self.table_pe.add_column("SEH", justify="center")
        self.table_pe.add_column("SafeSEH", justify="center")
        self.table_pe.add_column("Force Integrity", justify="center")
        self.table_pe.add_column("Control Flow Guard", justify="center")
        self.table_pe.add_column("Isolation", justify="center")
        self.table_pe.add_column("Authenticode", justify="center")

        # init console
        self.console = Console()

        # build progress bar
        self.process_bar = Progress(
            TextColumn("[bold blue]Processing...", justify="left"),
            BarColumn(bar_width=None),
            "{task.completed}/{task.total}",
            "•",
            "[progress.percentage]{task.percentage:>3.1f}%",
            console=self.console,
        )
        self.display_res_bar = Progress(
            BarColumn(bar_width=None),
            TextColumn("[bold blue]{task.description}", justify="center"),
            BarColumn(bar_width=None),
            console=self.console,
            transient=True,
        )
        self.enumerate_bar = Progress(
            TextColumn("[bold blue]Enumerating...", justify="center"),
            BarColumn(bar_width=None),
            console=self.console,
            transient=True,
        )

        self.process_task_id = None
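
Besides the two result tables, this constructor assembles Progress bars from individual column renderables (TextColumn, BarColumn and plain format strings). A minimal sketch of driving a bar built that way (the task size is arbitrary):

import time

from rich.console import Console
from rich.progress import BarColumn, Progress, TextColumn

console = Console()
progress = Progress(
    TextColumn("[bold blue]Processing...", justify="left"),
    BarColumn(bar_width=None),
    "{task.completed}/{task.total}",
    "•",
    "[progress.percentage]{task.percentage:>3.1f}%",
    console=console,
)
with progress:
    task_id = progress.add_task("processing", total=20)
    for _ in range(20):
        time.sleep(0.05)
        progress.advance(task_id)
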
Code example #13
        )
        f.write("  supportedComponentsWithTests = [\n")
        for component, deps in build_inputs.items():
            available, missing = deps
            if len(missing) == 0 and component in components_with_tests:
                f.write(f'    "{component}"' + "\n")
        f.write("  ];\n")
        f.write("}\n")

    supported_components = reduce(lambda n, c: n + (build_inputs[c][1] == []),
                                  components.keys(), 0)
    total_components = len(components)
    print(f"{supported_components} / {total_components} components supported, "
          f"i.e. {supported_components / total_components:.2%}")

    if outdated:
        table = Table(title="Outdated dependencies")
        table.add_column("Package")
        table.add_column("Current")
        table.add_column("Wanted")
        for package, version in sorted(outdated.items()):
            table.add_row(package, version['current'], version['wanted'])

        console = Console()
        console.print(table)


if __name__ == "__main__":
    run_mypy()
    main()
Code example #14
File: analyze.py  Project: puzzlepeaches/spraycharles
    def http_analyze(self, responses):
        # remove header row from list
        del responses[0]

        len_with_timeouts = len(responses)

        # remove lines with timeouts
        responses = [line for line in responses if line[2] != "TIMEOUT"]
        timeouts = len_with_timeouts - len(responses)

        response_lengths = []
        # Get the response length column for analysis
        for indx, line in enumerate(responses):
            response_lengths.append(int(line[3]))

        console.print(
            "[*] Calculating mean and standard deviation of response lengths.",
            style="info",
        )

        # find outlying response lengths
        length_elements = numpy.array(response_lengths)
        length_mean = numpy.mean(length_elements, axis=0)
        length_sd = numpy.std(length_elements, axis=0)
        console.print("[*] Checking for outliers.", style="info")
        length_outliers = [
            x for x in length_elements
            if (x > length_mean + 2 * length_sd or x < length_mean -
                2 * length_sd)
        ]

        length_outliers = list(set(length_outliers))
        len_indicies = []

        # find username / password combos with matching response lengths
        for hit in length_outliers:
            len_indicies += [
                i for i, x in enumerate(responses) if x[3] == str(hit)
            ]

        # print out logins with outlying response lengths
        if len(len_indicies) > 0:
            console.print("[+] Identified potentially sussessful logins!\n",
                          style="good")

            success_table = Table(show_footer=False, highlight=True)

            success_table.add_column("Username")
            success_table.add_column("Password")
            success_table.add_column("Response Code", justify="right")
            success_table.add_column("Response Length", justify="right")
            for x in len_indicies:
                success_table.add_row(
                    f"{responses[x][0]}",
                    f"{responses[x][1]}",
                    f"{responses[x][2]}",
                    f"{responses[x][3]}",
                )

            console.print(success_table)

            self.send_notification(len(len_indicies))

            print()

            # Returning true to indicate a successfully guessed credential
            return len(len_indicies)
        else:
            console.print(
                "[!] No outliers found or not enough data to find statistical significance.",
                style="danger",
            )
            print()
            return 0
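
One detail easy to miss above is highlight=True on the success table: rich's default highlighter then colours numbers and other recognisable values inside the cells without any explicit markup. A minimal sketch (the credentials and figures are invented):

from rich.console import Console
from rich.table import Table

table = Table(show_footer=False, highlight=True)
table.add_column("Username")
table.add_column("Password")
table.add_column("Response Code", justify="right")
table.add_column("Response Length", justify="right")
table.add_row("j.smith@example.com", "Spring2024!", "200", "4821")
Console().print(table)
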
Code example #15
def speed_test(
    use_cache: bool = typer.Option(False, "--use-cache", "-u"),
    test_count: int = typer.Option(default=5),
    start: int = typer.Option(
        100,
        "--start",
        "-s",
        help="What power should the MP search begin with?",
    ),
    end: int = typer.Option(
        120,
        "--end",
        "-e",
        help="What power should the MP search end with?",
    ),
):
    console = Console()
    test_results = {"start": start, "end": end, "old": [], "new": []}
    with open("tmp/speed-test-cache.json") as f:
        cache = json.load(f)

    # Don't use the cache if it doesn't match the requested parameters
    if cache["start"] != start or cache["end"] != end:
        use_cache = False
        console.print(
            f"Cache doesn't match requested parameters. Not using cache.")

    if use_cache:
        test_results["old"] = cache["old"]
        test_results["old_dict"] = cache["old_dict"]
    else:
        console.print("Processing old explorer...")
        for _ in range(test_count):
            explorer_o = Explorer()
            explorer_o.explore(start, end)
            seconds = explorer_o.run_time
            test_results["old"].append(seconds)
        test_results["old_dict"] = explorer_o.collection.to_dict()

    console.print("Processing speedy explorer...")
    for _ in range(test_count):
        explorer_n = SpeedyExplorer()
        explorer_n.explore(start, end)
        seconds = explorer_n.run_time
        test_results["new"].append(seconds)
    test_results["new_dict"] = explorer_n.collection.to_dict()

    with open("tmp/speed-test-cache.json", "w") as f:
        json.dump(test_results, f, indent=4)

    ##
    # Ensure that refactoring didn't cause numbers to be missed
    assert test_results["new_dict"] == test_results["old_dict"], DeepDiff(
        test_results["old_dict"], test_results["new_dict"])

    ##
    # Calculate and display results
    table = Table(show_footer=False)
    table.add_column("Test")
    table.add_column("Original")
    table.add_column("Speedy")

    for i, (test_o,
            test_n) in enumerate(zip(test_results["old"],
                                     test_results["new"])):
        table.add_row(str(i), str(test_o), str(test_n))

    avg_o = round(sum(test_results["old"]) / len(test_results["old"]), 2)
    avg_n = round(sum(test_results["new"]) / len(test_results["new"]), 2)
    table.add_row("Average", str(avg_o), str(avg_n))

    console.print(f"Start: {start}")
    console.print(f"End: {end}")
    console.print(table)
    console.print(f"Average speed increase: {avg_o/avg_n}x", style="green")
Code example #16
    def _print_results(results: List[_OUT_DICT], stage: str) -> None:
        # remove the dl idx suffix
        results = [{k.split("/dataloader_idx_")[0]: v for k, v in result.items()} for result in results]
        metrics_paths = {k for keys in apply_to_collection(results, dict, EvaluationLoop._get_keys) for k in keys}
        if not metrics_paths:
            return

        metrics_strs = [":".join(metric) for metric in metrics_paths]
        # sort both lists based on metrics_strs
        metrics_strs, metrics_paths = zip(*sorted(zip(metrics_strs, metrics_paths)))

        headers = [f"DataLoader {i}" for i in range(len(results))]

        # fallback is useful for testing of printed output
        term_size = shutil.get_terminal_size(fallback=(120, 30)).columns or 120
        max_length = int(min(max(len(max(metrics_strs, key=len)), len(max(headers, key=len)), 25), term_size / 2))

        rows: List[List[Any]] = [[] for _ in metrics_paths]

        for result in results:
            for metric, row in zip(metrics_paths, rows):
                val = EvaluationLoop._find_value(result, metric)
                if val is not None:
                    if isinstance(val, Tensor):
                        val = val.item() if val.numel() == 1 else val.tolist()
                    row.append(f"{val}")
                else:
                    row.append(" ")

        # keep one column with max length for metrics
        num_cols = int((term_size - max_length) / max_length)

        for i in range(0, len(headers), num_cols):
            table_headers = headers[i : (i + num_cols)]
            table_rows = [row[i : (i + num_cols)] for row in rows]

            table_headers.insert(0, f"{stage} Metric".capitalize())

            if _RICH_AVAILABLE:
                columns = [Column(h, justify="center", style="magenta", width=max_length) for h in table_headers]
                columns[0].style = "cyan"

                table = Table(*columns)
                for metric, row in zip(metrics_strs, table_rows):
                    row.insert(0, metric)
                    table.add_row(*row)

                console = get_console()
                console.print(table)
            else:
                row_format = f"{{:^{max_length}}}" * len(table_headers)
                half_term_size = int(term_size / 2)

                try:
                    # some terminals do not support this character
                    if sys.stdout.encoding is not None:
                        "─".encode(sys.stdout.encoding)
                except UnicodeEncodeError:
                    bar_character = "-"
                else:
                    bar_character = "─"
                bar = bar_character * term_size

                lines = [bar, row_format.format(*table_headers).rstrip(), bar]
                for metric, row in zip(metrics_strs, table_rows):
                    # deal with column overflow
                    if len(metric) > half_term_size:
                        while len(metric) > half_term_size:
                            row_metric = metric[:half_term_size]
                            metric = metric[half_term_size:]
                            lines.append(row_format.format(row_metric, *row).rstrip())
                        lines.append(row_format.format(metric, " ").rstrip())
                    else:
                        lines.append(row_format.format(metric, *row).rstrip())
                lines.append(bar)
                print(os.linesep.join(lines))
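
In the rich branch, the columns are built up front as Column objects so that width, justify and style can be set before the Table is constructed, and the first column is then restyled in place. A minimal sketch of that approach (the headers and values are placeholders):

from rich.console import Console
from rich.table import Column, Table

headers = ["Validate metric", "DataLoader 0", "DataLoader 1"]
columns = [Column(h, justify="center", style="magenta", width=25) for h in headers]
columns[0].style = "cyan"

table = Table(*columns)
table.add_row("val_loss", "0.123", "0.456")
Console().print(table)
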
Code example #17
File: main.py  Project: Merevoli-DatLuu/Get_Your_Book
def search_book(search_string):
    """
    Search books from search_string and print the book list
    """
    start = time.time()
    console = Console()

    with console.status("[bold green]Working on tasks...") as status:
        res = requests.get("https://b-ok.asia/s/?q=" + search_string)
        soup = BeautifulSoup(res.text, 'html.parser')
        items = soup.findAll('table', {'class': 'resItemTable'})
        data = []

        for item in items:
            id = item.tr.td.div['data-book_id']
            link = item.tr.td.a['href']
            head = item.tr.findAll('td', recursive=False)[1].table

            name = head.tr.td.h3.a.getText()
            publisher = head.tr.td.div.a.getText()
            authors = []
            authors_div = head.tr.td.findAll('div', recursive=False)

            if len(authors_div) > 1:
                authors = [i.getText() for i in authors_div[1].findAll('a')]

            t = head.findAll('tr', recursive=False)[1]\
                    .td\
                    .findAll('div', recursive=False)[1]\
                    .findAll('div', recursive=False)
            details = list(
                map(
                    lambda x:
                    [fr.getText() for fr in x.findAll('div', recursive=False)],
                    t))

            year = details[0][1]
            language = details[1][1]
            file_size = details[2][1]

            data.append({
                "id": id,
                "name": name,
                "publisher": publisher,
                "authors": authors,
                "year": year,
                "language": language,
                "file": file_size,
                "link": link
            })

    table = Table(title="Book Searching",
                  show_header=True,
                  header_style="bold bright_red")

    table.add_column("[cyan]STT[/cyan]", justify="center", style="cyan")
    table.add_column("[yellow]Name[/yellow]", style="yellow")
    table.add_column("[green]Publisher[/green]", justify="left", style="green")
    table.add_column("[magenta]Authors[/magenta]",
                     justify="left",
                     style="magenta")
    table.add_column("[blue]Year[/blue]", justify="right", style="blue")
    table.add_column("[green_yellow]Language[/green_yellow]",
                     justify="center",
                     style="green_yellow")
    table.add_column("[turquoise4]File[/turquoise4]",
                     justify="left",
                     style="turquoise4")

    stt = 1
    for d in data:
        table.add_row(str(stt), d['name'] + '\n', d['publisher'],
                      ",\n".join(d['authors']), d['year'], d['language'],
                      d['file'])
        stt += 1

    console.print(table)
    print("Time: {:.3f}s\n".format(time.time() - start))

    print_usage_1()
    option = console.input("[bright_blue]>>> [/bright_blue]")

    while option not in ("/back", "/b"):
        if option in ("/quit", "/q"):
            console.print("[bright_red]Exit[/bright_red]")
            exit(0)
        if option in ("/help", "/h"):
            print_usage_1()
        elif len(option.split(' ')) == 2:
            optionplt = option.split()
            if optionplt[0] in ("/detail", "/dt") and\
               optionplt[1].isnumeric() and\
               1 <= int(optionplt[1]) <= len(data):
                go_to_details(data[int(optionplt[1]) - 1])
            else:
                console.print("[bright_red]Invalid Input[/bright_red]")
        else:
            console.print("[bright_red]Invalid Input[/bright_red]")
        option = console.input("[bright_blue]>>> [/bright_blue]")
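
The snippet above omits its imports and two project helpers, print_usage_1 and go_to_details. A minimal sketch of the setup it assumes, with placeholder bodies standing in for the real helpers from the Get_Your_Book project:

import time

import requests
from bs4 import BeautifulSoup
from rich.console import Console
from rich.table import Table


def print_usage_1():
    # Placeholder for the project's real helper, which prints the available commands.
    print("/detail <n> (/dt)  /help (/h)  /back (/b)  /quit (/q)")


def go_to_details(book):
    # Placeholder for the project's real helper, which shows details for one book.
    print(book["name"], "->", book["link"])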
Code Example #18
File: __init__.py  Project: nf-core/tools
    def _print_results(self, show_passed=False):
        """Print linting results to the command line.

        Uses the ``rich`` library to print a set of formatted tables to the command line
        summarising the linting results.
        """

        log.debug("Printing final results")

        # Sort the results
        self.passed.sort(key=operator.attrgetter("message", "module_name"))
        self.warned.sort(key=operator.attrgetter("message", "module_name"))
        self.failed.sort(key=operator.attrgetter("message", "module_name"))

        # Find maximum module name length
        max_mod_name_len = 40
        for idx, tests in enumerate([self.passed, self.warned, self.failed]):
            try:
                for lint_result in tests:
                    max_mod_name_len = max(len(lint_result.module_name), max_mod_name_len)
            except:
                pass

        # Helper function to format test links nicely
        def format_result(test_results, table):
            """
            Given a list of lint results, add one formatted row (module name, file
            path, message) to the table for each result and return the table.
            """
            # TODO: Row styles don't currently work because the table-level style overrides them.
            # Leaving this in place in case a future version of rich fixes the behaviour.
            last_modname = False
            row_style = None
            for lint_result in test_results:
                if last_modname and lint_result.module_name != last_modname:
                    if row_style:
                        row_style = None
                    else:
                        row_style = "magenta"
                last_modname = lint_result.module_name
                table.add_row(
                    Markdown(f"{lint_result.module_name}"),
                    os.path.relpath(lint_result.file_path, self.dir),
                    Markdown(f"{lint_result.message}"),
                    style=row_style,
                )
            return table

        def _s(some_list):
            if len(some_list) > 1:
                return "s"
            return ""

        # Print module linting results header
        console.print(Panel("[magenta]Module lint results"))

        # Table of passed tests
        if len(self.passed) > 0 and show_passed:
            console.print(
                rich.panel.Panel(r"[!] {} Test{} Passed".format(len(self.passed), _s(self.passed)), style="bold green")
            )
            table = Table(style="green", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.passed, table)
            console.print(table)

        # Table of warning tests
        if len(self.warned) > 0:
            console.print(
                rich.panel.Panel(
                    r"[!] {} Test Warning{}".format(len(self.warned), _s(self.warned)), style="bold yellow"
                )
            )
            table = Table(style="yellow", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.warned, table)
            console.print(table)

        # Table of failing tests
        if len(self.failed) > 0:
            console.print(
                rich.panel.Panel(r"[!] {} Test{} Failed".format(len(self.failed), _s(self.failed)), style="bold red")
            )
            table = Table(style="red", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.failed, table)
            console.print(table)
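
This excerpt also relies on module-level objects that are not shown, such as log and console. A rough sketch of the assumed setup, with names inferred from usage rather than copied from the nf-core source:

import logging
import operator
import os

import rich
import rich.box
import rich.panel
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table

log = logging.getLogger(__name__)
console = Console()  # the real module most likely configures colour/terminal detection here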
Code Example #19
    def summary(
        self,
        x: tp.Optional[tp.Any] = None,
        depth: int = 2,
        tablefmt: str = "fancy_grid",
        return_repr: bool = False,
        initialize: bool = False,
        eval_shape: bool = True,
        **tabulate_kwargs,
    ) -> tp.Optional[str]:
        """
        Prints a summary of the network.
        Arguments:
            x: A sample of inputs to the network.
            depth: The number of nesting levels to show.
                Information about summaries from modules deeper than `depth`
                will be aggregated together.
            tablefmt: A string representing the style of the table generated by
                `tabulate`. See
                [python-tabulate](https://github.com/astanin/python-tabulate)
                for more options.
            tabulate_kwargs: Additional keyword arguments passed to `tabulate`.
                See [python-tabulate](https://github.com/astanin/python-tabulate)
                for more options.
            return_repr: If True, return the summary string in addition to
                printing it.
            eval_shape: If True, use `jax.eval_shape` to compute output shapes
                without executing the forward pass.
        """

        if x is None:
            x = {}

        entries: tp.List[types.SummaryTableEntry]
        states = self.states.copy() if self.run_eagerly else self.states

        method = (self.call_summary_step
                  if self.run_eagerly else self.call_summary_step_jit)

        if eval_shape:
            entries = jax.eval_shape(self.call_summary_step, x, states)
        else:
            entries = method(x, states)

        total_entry = entries[-1]
        entries = entries[:-1]

        depth_groups: tp.Dict[
            str, tp.List[types.SummaryTableEntry]] = toolz.groupby(
                lambda entry: "/".join(entry.path.split("/")[:depth]), entries)

        entries = [
            utils.get_grouped_entry(entry, depth_groups) for entry in entries
            if entry.path in depth_groups
        ]

        main_table = Table(
            show_header=True,
            show_lines=True,
            show_footer=True,
            # box=rich.box.HORIZONTALS,
        )

        main_table.add_column("Layer")
        main_table.add_column("Outputs Shape")
        main_table.add_column("Trainable\nParameters")
        main_table.add_column("Non-trainable\nParameters")

        rows: tp.List[tp.List[str]] = []

        rows.append(["Inputs", utils.format_output(x), "", ""])

        for entry in entries:
            rows.append([
                f"{entry.path}{{pad}}  " + (f"[dim]{entry.module_type_name}[/]"
                                            if entry.module_type_name else ""),
                utils.format_output(entry.output_value),
                f"[green]{entry.trainable_params_count:,}[/]{{pad}}    {utils.format_size(entry.trainable_params_size)}"
                if entry.trainable_params_count > 0 else "",
                f"[green]{entry.non_trainable_params_count:,}[/]{{pad}}    {utils.format_size(entry.non_trainable_params_size)}"
                if entry.non_trainable_params_count > 0 else "",
            ])

        # global summaries
        params_count = total_entry.trainable_params_count
        params_size = total_entry.trainable_params_size
        states_count = total_entry.non_trainable_params_count
        states_size = total_entry.non_trainable_params_size
        total_count = params_count + states_count
        total_size = params_size + states_size

        rows.append([
            "",
            "Total",
            (f"[green]{params_count:,}[/]{{pad}}    {utils.format_size(params_size)}"
             if params_count > 0 else ""),
            (f"[green]{states_count:,}[/]{{pad}}    {utils.format_size(states_size)}"
             if states_count > 0 else ""),
        ])

        # add padding
        for col in range(4):
            max_length = max(
                len(line.split("{pad}")[0]) for row in rows
                for line in row[col].split("\n"))

            for row in rows:
                row[col] = "\n".join(
                    line.format(
                        pad=" " *
                        (max_length - len(line.rstrip().split("{pad}")[0])))
                    for line in row[col].rstrip().split("\n"))

        for row in rows[:-1]:
            main_table.add_row(*row)

        main_table.columns[1].footer = Text.from_markup(rows[-1][1],
                                                        justify="right")
        main_table.columns[2].footer = rows[-1][2]
        main_table.columns[3].footer = rows[-1][3]
        main_table.caption_style = "bold"
        main_table.caption = (
            "\nTotal Parameters: " +
            f"[green]{total_count:,}[/]   {utils.format_size(total_size)}"
            if total_count > 0 else "")

        summary = "\n" + utils.get_table_repr(main_table)

        print(summary)

        if return_repr:
            return summary
Code Example #20
File: tree.py  Project: wtiwana/rich
            maximum = max(max_measure + indent, maximum)
            if tree.expanded and tree.children:
                push(iter(tree.children))
                level += 1
        return Measurement(minimum, maximum)


if __name__ == "__main__":  # pragma: no cover

    from rich.console import RenderGroup
    from rich.markdown import Markdown
    from rich.panel import Panel
    from rich.syntax import Syntax
    from rich.table import Table

    table = Table(row_styles=["", "dim"])

    table.add_column("Released", style="cyan", no_wrap=True)
    table.add_column("Title", style="magenta")
    table.add_column("Box Office", justify="right", style="green")

    table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
    table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
    table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
    table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")

    code = """\
class Segment(NamedTuple):
    text: str = ""    
    style: Optional[Style] = None    
    is_control: bool = False    
Code Example #21
File: display.py  Project: 3070190/BaiduPCS-Py
def display_files(
    pcs_files: List[PcsFile],
    remotepath: Optional[str],
    sifters: List[Sifter] = [],
    highlight: bool = False,
    show_size: bool = False,
    show_date: bool = False,
    show_md5: bool = False,
    show_absolute_path: bool = False,
    show_dl_link: bool = False,
    show_hash_link: bool = False,
    hash_link_protocol: str = PcsRapidUploadInfo.default_hash_link_protocol(),
    csv: bool = False,
):
    if not pcs_files:
        return

    table = Table(box=SIMPLE, padding=0, show_edge=False)
    table.add_column()
    headers = []  # for csv
    headers.append("\t")
    if show_size:
        header = "Size"
        table.add_column(header, justify="right")
        headers.append(header)
    if show_date:
        header = "Modified Time"
        table.add_column(header, justify="center")
        headers.append(header)
    if show_md5:
        header = "md5"
        table.add_column(header, justify="left")
        headers.append(header)
    header = "Path"
    table.add_column(header, justify="left", overflow="fold")
    headers.append(header)
    if show_dl_link:
        header = "Download Link"
        table.add_column(header, justify="left", overflow="fold")
        headers.append(header)
    if show_hash_link:
        header = "Hash Link"
        table.add_column(header, justify="left", overflow="fold")
        headers.append(header)

    rows = []  # for csv

    max_size_str_len = max([len(str(pcs_file.size)) for pcs_file in pcs_files])
    for pcs_file in pcs_files:
        row: List[Union[str, Text]] = []

        if csv:
            row.append("-")
        else:
            tp = Text("-", style="bold red")
            row.append(tp)

        if show_size:
            size = human_size(pcs_file.size) if pcs_file.size else ""
            if csv:
                row.append(f"{size} {pcs_file.size}")
            else:
                row.append(f"{size} {pcs_file.size: >{max_size_str_len}}")
        if show_date:
            date = format_date(
                pcs_file.local_mtime) if pcs_file.local_mtime else ""
            row.append(date)
        if show_md5:
            md5 = pcs_file.md5 or ""
            row.append(md5)

        path = pcs_file.path if show_absolute_path else Path(
            pcs_file.path).name
        background = Text()
        if pcs_file.is_dir:
            if csv:
                row[0] = "d"
            else:
                tp._text = ["d"]
                background.style = "blue"

        if highlight and sifters:
            pats: List[Union[Pattern, str]] = list(
                filter(None, [
                    sifter.pattern() for sifter in sifters if sifter.include()
                ]))
            highlighter = Highlighter(pats, "yellow")
            _path = highlighter(path)
        else:
            _path = Text(path)

        if csv:
            row.append(path)
        else:
            row.append(background + _path)

        if show_dl_link:
            row.append(pcs_file.dl_link or "")

        rpinfo = pcs_file.rapid_upload_info
        if show_hash_link:
            link = ""
            if rpinfo:
                link = rpinfo.cs3l()
            row.append(link)

        if csv:
            rows.append(row)
        else:
            table.add_row(*row)

    if csv:
        _print(remotepath)
        _print("\t".join(headers))
        for row in rows:
            _print("\t".join(row))  # type: ignore
    else:
        console = Console()
        if remotepath:
            title = Text(remotepath, style="italic green")
            console.print(title)
        console.print(table)
Code Example #22
File: runTests.py  Project: ankraft/ACME-oneM2M-CSE
                )
                if errors > 0:
                    console.print(f'[red]Errors: {errors}')
            else:
                if args.showSkipped:
                    results[name] = (0, 0, 0, 0, 1,
                                     init.requestCount - startRequestCount)

    totalProcessTime = time.process_time() - totalProcessTimeStart
    totalExecTime = time.perf_counter() - totalTimeStart

    # Print Summary
    console.print()
    table = Table(
        show_header=True,
        header_style='bright_blue',
        show_footer=True,
        footer_style='',
        title='[dim][[/dim][red][i]ACME[/i][/red][dim]][/dim] - Test Results')
    table.add_column('Test Suite', footer='Totals', no_wrap=True)
    table.add_column(
        'Count',
        footer=
        f'[spring_green3]{totalRunTests if totalErrors == 0 else str(totalRunTests)}[/spring_green3]',
        justify='right')
    table.add_column('Skipped',
                     footer=f'[yellow]{totalSkipped}[/yellow]' if
                     totalSkipped > 0 else '[spring_green3]0[spring_green3]',
                     justify='right')
    table.add_column('Errors',
                     footer=f'[red]{totalErrors}[/red]'
                     if totalErrors > 0 else '[spring_green3]0[spring_green3]',
Code Example #23
def print_users(channel_info, user_id, client):
    users = channel_info['users']

    number_of_users = len(users)

    print(
        Fore.GREEN +
        "______________________________Joining Channel_______________________________\n"
    )
    # print(channel_info)
    print("Channel: -> ")
    print("ChannelID: ", channel_info['channel_id'], " ChannelName: ",
          channel_info['channel'])
    print("Topic: ", channel_info['topic'])
    print(Fore.CYAN)
    print("Club: -> ")
    print("ClubID: ", channel_info['club_id'], " ChannelInfo: ",
          channel_info['club_name'])
    print("\nNumber of Users: ", number_of_users)
    print(
        "____________________________________________________________________________"
    )
    print(Fore.RED)

    # List currently available users (top 25 only).
    # Also, check for the current user's speaker permission.
    channel_speaker_permission = False
    console = Console()
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("#", style="cyan", justify="right")
    table.add_column("user_id")
    table.add_column("username")
    table.add_column("name")
    table.add_column("is_speaker")
    table.add_column("is_moderator")
    table.add_column("description")

    i = 1
    for user in users:
        is_speaker = user['is_speaker']
        if not is_speaker:
            is_speaker = "-"
        is_moderator = user['is_moderator']
        if not is_moderator:
            is_moderator = "-"

        desc = "-----"
        if user['is_speaker']:
            desc = client.get_profile(user['user_id'])['user_profile']['bio']
        if i % 2 == 0:
            table.add_row(
                '[white]' + str(i),
                '[white]' + str(user['user_id']),
                '[white]' + str(user['username']),
                '[white]' + str(user['name']),
                '[white]' + str(is_speaker),
                '[white]' + str(is_moderator),
                '[white]' + str(desc),
            )
        else:
            table.add_row(
                '[orange1]' + str(i),
                '[orange1]' + str(user['user_id']),
                '[orange1]' + str(user['username']),
                '[orange1]' + str(user['name']),
                '[orange1]' + str(is_speaker),
                '[orange1]' + str(is_moderator),
                '[orange1]' + str(desc),
            )
        i += 1
        # Check if the user is the speaker
        if user['user_id'] == int(user_id):
            channel_speaker_permission = bool(user['is_speaker'])

        if i > 25:
            break
    console.print(table)
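
The manual even/odd colouring above can also be achieved with rich's built-in row_styles option, which applies the given styles to rows in rotation. A minimal sketch:

from rich.console import Console
from rich.table import Table

console = Console()
table = Table(show_header=True,
              header_style="bold magenta",
              row_styles=["orange1", "white"])  # styles alternate automatically per row
table.add_column("#", justify="right")
table.add_column("username")
table.add_column("name")
table.add_row("1", "alice", "Alice")
table.add_row("2", "bob", "Bob")
console.print(table)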
Code Example #24
print("[green]Welcome to Ping Reporter![/green]")
print("[cyan]Please enter the network you wish to test...[/cyan]")
print("Example: < 192.168.10.0/24 >")
subnet = input("\nEnter network: ")
print("\n")
network = ipaddress.ip_network(subnet)

for n in network.hosts():
    IP = str(n)
    p[IP] = Popen(['ping', '-c', '4', '-i', '0.2', IP], stdout=DEVNULL)

while p:
    for IP, proc in p.items():
        if proc.poll() is not None:
            del p[IP]
            if proc.returncode == 0:
                active_list.append(IP)
            elif proc.returncode == 1:
                inactive_list.append(IP)
            else:
                print(f"{IP} ERROR")
            break

table = Table(title="PING REPORT \n" + localtime)
table.add_column("Active Hosts", justify="center", style="green")
table.add_column("Inactive Hosts", justify="center", style="red")
for (a, i) in itertools.zip_longest(active_list, inactive_list):
    table.add_row(a, i)
console = Console()
console.print(table)
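
This script assumes several names that are not defined in the excerpt: a markup-aware print (the built-in print would not render the colour tags), the process dictionary p, the result lists, and a localtime string. A plausible setup, sketched here as an assumption rather than the original code:

import ipaddress
import itertools
import time
from subprocess import DEVNULL, Popen

from rich import print  # replaces the built-in print so the [green]/[cyan] markup is rendered
from rich.console import Console
from rich.table import Table

p = {}  # maps each IP address to its running ping process
active_list = []
inactive_list = []
localtime = time.strftime("%Y-%m-%d %H:%M:%S")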
Code Example #25
File: bump_versions.py  Project: nf-core/tools
    def _print_results(self):
        """
        Print the results for the bump_versions command
        Uses the ``rich`` library to print a set of formatted tables to the command line
        summarising the linting results.
        """

        log.debug("Printing bump_versions results")

        console = Console(force_terminal=rich_force_colors())
        # Find maximum module name length
        max_mod_name_len = 40
        for m in [self.up_to_date, self.updated, self.failed]:
            try:
                max_mod_name_len = max(len(m[2]), max_mod_name_len)
            except:
                pass

        def _s(some_list):
            if len(some_list) > 1:
                return "s"
            return ""

        def format_result(module_updates, table):
            """
            Create rows for module updates
            """
            # TODO: Row styles don't currently work because the table-level style overrides them.
            # Leaving this in place in case a future version of rich fixes the behaviour.
            last_modname = False
            row_style = None
            for module_update in module_updates:
                if last_modname and module_update[1] != last_modname:
                    if row_style:
                        row_style = None
                    else:
                        row_style = "magenta"
                last_modname = module_update[1]
                table.add_row(
                    Markdown(f"{module_update[1]}"),
                    Markdown(f"{module_update[0]}"),
                    style=row_style,
                )
            return table

        # Table of up to date modules
        if len(self.up_to_date) > 0 and self.show_up_to_date:
            console.print(
                rich.panel.Panel(
                    r"[!] {} Module{} version{} up to date.".format(
                        len(self.up_to_date), _s(self.up_to_date),
                        _s(self.up_to_date)),
                    style="bold green",
                ))
            table = Table(style="green", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("Update Message")
            table = format_result(self.up_to_date, table)
            console.print(table)

        # Table of updated modules
        if len(self.updated) > 0:
            console.print(
                rich.panel.Panel(r"[!] {} Module{} updated".format(
                    len(self.updated), _s(self.updated)),
                                 style="bold yellow"))
            table = Table(style="yellow", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("Update message")
            table = format_result(self.updated, table)
            console.print(table)

        # Table of modules that couldn't be updated
        if len(self.failed) > 0:
            console.print(
                rich.panel.Panel(r"[!] {} Module update{} failed".format(
                    len(self.failed), _s(self.failed)),
                                 style="bold red"))
            table = Table(style="red", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("Update message")
            table = format_result(self.failed, table)
            console.print(table)

        # Table of modules ignored due to `.nf-core.yml`
        if len(self.ignored) > 0:
            console.print(
                rich.panel.Panel(r"[!] {} Module update{} ignored".format(
                    len(self.ignored), _s(self.ignored)),
                                 style="grey58"))
            table = Table(style="grey58", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("Update message")
            table = format_result(self.ignored, table)
            console.print(table)
Code Example #26
File: test_table.py  Project: danielSanchezQ/rich-1
def render_tables():
    console = Console(
        width=60,
        force_terminal=True,
        file=io.StringIO(),
        legacy_windows=False,
        color_system=None,
    )

    table = Table(title="test table", caption="table caption", expand=True)
    table.add_column("foo", footer=Text("total"), no_wrap=True, overflow="ellipsis")
    table.add_column("bar", justify="center")
    table.add_column("baz", justify="right")

    table.add_row("Averlongwordgoeshere", "banana pancakes", None)

    assert Measurement.get(console, table, 80) == Measurement(41, 48)

    for width in range(10, 60, 5):
        console.print(table, width=width)

    table.expand = False
    console.print(table, justify="left")
    console.print(table, justify="center")
    console.print(table, justify="right")

    assert table.row_count == 1

    table.row_styles = ["red", "yellow"]
    table.add_row("Coffee")
    table.add_row("Coffee", "Chocolate", None, "cinnamon")

    assert table.row_count == 3

    console.print(table)

    table.show_lines = True
    console.print(table)

    table.show_footer = True
    console.print(table)

    table.show_edge = False

    console.print(table)

    table.padding = 1
    console.print(table)

    table.width = 20
    assert Measurement.get(console, table, 80) == Measurement(20, 20)
    console.print(table)

    table.columns[0].no_wrap = True
    table.columns[1].no_wrap = True
    table.columns[2].no_wrap = True

    console.print(table)

    table.padding = 0
    table.width = 60
    table.leading = 1
    console.print(table)

    return console.file.getvalue()
Code Example #27
def show_atlases(show_local_path=False):
    """Prints a formatted table with the name and version of local (downloaded)
    and online (available) atlases. To do so, dowload info on
    the latest atlas version and compares it with what it's stored
    locally.

    Arguments
    ---------
    show_local_path : bool
        If true, the local paths of the atlases are shown in the table as well
        (optional, default=False).

    """

    available_atlases = get_all_atlases_lastversions()

    # Get local atlases:
    atlases = get_atlases_lastversions()

    # Get atlases not yet downloaded:
    for atlas in available_atlases.keys():

        if atlas not in atlases.keys():
            atlases[str(atlas)] = dict(
                downloaded=False,
                local="",
                version="",
                latest_version=str(available_atlases[atlas]),
                updated=None,
            )

    # Print table:
    table = Table(
        show_header=True,
        header_style="bold green",
        show_lines=True,
        expand=False,
        box=None,
    )

    table.add_column("Name", no_wrap=True, width=32)
    table.add_column("Downloaded", justify="center")
    table.add_column("Updated", justify="center")
    table.add_column("Local version", justify="center")
    table.add_column("Latest version", justify="center")
    if show_local_path:
        table.add_column("Local path")

    for n, (atlas, info) in enumerate(atlases.items()):
        if info["downloaded"]:
            downloaded = "[green]:heavy_check_mark:[/green]"

            if info["version"] == info["latest_version"]:
                updated = "[green]:heavy_check_mark:[/green]"
            else:
                updated = "[red dim]x"

        else:
            downloaded = ""
            updated = ""

        row = [
            "[bold]" + atlas,
            downloaded,
            updated,
            "[#c4c4c4]" +
            info["version"] if "-" not in info["version"] else "",
            "[#c4c4c4]" + info["latest_version"],
        ]

        if show_local_path:
            row.append(info["local"])

        table.add_row(*row)
    rprint(Panel.fit(
        table,
        width=88,
        title="Brainglobe Atlases",
    ))
Code Example #28
def print_credits():
    table = Table(show_header=True)
    table.add_column("Author", style="yellow")
    table.add_column("Contact", style="yellow")
    table.add_row("Teja Swaroop", "[email protected] ")
    console.print(table)
Code Example #29
File: or_store.py  Project: JackMorash/RPG
def store():
    """Function for creating the store UI"""
    # Determines price of each type of item
    p = vars.amount_spent_on_miscellaneous
    b = vars.amount_spent_on_bullets
    c = vars.amount_spent_on_clothing
    f = vars.amount_spent_on_food
    o = vars.amount_spent_on_animals
    # Creates store UI using table library
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("Goods")
    table.add_column("Spent", justify="right")
    # Creates "oxen" portion of the table
    table.add_row("1. Oxen", f"[green]${o}[/green]")
    # Creates "food" portion of the table
    table.add_row(
        "2. Food",
        f"[green]${f}[/green]",
    )
    # Creates "clothing" portion of the table
    table.add_row(
        "3. Clothing",
        f"[green]${c}[/green]",
    )
    # Creates "ammunition" portion of the table
    table.add_row(
        "4. Ammunition",
        f"[green]${b}[/green]",
    )
    # Creates "parts" portion of the table
    table.add_row(
        "5. Misc. (Medicine, Wagon parts etc.)",
        f"[green]${p}[/green]",
    )
    # Creates the total spent portion of the table
    table.add_row("\nTotal", f"\n[green]${o+f+c+b+p}[/green]")
    console.print(table)

    while True:
        # Displays and handles store options and which option the player 
        # selects
        try:
            print(
                "Which item would you like to buy?\n\n[cyan italic]\
Type 'leave' to exit the store[/cyan italic]"
            )
            selection = input("\n-->")
            if selection == "1":
                console.clear()
                oxen()
            elif selection == "2":
                console.clear()
                food()
                break
            elif selection == "3":
                console.clear()
                clothes()
                break
            elif selection == "4":
                console.clear()
                bullets()
                break
            elif selection == "5":
                console.clear()
                parts()
                break
            elif selection == "exit":
                console.clear()
                exit()
            elif selection == "leave":
                # Determines if the player has enough oxen to play the game
                if vars.amount_spent_on_animals < 1:
                    print(
                        "[cyan italic] Don't forget,\
 you'll need oxen to pull your wagon![/cyan italic]"
                    )
                    input("Press Enter to Continue...")
                    console.clear()
                    store()
                elif vars.amount_spent_on_animals > 1:
                    console.clear()
                    print(
                        "[cyan italic]Well then, you are ready to start.\
 Good luck! You have a long and difficult\
 journey ahead of you...[/cyan italic]"
                    )
                input("Press Enter to Continue...")
                console.clear()
                walking_trail()
                break
        except ValueError:
            print("\n[red]Invalid Selection[/red]\n")
            input("\nPress Enter to Continue")
            continue
Code Example #30
def print_rich_table(
    df: pd.DataFrame,
    show_index: bool = False,
    title: str = "",
    index_name: str = "",
    headers: Union[List[str], pd.Index] = None,
    floatfmt: Union[str, List[str]] = ".2f",
):
    """Prepare a table from df in rich

    Parameters
    ----------
    df: pd.DataFrame
        Dataframe to turn into table
    show_index: bool
        Whether to include index
    title: str
        Title for table
    index_name : str
        Title for index column
    headers: List[str]
        Titles for columns
    floatfmt: Union[str, List[str]]
        Float format string, or a list of format strings (one format per column)
    """

    if gtff.USE_TABULATE_DF:
        table = Table(title=title, show_lines=True)

        if show_index:
            table.add_column(index_name)

        if headers is not None:
            if isinstance(headers, pd.Index):
                headers = list(headers)
            if len(headers) != len(df.columns):
                log_and_raise(
                    ValueError(
                        "Length of headers does not match length of DataFrame")
                )
            for header in headers:
                table.add_column(str(header))
        else:
            for column in df.columns:
                table.add_column(str(column))

        if isinstance(floatfmt, list):
            if len(floatfmt) != len(df.columns):
                log_and_raise(
                    ValueError(
                        "Length of floatfmt list does not match length of DataFrame columns."
                    ))
        if isinstance(floatfmt, str):
            floatfmt = [floatfmt for _ in range(len(df.columns))]

        for idx, values in zip(df.index.tolist(), df.values.tolist()):
            row = [str(idx)] if show_index else []
            row += [
                str(x) if not isinstance(x, float) else f"{x:{floatfmt[idx]}}"
                for idx, x in enumerate(values)
            ]
            table.add_row(*row)
        console.print(table)
    else:
        console.print(df.to_string())
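
A hypothetical call to the function above, assuming the module-level console and the gtff.USE_TABULATE_DF flag from the source project are already in place:

import pandas as pd

df = pd.DataFrame(
    {"Open": [1.234, 2.5], "Close": [1.3, 2.61]},
    index=["AAPL", "MSFT"],
)
print_rich_table(df, show_index=True, index_name="Ticker", title="Prices", floatfmt=".2f")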