Code Example #1
 def print_to_file(self, log: Path):
     with open(log, "w") as file:
         tree = self._get_tree(verbose=True)
         console = Console(record=True)
         with console.capture() as capture:
             console.print(tree)
         file.write(console.export_text())
Code Example #2
 def test_working_listcall_archived(self, mock_subprocess):
     """ Test that listing pipelines works, showing archived pipelines """
     wf_table = nf_core.list.list_workflows(show_archived=True)
     console = Console(record=True)
     console.print(wf_table)
     output = console.export_text()
     assert "exoseq" in output
Code Example #3
def test_modules_list_remote(self):
    """Test listing available modules"""
    mods_list = nf_core.modules.ModuleList(None, remote=True)
    listed_mods = mods_list.list_modules()
    console = Console(record=True)
    console.print(listed_mods)
    output = console.export_text()
    assert "fastqc" in output
Code Example #4
import os

from rich.console import Console, RenderableType


def render_as_string(renderable: RenderableType) -> str:
    """Render any `rich` object in a fake console and
    return a *style-less* version of it as a string."""

    with open(os.devnull, "w") as null_stream:
        fake_console = Console(file=null_stream, record=True)
        fake_console.print(renderable)
        return fake_console.export_text()
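A small usage sketch for the helper above (the table built here is illustrative and not part of the original project): any rich renderable goes in, and a plain, style-free string comes back.

from rich.table import Table

table = Table("name", "value")            # any renderable works: Table, Tree, Text, ...
table.add_row("answer", "42")
plain = render_as_string(table)
assert "answer" in plain and "42" in plain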
Code Example #5
def test_render():
    console = Console(width=15, record=True)
    test = Text.from_markup(
        "[u][b]Where[/b] there is a [i]Will[/i], there is a Way.[/u]")
    console.print(test)
    output = console.export_text(styles=True)
    expected = "\x1b[1;4mWhere\x1b[0m\x1b[4m there is \x1b[0m\n\x1b[4ma \x1b[0m\x1b[3;4mWill\x1b[0m\x1b[4m, there \x1b[0m\n\x1b[4mis a Way.\x1b[0m\n"
    assert output == expected
Code Example #6
 def test_working_listcall(self, mock_subprocess):
     """ Test that listing pipelines works """
     wf_table = nf_core.list.list_workflows()
     console = Console(record=True)
     console.print(wf_table)
     output = console.export_text()
     assert "rnaseq" in output
     assert "exoseq" not in output
Code Example #7
 def test_modules_list(self):
     """Test listing available modules"""
     self.mods.pipeline_dir = None
     listed_mods = self.mods.list_modules()
     console = Console(record=True)
     console.print(listed_mods)
     output = console.export_text()
     assert "fastqc" in output
Code Example #8
def test_modules_install_and_list_pipeline(self):
    """Test listing locally installed modules"""
    self.mods_install.install("trimgalore")
    mods_list = nf_core.modules.ModuleList(self.pipeline_dir, remote=False)
    listed_mods = mods_list.list_modules()
    console = Console(record=True)
    console.print(listed_mods)
    output = console.export_text()
    assert "trimgalore" in output
Code Example #9
def test_modules_list_pipeline(self):
    """Test listing locally installed modules"""
    mods_list = nf_core.modules.ModuleList(self.pipeline_dir, remote=False)
    listed_mods = mods_list.list_modules()
    console = Console(record=True)
    console.print(listed_mods)
    output = console.export_text()
    assert "fastqc" in output
    assert "multiqc" in output
Code Example #10
File: test_licenses.py  Project: zzygyx9119/tools
 def test_run_licences_successful_json(self):
     self.license_obj.as_json = True
     console = Console(record=True)
     console.print(self.license_obj.run_licences())
     output = json.loads(console.export_text())
     for package in output:
         if "multiqc" in package:
             assert output[package][0] == "GPL v3"
             break
     else:
         raise LookupError("Could not find MultiQC")
Code Example #11
def test_get_recordings():
    from bbb_pymonitor.show_usage import get_recordings_info

    recordings = dict(GETRECORDINGS_EXAMPLES)["one.xml"]
    table = get_recordings_info(recordings)
    console = Console(record=True, width=120)
    console.print()
    console.print(table)
    text = console.export_text()
    assert "ACME stakeholders meeting" in text
    assert "Greenlight" in text
    assert "breakfast" in text
Code Example #12
def test_get_meeting():
    from bbb_pymonitor.show_usage import get_meeting_info

    meetings = dict(GETMEETINGS_EXAMPLES)["manypeople.xml"]

    table = get_meeting_info(meetings[0])
    console = Console(record=True, width=120)
    console.print()
    console.print(table)
    text = console.export_text()
    assert "Professor Chaos" in text
    assert "World Domination discussion" in text
Code Example #13
def test_get_summary():
    from bbb_pymonitor.show_usage import get_summary_table

    meetings = dict(GETMEETINGS_EXAMPLES)["two-meetings.xml"]

    table = get_summary_table(meetings)
    console = Console(record=True, width=120)
    console.print()
    console.print(table)
    text = console.export_text()
    assert "John's room" in text
    assert "A room" in text
Code Example #14
File: test_console.py  Project: sthagen/python-rich
def test_capture_and_record(capsys):
    recorder = Console(record=True)
    recorder.print("ABC")

    with recorder.capture() as capture:
        recorder.print("Hello")

    assert capture.get() == "Hello\n"

    recorded_text = recorder.export_text()
    out, err = capsys.readouterr()

    assert recorded_text == "ABC\nHello\n"
    assert capture.get() == "Hello\n"
    assert out == "ABC\n"
Code Example #15
def test_table_show_header_false_substitution(box, result):
    """When the box style is one with a custom header edge, it should be substituted for
    the equivalent box that does not have a custom header when show_header=False"""
    table = Table(show_header=False, box=box)
    table.add_column()
    table.add_column()

    table.add_row("1", "2")
    table.add_row("3", "4")

    console = Console(record=True)
    console.print(table)
    output = console.export_text()

    assert output == result
Code Example #16
File: plugin.py  Project: darrenburns/pytest-clarity
def pytest_assertrepr_compare(config, op, left, right):
    if config.getoption("-v") < 2:
        return

    op = display_op_for(op)
    width = int(config.getoption("--diff-width"))
    show_symbols = bool(config.getoption("--diff-symbols"))

    diff = Diff(left, right, width, show_symbols)

    output = StringIO()
    console = Console(file=output, record=True)

    console.print("\n[green]LHS[/] vs [red]RHS[/] shown below\n")
    console.print(diff)

    diff_text = console.export_text(styles=True)

    return [
        f"{display_op_for(op)} failed. [pytest-clarity diff shown]",
        *[f"\033[0m{line}" for line in diff_text.split(f"\n")],
    ]
Code Example #17
# NOTE: the original example starts mid-way through its table rows; the
# imports, console creation, table definition and first row below are an
# assumed reconstruction so the snippet runs on its own.
from rich.console import Console
from rich.table import Table

# record=True is needed for the export_text()/export_html() calls below
console = Console(record=True)


def print_table():
    table = Table(title="Star Wars Movies")
    table.add_column("Released", justify="right")
    table.add_column("Title")
    table.add_column("Box Office", justify="right")
    table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker",
                  "$952,110,690")
    table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
    table.add_row("Dec 15, 2017", "Star Wars Ep. VIII: The Last Jedi",
                  "$1,332,539,889")
    table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story",
                  "$1,332,439,889")

    console.print(table)


# Prints table
print_table()

# Get console output as text
file1 = "table_export_plaintext.txt"
text = console.export_text()
with open(file1, "w") as file:
    file.write(text)
print(f"Exported console output as plain text to {file1}")

# Calling print_table again because console output buffer
# is flushed once export function is called
print_table()

# Get console output as html
# use clear=False so output is not flushed after export
file2 = "table_export_html.html"
html = console.export_html(clear=False)
with open(file2, "w") as file:
    file.write(html)
print(f"Exported console output as html to {file2}")
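As the comments above point out, export_text() and export_html() empty the recording buffer by default, which is why print_table() has to be called again before the HTML export. A minimal sketch of that behaviour, assuming only that rich is installed:

from rich.console import Console

console = Console(record=True)
console.print("hello")

kept = console.export_text(clear=False)   # buffer is preserved
cleared = console.export_text()           # default clear=True empties it
assert kept == cleared == "hello\n"
assert console.export_text() == ""        # nothing left to export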
Code Example #18
File: test_licenses.py  Project: zzygyx9119/tools
 def test_run_licences_successful(self):
     console = Console(record=True)
     console.print(self.license_obj.run_licences())
     output = console.export_text()
     assert "GPL v3" in output
Code Example #19
 def generate_string_report(self):
     recorder = Console(record=True)
     with recorder.capture():
         recorder.print(self.report)
     return recorder.export_text()
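The method above works because capture() swallows the terminal output while record=True still feeds the recording buffer, so export_text() returns the report without it ever being printed. A minimal sketch of the same pattern (rich assumed installed):

from rich.console import Console

console = Console(record=True)
with console.capture():                   # nothing reaches the terminal
    console.print("report body")
assert console.export_text() == "report body\n"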
Code Example #20
File: utils.py  Project: veeruds/central-api-cli
    def output(
        self,
        outdata: Union[List[str], Dict[str, Any]],
        tablefmt: str = "rich",
        title: str = None,
        caption: str = None,
        account: str = None,
        config=None,
        ok_status: Union[int, List[int], Tuple[int, str],
                         List[Tuple[int, str]]] = None,
    ) -> str:
        # log.debugv(f"data passed to output():\n{pprint(outdata, indent=4)}")
        def _do_subtables(data: list, tablefmt: str = "rich"):
            out = []
            for inner_dict in data:  # the object: switch/vlan etc dict
                for key, val in inner_dict.items():
                    if not isinstance(val, (list, dict, tuple)):
                        if val is None:
                            inner_dict[key] = ''
                        elif isinstance(val,
                                        str) and val.lower() in ['up', 'down']:
                            color = 'red' if val.lower() == 'down' else 'green'
                            if tablefmt == 'rich':
                                inner_dict[
                                    key] = f'[b {color}]{val.title()}[/b {color}]'
                            else:
                                inner_dict[key] = typer.style(val.title(),
                                                              fg=color)
                        else:
                            if tablefmt == 'rich':
                                inner_dict[key] = Text(str(val), style=None)
                            else:
                                inner_dict[key] = str(val)
                    else:
                        val = self.listify(val)
                        if val and tablefmt == "rich" and hasattr(
                                val[0], 'keys'):
                            inner_table = Table(
                                *(k for k in val[0].keys()),
                                show_header=True,
                                # padding=(0, 0),
                                pad_edge=False,
                                collapse_padding=True,
                                show_edge=False,
                                header_style="bold cyan",
                                box=SIMPLE)
                            _ = [
                                inner_table.add_row(*[
                                    self.do_pretty(kk, str(vv))
                                    for kk, vv in v.items()
                                ]) for v in val
                            ]
                            with console.capture():
                                console.print(inner_table)
                            inner_dict[key] = console.export_text()
                        elif val and tablefmt == "tabulate" and hasattr(
                                val[0], 'keys'):
                            inner_table = tabulate(val,
                                                   headers="keys",
                                                   tablefmt=tablefmt)
                            inner_dict[key] = inner_table
                        else:
                            if all(isinstance(v, str) for v in val):
                                inner_dict[key] = ", ".join(val)
                out.append(inner_dict)
            return out

        raw_data = outdata
        _lexer = table_data = None

        if config and config.sanitize and raw_data and all(
                isinstance(x, dict) for x in raw_data):
            redact = [
                "mac", "serial", "neighborMac", "neighborSerial",
                "neighborPortMac", "longitude", "latitude"
            ]
            outdata = [{
                k: d[k] if k not in redact else "--redacted--"
                for k in d
            } for d in raw_data]

        # -- // List[str, ...] \\ --  Bypass all formatters, (config file output, etc...)
        if outdata and all(isinstance(x, str) for x in outdata):
            tablefmt = "strings"

        # -- convert List[dict] --> Dict[dev_name: dict] for yaml/json outputs
        if tablefmt in ['json', 'yaml', 'yml']:
            outdata = self.listify(outdata)
            if outdata and 'name' in outdata[0]:
                outdata: Dict[str, Dict[str, Any]] = {
                    item['name']:
                    {k: v
                     for k, v in item.items() if k != 'name'}
                    for item in outdata
                }

        if tablefmt == "json":
            raw_data = json.dumps(outdata, indent=4)
            _lexer = lexers.JsonLexer

        elif tablefmt in ["yml", "yaml"]:
            raw_data = yaml.dump(outdata, sort_keys=False)
            _lexer = lexers.YamlLexer

        elif tablefmt == "csv":
            raw_data = table_data = "\n".join([
                ",".join([
                    k if outdata.index(d) == 0 else str(v)
                    for k, v in d.items() if k not in CUST_KEYS
                ]) for d in outdata
            ])

        elif tablefmt == "rich":
            from rich.console import Console
            from rich.table import Table
            from rich.box import HORIZONTALS, SIMPLE
            from rich.text import Text
            from centralcli import constants
            console = Console(record=True)

            customer_id, customer_name = "", ""
            # outdata = self.listify(outdata)

            # -- // List[dict, ...] \\ --
            if outdata and all(isinstance(x, dict) for x in outdata):
                customer_id = outdata[0].get("customer_id", "")
                customer_name = outdata[0].get("customer_name", "")
                outdata = [{k: v
                            for k, v in d.items() if k not in CUST_KEYS}
                           for d in outdata]

                table = Table(
                    # show_edge=False,
                    show_header=True,
                    title=title,
                    header_style='magenta',
                    show_lines=False,
                    box=HORIZONTALS,
                    row_styles=['none', 'dark_sea_green'])

                fold_cols = ['description']
                _min_max = {'min': 10, 'max': 30}
                set_width_cols = {'name': _min_max, 'model': _min_max}
                full_cols = [
                    'mac', 'serial', 'ip', 'public ip', 'version', 'radio',
                    'id'
                ]

                for k in outdata[0].keys():
                    if k in fold_cols:
                        table.add_column(k,
                                         overflow='fold',
                                         max_width=115,
                                         justify='left')
                    elif k in set_width_cols:
                        table.add_column(k,
                                         min_width=set_width_cols[k]['min'],
                                         max_width=set_width_cols[k]['max'],
                                         justify='left')
                    elif k in full_cols:
                        table.add_column(k, no_wrap=True, justify='left')
                    else:
                        table.add_column(k, justify='left')

                formatted = _do_subtables(outdata)
                [
                    table.add_row(*list(in_dict.values()))
                    for in_dict in formatted
                ]

                if title:
                    table.title = f'[italic cornflower_blue]{constants.what_to_pretty(title)}'
                if account or caption:
                    table.caption_justify = 'left'
                    table.caption = '' if not account else f'[italic dark_olive_green2] Account: {account}'
                    if caption:
                        table.caption = f"[italic dark_olive_green2]{table.caption}  {caption}"

                data_header = f"--\n{'Customer ID:':15}{customer_id}\n{'Customer Name:':15} {customer_name}\n--\n"

                with console.capture():
                    console.print(table)

                raw_data = console.export_text(clear=False)
                table_data = console.export_text(styles=True)

                raw_data = f"{data_header}{raw_data}" if customer_id else f"{raw_data}"
                table_data = f"{data_header}{table_data}" if customer_id else f"{table_data}"

        elif tablefmt == "tabulate":
            customer_id = customer_name = ""
            outdata = self.listify(outdata)

            # -- // List[dict, ...] \\ --
            if outdata and all(isinstance(x, dict) for x in outdata):
                customer_id = outdata[0].get("customer_id", "")
                customer_name = outdata[0].get("customer_name", "")
                outdata = [{k: v
                            for k, v in d.items() if k not in CUST_KEYS}
                           for d in outdata]
                raw_data = outdata

                outdata = _do_subtables(outdata, tablefmt=tablefmt)
                # outdata = [dict((k, v) for k, v in zip(outdata[0].keys(), val)) for val in outdata]

                table_data = tabulate(outdata,
                                      headers="keys",
                                      tablefmt=tablefmt)
                td = table_data.splitlines(keepends=True)
                table_data = f"{typer.style(td[0], fg='cyan')}{''.join(td[1:])}"

                data_header = f"--\n{'Customer ID:':15}{customer_id}\n" \
                              f"{'Customer Name:':15} {customer_name}\n--\n"
                table_data = f"{data_header}{table_data}" if customer_id else f"{table_data}"
                raw_data = f"{data_header}{raw_data}" if customer_id else f"{raw_data}"

        else:  # strings output No formatting
            # -- // List[str, ...] \\ --
            if len(outdata) == 1:
                if "\n" not in outdata[0]:
                    # we can format green as only success output is sent through formatter.
                    table_data = typer.style(f"  {outdata[0]}", fg="green")
                    raw_data = outdata[0]
                else:  # template / config file output
                    # get rid of double nl @ EoF (configs)
                    raw_data = table_data = "{}\n".format(
                        '\n'.join(outdata).rstrip('\n'))
            else:
                raw_data = table_data = '\n'.join(outdata)
                # Not sure what hits this, but it was created so something must
                log.debug("List[str] else hit")

        if _lexer and raw_data:
            table_data = highlight(
                bytes(raw_data, 'UTF-8'), _lexer(),
                formatters.Terminal256Formatter(style='solarized-dark'))

        return self.Output(rawdata=raw_data,
                           prettydata=table_data,
                           config=config)
Code Example #21
File: __main__.py  Project: mrzv/saturn
def run(infn: "input notebook",
        outfn: "output notebook (if empty, input modified in place)",
        clean: "run from scratch, ignoring checkpoints" = False,
        auto_capture: "automatically capture images" = False,
        debug: "show debugging information" = False,
        dry_run: "don't save the processed notebook" = False,
        only_root_output:
        "suppress output everywhere but rank 0 (for MPI)" = False,
        repl: "run REPL after the notebook is processed" = False):
    """Run the notebook."""
    if not outfn:
        outfn = infn

    if os.path.exists(infn):
        with open(infn) as f:
            cells = c.parse(f)
    else:
        cells = []

    def output(cell):
        if root or (not only_root_output and type(cell) is c.OutputCell):
            show_console(cell, rule=debug, verbose=debug)

    def info(*args, block=False, **kw):
        if root:
            if not block:
                console.print(Rule(*args, **kw))
            else:
                console.print(*args, **kw)

    nb = notebook.Notebook(name=infn, auto_capture=auto_capture)
    nb.add(cells)

    if not clean:
        checkpoint = nb.find_checkpoint()
        if checkpoint is not None:
            info(f"Skipping to checkpoint {checkpoint}", style='magenta')
            nb.skip(checkpoint, output)
            info('Resuming', style="magenta")

    try:
        nb.process(output, info)

        if repl:
            run_repl(nb, output, outfn, dry_run)
    except SystemExit:
        info("Caught SystemExit")
        nb.move_all_incoming()
    except:
        info("Caught exception, aborting")
        from .traceback import Traceback
        tb = Traceback(nb, debug=debug, width=80)

        console_tb = Console(record=True, width=80, theme=theme)
        console_tb.print(tb)

        nb.skip_next_output()
        nb.append(c.OutputCell.from_string(console_tb.export_text()))
        nb.move_all_incoming()

    if not dry_run and root:
        nb.save(outfn)
Code Example #22
    def output_profiles(
        self,
        stats: ScaleneStatistics,
        pid: int,
        profile_this_code: Callable[[Filename, LineNumber], bool],
        python_alias_dir_name: Filename,
        python_alias_dir: Filename,
        profile_memory: bool = True,
        reduced_profile: bool = False,
    ) -> bool:
        """Write the profile out."""
        # Get the children's stats, if any.
        if not pid:
            stats.merge_stats(python_alias_dir_name)
            try:
                shutil.rmtree(python_alias_dir)
            except BaseException:
                pass
        current_max: float = stats.max_footprint
        # If we've collected any samples, dump them.
        if (not stats.total_cpu_samples
                and not stats.total_memory_malloc_samples
                and not stats.total_memory_free_samples):
            # Nothing to output.
            return False
        # Collect all instrumented filenames.
        all_instrumented_files: List[Filename] = list(
            set(
                list(stats.cpu_samples_python.keys()) +
                list(stats.cpu_samples_c.keys()) +
                list(stats.memory_free_samples.keys()) +
                list(stats.memory_malloc_samples.keys())))
        if not all_instrumented_files:
            # We didn't collect samples in source files.
            return False
        title = Text()
        mem_usage_line: Union[Text, str] = ""
        growth_rate = 0.0
        if profile_memory:
            samples = stats.memory_footprint_samples
            if len(samples.get()) > 0:
                # Output a sparkline as a summary of memory usage over time.
                _, _, spark_str = sparkline.generate(
                    samples.get()[0:samples.len()], 0, current_max)
                # Compute growth rate (slope), between 0 and 1.
                if stats.allocation_velocity[1] > 0:
                    growth_rate = (100.0 * stats.allocation_velocity[0] /
                                   stats.allocation_velocity[1])
                # If memory used is > 1GB, use GB as the unit.
                if current_max > 1024:
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (" (max: %6.2fGB, growth rate: %3.0f%%)\n" %
                         ((current_max / 1024), growth_rate)),
                    )
                else:
                    # Otherwise, use MB.
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (" (max: %6.2fMB, growth rate: %3.0f%%)\n" %
                         (current_max, growth_rate)),
                    )

        null = open("/dev/null", "w")
        # Get column width of the terminal and adjust to fit.
        # Note that Scalene works best with at least 132 columns.
        if self.html:
            column_width = 132
        else:
            column_width = shutil.get_terminal_size().columns
        console = Console(
            width=column_width,
            record=True,
            force_terminal=True,
            file=null,
        )
        # Build a list of files we will actually report on.
        report_files: List[Filename] = []
        # Sort in descending order of CPU cycles, and then ascending order by filename
        for fname in sorted(
                all_instrumented_files,
                key=lambda f: (-(stats.cpu_samples[f]), f),
        ):
            fname = Filename(fname)
            try:
                percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                    stats.total_cpu_samples)
            except ZeroDivisionError:
                percent_cpu_time = 0

            # Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
            if (stats.malloc_samples[fname] < self.malloc_threshold
                    and percent_cpu_time < self.cpu_percent_threshold):
                continue
            report_files.append(fname)

        # Don't actually output the profile if we are a child process.
        # Instead, write info to disk for the main process to collect.
        if pid:
            stats.output_stats(pid, python_alias_dir_name)
            return True

        for fname in report_files:
            # Print header.
            percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                stats.total_cpu_samples)
            new_title = mem_usage_line + (
                "%s: %% of time = %6.2f%% out of %6.2fs." %
                (fname, percent_cpu_time, stats.elapsed_time))
            # Only display total memory usage once.
            mem_usage_line = ""

            tbl = Table(
                box=box.MINIMAL_HEAVY_HEAD,
                title=new_title,
                collapse_padding=True,
                width=column_width - 1,
            )

            tbl.add_column("Line", justify="right", no_wrap=True)
            tbl.add_column("Time %\nPython", no_wrap=True)
            tbl.add_column("Time %\nnative", no_wrap=True)
            tbl.add_column("Sys\n%", no_wrap=True)
            tbl.add_column("GPU\n%", no_wrap=True)

            other_columns_width = 0  # Size taken up by all columns BUT code

            if profile_memory:
                tbl.add_column("Mem %\nPython", no_wrap=True)
                tbl.add_column("Net\n(MB)", no_wrap=True)
                tbl.add_column("Memory usage\nover time / %", no_wrap=True)
                tbl.add_column("Copy\n(MB/s)", no_wrap=True)
                other_columns_width = 72 + 5  # GPU
                tbl.add_column(
                    "\n" + fname,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )
            else:
                other_columns_width = 36 + 5  # GPU
                tbl.add_column(
                    "\n" + fname,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )

            # Print out the profile for the source, line by line.
            with open(fname, "r") as source_file:
                # We track whether we should put in ellipsis (for reduced profiles)
                # or not.
                did_print = True  # did we print a profile line last time?
                code_lines = source_file.read()
                # Generate syntax highlighted version for the whole file,
                # which we will consume a line at a time.
                # See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
                syntax_highlighted = None
                if self.html:
                    syntax_highlighted = Syntax(
                        code_lines,
                        "python",
                        theme="default",
                        line_numbers=False,
                        code_width=None,
                    )
                else:
                    syntax_highlighted = Syntax(
                        code_lines,
                        "python",
                        theme="vim",
                        line_numbers=False,
                        code_width=None,
                    )
                capture_console = Console(
                    width=column_width - other_columns_width,
                    force_terminal=True,
                )
                formatted_lines = [
                    SyntaxLine(segments) for segments in
                    capture_console.render_lines(syntax_highlighted)
                ]
                for line_no, line in enumerate(formatted_lines, start=1):
                    old_did_print = did_print
                    did_print = self.output_profile_line(
                        fname,
                        LineNumber(line_no),
                        line,
                        console,
                        tbl,
                        stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=False,
                        is_function_summary=False,
                        reduced_profile=reduced_profile,
                    )
                    if old_did_print and not did_print:
                        # We are skipping lines, so add an ellipsis.
                        tbl.add_row("...")
                    old_did_print = did_print

            # Potentially print a function summary.
            fn_stats = stats.build_function_stats(fname)
            print_fn_summary = False
            for fn_name in fn_stats.cpu_samples_python:
                if fn_name == fname:
                    continue
                print_fn_summary = True
                break

            if print_fn_summary:
                tbl.add_row(None, end_section=True)
                txt = Text.assemble("function summary", style="bold italic")
                if profile_memory:
                    tbl.add_row("", "", "", "", "", "", "", "", "", txt)
                else:
                    tbl.add_row("", "", "", "", "", txt)

                for fn_name in sorted(
                        fn_stats.cpu_samples_python,
                        key=lambda k: stats.firstline_map[k],
                ):
                    if fn_name == fname:
                        continue
                    if self.html:
                        syntax_highlighted = Syntax(
                            fn_name,
                            "python",
                            theme="default",
                            line_numbers=False,
                            code_width=None,
                        )
                    else:
                        syntax_highlighted = Syntax(
                            fn_name,
                            "python",
                            theme="vim",
                            line_numbers=False,
                            code_width=None,
                        )
                    # force print, suppress line numbers
                    self.output_profile_line(
                        fn_name,
                        LineNumber(1),
                        syntax_highlighted,  # type: ignore
                        console,
                        tbl,
                        fn_stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=True,
                        is_function_summary=True,
                        reduced_profile=reduced_profile,
                    )

            console.print(tbl)

            # Report top K lines (currently 5) in terms of net memory consumption.
            net_mallocs: Dict[LineNumber, float] = defaultdict(float)
            for line_no in stats.bytei_map[fname]:
                for bytecode_index in stats.bytei_map[fname][line_no]:
                    net_mallocs[line_no] += (stats.memory_malloc_samples[fname]
                                             [line_no][bytecode_index] -
                                             stats.memory_free_samples[fname]
                                             [line_no][bytecode_index])
            net_mallocs = OrderedDict(
                sorted(net_mallocs.items(), key=itemgetter(1), reverse=True))
            if len(net_mallocs) > 0:
                console.print("Top net memory consumption, by line:")
                number = 1
                for net_malloc_lineno in net_mallocs:
                    if net_mallocs[net_malloc_lineno] <= 1:
                        break
                    if number > 5:
                        break
                    output_str = ("(" + str(number) + ") " +
                                  ("%5.0f" % (net_malloc_lineno)) + ": " +
                                  ("%5.0f" %
                                   (net_mallocs[net_malloc_lineno])) + " MB")
                    console.print(output_str)
                    number += 1

            # Only report potential leaks if the allocation velocity (growth rate) is above some threshold
            # FIXME: fixed at 1% for now.
            # We only report potential leaks where the confidence interval is quite tight and includes 1.
            growth_rate_threshold = 0.01
            leak_reporting_threshold = 0.05
            leaks = []
            if growth_rate / 100 > growth_rate_threshold:
                vec = list(stats.leak_score[fname].values())
                keys = list(stats.leak_score[fname].keys())
                for index, item in enumerate(stats.leak_score[fname].values()):
                    # See https://en.wikipedia.org/wiki/Rule_of_succession
                    frees = item[1]
                    allocs = item[0]
                    expected_leak = (frees + 1) / (frees + allocs + 2)
                    if expected_leak <= leak_reporting_threshold:
                        leaks.append((
                            keys[index],
                            1 - expected_leak,
                            net_mallocs[keys[index]],
                        ))
                if len(leaks) > 0:
                    # Report in descending order by least likelihood
                    for leak in sorted(leaks, key=itemgetter(1), reverse=True):
                        output_str = (
                            "Possible memory leak identified at line " +
                            str(leak[0]) + " (estimated likelihood: " +
                            ("%3.0f" %
                             (leak[1] * 100)) + "%" + ", velocity: " +
                            ("%3.0f MB/s" %
                             (leak[2] / stats.elapsed_time)) + ")")
                        console.print(output_str)

        if self.html:
            # Write HTML file.
            md = Markdown(
                "generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
            )
            console.print(md)
            if not self.output_file:
                self.output_file = "/dev/stdout"
            console.save_html(self.output_file, clear=False)
        else:
            if not self.output_file:
                # No output file specified: write to stdout.
                sys.stdout.write(console.export_text(styles=True))
            else:
                # Don't output styles to text file.
                console.save_text(self.output_file, styles=False, clear=False)
        return True
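Stripped of the profiling logic, the recording pattern used above reduces to a few lines: render into a throwaway stream with force_terminal=True so colour codes are kept, then persist the recording with save_text() or save_html(). A condensed sketch (the file names are illustrative):

import io

from rich.console import Console

console = Console(file=io.StringIO(), record=True,
                  force_terminal=True, width=132)
console.print("[bold magenta]profile output[/bold magenta]")

console.save_text("profile.txt", styles=False, clear=False)  # plain text
console.save_html("profile.html", clear=False)               # styled HTML
ansi_text = console.export_text(styles=True)                 # ANSI string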
Code Example #23
File: test_console.py  Project: gpsbird/rich
def test_export_text():
    console = Console(record=True, width=100)
    console.print("[b]foo")
    text = console.export_text()
    expected = "foo\n"
    assert text == expected
Code Example #24
    def generate(
        self,
        prompt: str = "",
        temperature: float = 0.7,
        max_tokens: int = 32,
        stop: str = "",
        model: str = "davinci",
        bg: tuple = (31, 36, 40),
        accent: tuple = (0, 64, 0),
        pngquant: bool = False,
        output_txt: str = None,
        output_img: str = None,
        include_prompt: bool = True,
        include_coloring: bool = True,
        watermark: str = "Generated using GPT-3 via OpenAI's API",
    ):

        assert isinstance(stop, str), "stop is not a str."

        data = {
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stop": stop,
            "stream": True,
            "logprobs": 1,
        }

        console = Console(record=True)
        console.clear()

        if include_prompt:
            prompt_text = Text(prompt, style="bold", end="")
            console.print(prompt_text, end="")

        with httpx.stream(
            "POST",
            f"https://api.openai.com/v1/engines/{model}/completions",
            headers=self.headers,
            data=json.dumps(data),
            timeout=None,
        ) as r:
            for chunk in r.iter_text():
                text = chunk[6:]  # JSON chunks are prepended with "data: "
                if len(text) < 10 and "[DONE]" in text:
                    break

                temp_token = None
                logprobs = json.loads(text)["choices"][0]["logprobs"]
                tokens = logprobs["tokens"]
                token_logprobs = logprobs["token_logprobs"]
                for i in range(len(tokens)):
                    token = tokens[i]
                    log_prob = token_logprobs[i]

                    if token == stop or token == "<|endoftext|>":
                        break

                    if token.startswith("bytes:") and not temp_token:
                        # We need to hold the 2-byte token to the next 1-byte token
                        # to get the full bytestring to decode
                        #
                        # The API-returned tokens are in the form:
                        # "bytes:\xe2\x80" and "bytes:\x9d"
                        temp_token = token[6:]
                        temp_prob = log_prob
                    else:
                        if temp_token:
                            bytestring = temp_token + token[6:]

                            # https://stackoverflow.com/a/37059682/9314418
                            token = codecs.escape_decode(bytestring, "utf-8")[0].decode(
                                "utf-8"
                            )
                            temp_token = None
                            log_prob = temp_prob  # the true prob is the first one
                        text = Text(
                            token,
                            style=f"on {self.derive_token_bg(log_prob, bg, accent, include_coloring,)}",
                            end="",
                        )
                        console.print(text, end="")

        # Export the generated text as HTML.
        raw_html = self.replace_hex_colors(
            console.export_html(inline_styles=True, code_format="{code}", clear=False)
        )

        # Render the HTML as an image
        prompt_hash = hashlib.sha256(bytes(prompt, "utf-8")).hexdigest()[0:8]
        temp_string = str(temperature).replace(".", "_")
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if output_img:
            img_file_name = output_img
        else:
            if not os.path.exists("img_output"):
                os.makedirs("img_output")
            img_file_name = f"img_output/{timestamp}__{prompt_hash}__{temp_string}.png"

        if self.imgmaker:
            self.imgmaker.generate(
                "dark.html",
                {
                    "html": raw_html.replace("\n", "</br>"),
                    "accent": f"rgb({accent[0]},{accent[1]},{accent[2]})",
                    "watermark": watermark,
                },
                width=450,
                height=600,
                downsample=False,
                output_file=img_file_name,
                use_pngquant=pngquant,
            )

        # Save the generated text to a plain-text file
        if output_txt:
            txt_file_name = output_txt
        else:
            if not os.path.exists("txt_output"):
                os.makedirs("txt_output")
            txt_file_name = f"txt_output/{prompt_hash}__{temp_string}.txt"

        with open(txt_file_name, "a", encoding="utf-8") as f:
            f.write(console.export_text() + "\n" + "=" * 20 + "\n")

        console.line()
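The export_html() call above deserves a note: inline_styles=True attaches the styles directly to each span (useful when the fragment is embedded in another page, as with the image template here), and code_format="{code}" drops rich's default HTML boilerplate so only the rendered markup is returned. A minimal sketch, assuming only that rich is installed:

from rich.console import Console

console = Console(record=True)
console.print("[bold red]error:[/bold red] something failed")

fragment = console.export_html(inline_styles=True,
                               code_format="{code}", clear=False)
full_page = console.export_html()   # complete document with a <style> block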
Code Example #25
File: test_text.py  Project: yalaudah/rich
def test_print(print_text, result):
    console = Console(record=True)
    console.print(*print_text)
    assert console.export_text(styles=False) == result
Code Example #26
 def render_text(self) -> str:
     """Exports the info to string"""
     console = Console(record=True)
     with console.capture():
         self.info(console)
     return console.export_text()