Пример #1
0
def test_save_html():
    """Console.save_html should write the recorded output to an HTML file."""
    expected = "<!DOCTYPE html>\n<head>\n<style>\n\nbody {\n    color: #000000;\n    background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n    <code>\n        <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">foo\n</pre>\n    </code>\n</body>\n</html>\n"
    term = Console(record=True, width=100)
    term.print("foo")
    # Save into a throwaway directory that is removed after the check.
    with tempfile.TemporaryDirectory() as tmp_dir:
        target = os.path.join(tmp_dir, "example.html")
        term.save_html(target)
        with open(target, "rt") as html_file:
            assert html_file.read() == expected
Пример #2
0
    def output_profiles(
        self,
        stats: ScaleneStatistics,
        pid: int,
        profile_this_code: Callable[[Filename, LineNumber], bool],
        python_alias_dir_name: Filename,
        python_alias_dir: Filename,
        profile_memory: bool = True,
        reduced_profile: bool = False,
    ) -> bool:
        """Write the collected profile to the configured output.

        Args:
            stats: Aggregated profiling statistics to report on.
            pid: Process id; falsy (0) means we are the main process.
                A child process writes its stats to disk instead of
                rendering a report.
            profile_this_code: Predicate deciding whether a file/line
                should be profiled.
            python_alias_dir_name: Directory used to exchange stats
                between child processes and the main process.
            python_alias_dir: Temporary alias directory to remove once
                children's stats have been merged.
            profile_memory: Include memory columns in the report.
            reduced_profile: Elide lines without samples (shown as "...").

        Returns:
            True if a report was produced (or child stats were written),
            False if there was nothing to report.
        """
        # Get the children's stats, if any.
        if not pid:
            stats.merge_stats(python_alias_dir_name)
            try:
                shutil.rmtree(python_alias_dir)
            except BaseException:
                # Best-effort cleanup; a missing/locked dir is not fatal.
                pass
        current_max: float = stats.max_footprint
        # If we've collected any samples, dump them.
        if (not stats.total_cpu_samples
                and not stats.total_memory_malloc_samples
                and not stats.total_memory_free_samples):
            # Nothing to output.
            return False
        # Collect all instrumented filenames.
        all_instrumented_files: List[Filename] = list(
            set(
                list(stats.cpu_samples_python.keys()) +
                list(stats.cpu_samples_c.keys()) +
                list(stats.memory_free_samples.keys()) +
                list(stats.memory_malloc_samples.keys())))
        if not all_instrumented_files:
            # We didn't collect samples in source files.
            return False
        title = Text()
        mem_usage_line: Union[Text, str] = ""
        growth_rate = 0.0
        if profile_memory:
            samples = stats.memory_footprint_samples
            if len(samples.get()) > 0:
                # Output a sparkline as a summary of memory usage over time.
                _, _, spark_str = sparkline.generate(
                    samples.get()[0:samples.len()], 0, current_max)
                # Compute growth rate (slope), between 0 and 1.
                if stats.allocation_velocity[1] > 0:
                    growth_rate = (100.0 * stats.allocation_velocity[0] /
                                   stats.allocation_velocity[1])
                # If memory used is > 1GB, use GB as the unit.
                if current_max > 1024:
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (" (max: %6.2fGB, growth rate: %3.0f%%)\n" %
                         ((current_max / 1024), growth_rate)),
                    )
                else:
                    # Otherwise, use MB.
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (" (max: %6.2fMB, growth rate: %3.0f%%)\n" %
                         (current_max, growth_rate)),
                    )

        # Render to /dev/null; we only consume the console's recording
        # buffer (record=True) via save_html/save_text/export_text below.
        null = open("/dev/null", "w")
        # Get column width of the terminal and adjust to fit.
        # Note that Scalene works best with at least 132 columns.
        if self.html:
            column_width = 132
        else:
            column_width = shutil.get_terminal_size().columns
        console = Console(
            width=column_width,
            record=True,
            force_terminal=True,
            file=null,
        )
        # Build a list of files we will actually report on.
        report_files: List[Filename] = []
        # Sort in descending order of CPU cycles, and then ascending order by filename
        for fname in sorted(
                all_instrumented_files,
                key=lambda f: (-(stats.cpu_samples[f]), f),
        ):
            fname = Filename(fname)
            try:
                percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                    stats.total_cpu_samples)
            except ZeroDivisionError:
                percent_cpu_time = 0

            # Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
            if (stats.malloc_samples[fname] < self.malloc_threshold
                    and percent_cpu_time < self.cpu_percent_threshold):
                continue
            report_files.append(fname)

        # Don't actually output the profile if we are a child process.
        # Instead, write info to disk for the main process to collect.
        if pid:
            stats.output_stats(pid, python_alias_dir_name)
            return True

        for fname in report_files:
            # Print header.
            percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                stats.total_cpu_samples)
            new_title = mem_usage_line + (
                "%s: %% of time = %6.2f%% out of %6.2fs." %
                (fname, percent_cpu_time, stats.elapsed_time))
            # Only display total memory usage once.
            mem_usage_line = ""

            tbl = Table(
                box=box.MINIMAL_HEAVY_HEAD,
                title=new_title,
                collapse_padding=True,
                width=column_width - 1,
            )

            tbl.add_column("Line", justify="right", no_wrap=True)
            tbl.add_column("Time %\nPython", no_wrap=True)
            tbl.add_column("Time %\nnative", no_wrap=True)
            tbl.add_column("Sys\n%", no_wrap=True)
            tbl.add_column("GPU\n%", no_wrap=True)

            other_columns_width = 0  # Size taken up by all columns BUT code

            if profile_memory:
                tbl.add_column("Mem %\nPython", no_wrap=True)
                tbl.add_column("Net\n(MB)", no_wrap=True)
                tbl.add_column("Memory usage\nover time / %", no_wrap=True)
                tbl.add_column("Copy\n(MB/s)", no_wrap=True)
                other_columns_width = 72 + 5  # GPU
                tbl.add_column(
                    "\n" + fname,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )
            else:
                other_columns_width = 36 + 5  # GPU
                tbl.add_column(
                    "\n" + fname,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )

            # Print out the the profile for the source, line by line.
            with open(fname, "r") as source_file:
                # We track whether we should put in ellipsis (for reduced profiles)
                # or not.
                did_print = True  # did we print a profile line last time?
                code_lines = source_file.read()
                # Generate syntax highlighted version for the whole file,
                # which we will consume a line at a time.
                # See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
                syntax_highlighted = None
                if self.html:
                    syntax_highlighted = Syntax(
                        code_lines,
                        "python",
                        theme="default",
                        line_numbers=False,
                        code_width=None,
                    )
                else:
                    syntax_highlighted = Syntax(
                        code_lines,
                        "python",
                        theme="vim",
                        line_numbers=False,
                        code_width=None,
                    )
                capture_console = Console(
                    width=column_width - other_columns_width,
                    force_terminal=True,
                )
                formatted_lines = [
                    SyntaxLine(segments) for segments in
                    capture_console.render_lines(syntax_highlighted)
                ]
                for line_no, line in enumerate(formatted_lines, start=1):
                    old_did_print = did_print
                    did_print = self.output_profile_line(
                        fname,
                        LineNumber(line_no),
                        line,
                        console,
                        tbl,
                        stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=False,
                        is_function_summary=False,
                        reduced_profile=reduced_profile,
                    )
                    if old_did_print and not did_print:
                        # We are skipping lines, so add an ellipsis.
                        tbl.add_row("...")
                    old_did_print = did_print

            # Potentially print a function summary.
            fn_stats = stats.build_function_stats(fname)
            print_fn_summary = False
            for fn_name in fn_stats.cpu_samples_python:
                if fn_name == fname:
                    continue
                print_fn_summary = True
                break

            if print_fn_summary:
                tbl.add_row(None, end_section=True)
                txt = Text.assemble("function summary", style="bold italic")
                if profile_memory:
                    tbl.add_row("", "", "", "", "", "", "", "", "", txt)
                else:
                    tbl.add_row("", "", "", "", "", txt)

                for fn_name in sorted(
                        fn_stats.cpu_samples_python,
                        key=lambda k: stats.firstline_map[k],
                ):
                    if fn_name == fname:
                        continue
                    if self.html:
                        syntax_highlighted = Syntax(
                            fn_name,
                            "python",
                            theme="default",
                            line_numbers=False,
                            code_width=None,
                        )
                    else:
                        syntax_highlighted = Syntax(
                            fn_name,
                            "python",
                            theme="vim",
                            line_numbers=False,
                            code_width=None,
                        )
                    # force print, suppress line numbers
                    self.output_profile_line(
                        fn_name,
                        LineNumber(1),
                        syntax_highlighted,  # type: ignore
                        console,
                        tbl,
                        fn_stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=True,
                        is_function_summary=True,
                        reduced_profile=reduced_profile,
                    )

            console.print(tbl)

            # Report top K lines (currently 5) in terms of net memory consumption.
            net_mallocs: Dict[LineNumber, float] = defaultdict(float)
            for line_no in stats.bytei_map[fname]:
                for bytecode_index in stats.bytei_map[fname][line_no]:
                    net_mallocs[line_no] += (stats.memory_malloc_samples[fname]
                                             [line_no][bytecode_index] -
                                             stats.memory_free_samples[fname]
                                             [line_no][bytecode_index])
            net_mallocs = OrderedDict(
                sorted(net_mallocs.items(), key=itemgetter(1), reverse=True))
            if len(net_mallocs) > 0:
                console.print("Top net memory consumption, by line:")
                number = 1
                for net_malloc_lineno in net_mallocs:
                    if net_mallocs[net_malloc_lineno] <= 1:
                        break
                    if number > 5:
                        break
                    output_str = ("(" + str(number) + ") " +
                                  ("%5.0f" % (net_malloc_lineno)) + ": " +
                                  ("%5.0f" %
                                   (net_mallocs[net_malloc_lineno])) + " MB")
                    console.print(output_str)
                    number += 1

            # Only report potential leaks if the allocation velocity (growth rate) is above some threshold
            # FIXME: fixed at 1% for now.
            # We only report potential leaks where the confidence interval is quite tight and includes 1.
            growth_rate_threshold = 0.01
            leak_reporting_threshold = 0.05
            leaks = []
            if growth_rate / 100 > growth_rate_threshold:
                vec = list(stats.leak_score[fname].values())
                keys = list(stats.leak_score[fname].keys())
                for index, item in enumerate(stats.leak_score[fname].values()):
                    # See https://en.wikipedia.org/wiki/Rule_of_succession
                    frees = item[1]
                    allocs = item[0]
                    expected_leak = (frees + 1) / (frees + allocs + 2)
                    if expected_leak <= leak_reporting_threshold:
                        leaks.append((
                            keys[index],
                            1 - expected_leak,
                            net_mallocs[keys[index]],
                        ))
                if len(leaks) > 0:
                    # Report in descending order by least likelihood
                    for leak in sorted(leaks, key=itemgetter(1), reverse=True):
                        output_str = (
                            "Possible memory leak identified at line " +
                            str(leak[0]) + " (estimated likelihood: " +
                            ("%3.0f" %
                             (leak[1] * 100)) + "%" + ", velocity: " +
                            ("%3.0f MB/s" %
                             (leak[2] / stats.elapsed_time)) + ")")
                        console.print(output_str)

        if self.html:
            # Write HTML file.
            md = Markdown(
                "generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
            )
            console.print(md)
            if not self.output_file:
                self.output_file = "/dev/stdout"
            console.save_html(self.output_file, clear=False)
        else:
            if not self.output_file:
                # No output file specified: write to stdout.
                sys.stdout.write(console.export_text(styles=True))
            else:
                # Don't output styles to text file.
                console.save_text(self.output_file, styles=False, clear=False)
        return True
        return _emoji_replace(text)

    def __repr__(self) -> str:
        """Return a debug representation built from this object's name."""
        return f"<emoji {self.name!r}>"

    def __str__(self) -> str:
        """Return the stored character for this emoji."""
        return self._char

    def __rich_console__(self, console: Console,
                         options: ConsoleOptions) -> RenderResult:
        """Render the emoji character using this object's style."""
        yield Segment(self._char, console.get_style(self.style))


if __name__ == "__main__":  # pragma: no cover
    import sys

    from rich.columns import Columns
    from rich.console import Console

    # Render every known emoji, skipping ZWJ sequences (names containing
    # U+200D), which many terminals render poorly.
    console = Console(record=True)

    emoji_cells = [
        f":{name}: {name}"
        for name in sorted(EMOJI.keys())
        if "\u200D" not in name
    ]
    console.print(Columns(emoji_cells, column_first=True))

    # If a path was given on the command line, save the output as HTML.
    if len(sys.argv) > 1:
        console.save_html(sys.argv[1])
Пример #4
0
# Fix: `box` (used via box.DOUBLE) and `Columns` were referenced below but
# never imported, so this script raised NameError at runtime.
from rich import box
from rich.columns import Columns
from rich.console import Console
from rich.panel import Panel
from rich.tree import Tree

# Record output so it can be exported as HTML at the end.
console = Console(record=True, width=100)

tree = Tree("🤓 [link=https://www.willmcgugan.com]Will McGugan", guide_style="bold cyan")
python_tree = tree.add("🐍 Python expert", guide_style="green")
python_tree.add("⭐ [link=https://github.com/willmcgugan/rich]Rich")
python_tree.add("⭐ [link=https://github.com/pyfilesystem/pyfilesystem2]PyFilesystem")
python_tree.add("⭐ [link=https://github.com/wildfoundry/dataplicity-lomond]Lomond")
full_stack_tree = tree.add("🔧 Full-stack developer")
tree.add("📘 Author")

about = """\
I'm a freelance software developer, living in [link=https://www.google.com/maps/place/Edinburgh/@55.9411289,-3.3454205,11z]Edinburgh[/], Scotland. Other than open source software development, my passion would be [link=https://www.willmcgugan.com/blog/photography/]wildlife photography[/].

[green]Follow me on twitter [bold link=https://twitter.com/willmcgugan]@willmcgugan[/]"""

panel = Panel.fit(
    about, box=box.DOUBLE, border_style="blue", title="[b]Hi there", width=60
)

console.print(Columns([panel, tree]))

# Custom HTML template: just the <pre> block, no full document scaffold.
CONSOLE_HTML_FORMAT = """\
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
"""

console.save_html("README.md", inline_styles=True, code_format=CONSOLE_HTML_FORMAT)
Пример #5
0
        row.append(', '.join(sorted(versions)))
    else:
        row.append('')

    if is_upper:
        upper_rows.append(row)
    else:
        bottom_rows.append(row)

# Alphabetize both halves of the table by package name (column 0).
upper_rows = sorted(upper_rows, key=lambda entry: entry[0])
bottom_rows = sorted(bottom_rows, key=lambda entry: entry[0])

for entry in upper_rows:
    table.add_row(*entry)
for entry in bottom_rows:
    # Close the table section after the final bottom row.
    table.add_row(*entry, end_section=(entry == bottom_rows[-1]))

# Append a totals row: available package count per architecture.
summary_row = ["Number of available packages"]
summary_row += [
    f"{num_pkgs_per_arch[arch]} / {len(availability)}" for arch in archs
]
summary_row.append("")
table.add_row(*summary_row, style="bold magenta")

console.print(table)
console.save_html(distro + ".html")
Пример #6
0
def main():
    """Summarize recent CRLite filter runs stored in a cloud bucket.

    For each recent run id this downloads stats, timestamps, CRL audit
    and enrollment data (caching audit files locally), prints a summary
    table, and — depending on CLI flags — prints per-run CRL audit
    tables and saves an HTML details report.
    """
    args = parser.parse_args()

    console = Console()

    # Maps run_id -> dict of collected per-run data.
    run_info = {}

    previous_timestamp = None

    for run_id in get_run_identifiers(get_bucket_url(args), count=args.count):
        run_data = {
            "filter_size": None,
            "stash_size": None,
            "crl_audit": None,
            "filter_layers": None,
            "knownrevoked": None,
            "knownnotrevoked": None,
            "timestamp": None,
            "coverage_period": None,
        }
        stats = json.loads(
            download_from_google_cloud_to_string(
                get_bucket_url(args), Path(run_id) / "mlbf" / "stats.json"
            )
        )
        run_data["filter_layers"] = f"{stats['mlbf_layers']}"

        if "knownrevoked" in stats:
            run_data["knownrevoked"] = f"{stats['knownrevoked']:,}"

        if "knownnotrevoked" in stats:
            run_data["knownnotrevoked"] = f"{stats['knownnotrevoked']:,}"

        if "mlbf_filesize" in stats:
            run_data["filter_size"] = size_to_str(stats["mlbf_filesize"])
        else:
            # Size missing from stats.json; fall back to object metadata.
            filter_metadata = metadata_from_google_cloud(
                get_bucket_url(args), Path(run_id) / "mlbf" / "filter"
            )
            run_data["filter_size"] = size_to_str(filter_metadata["size"])

        if "stash_filesize" in stats:
            run_data["stash_size"] = size_to_str(stats["stash_filesize"])
            run_data["stash_num_issuers"] = str(stats["stash_num_issuers"])
        else:
            try:
                stash_metadata = metadata_from_google_cloud(
                    get_bucket_url(args), Path(run_id) / "mlbf" / "filter.stash"
                )
                run_data["stash_size"] = size_to_str(stash_metadata["size"])
                run_data["stash_num_issuers"] = "n/a"
            except FileNotFoundException:
                # A run may have no stash at all; leave stash_size as None.
                pass

        ts = datetime.fromisoformat(
            download_from_google_cloud_to_string(
                get_bucket_url(args), Path(run_id) / "timestamp"
            )
        ).replace(tzinfo=timezone.utc)
        run_data["timestamp"] = ts
        if previous_timestamp:
            run_data["coverage_period"] = str(ts - previous_timestamp)
        previous_timestamp = ts

        # Cache the audit/enrollment JSON locally so reruns skip downloads.
        audit_dir_local = args.auditdb.expanduser()
        audit_dir_local.mkdir(exist_ok=True, parents=True)
        local_audit_path = audit_dir_local / f"{run_id}-crl-audit.json"
        try:
            if not local_audit_path.is_file():
                download_from_google_cloud(
                    get_bucket_url(args),
                    Path(run_id) / "crl-audit.json",
                    local_audit_path,
                )
        except FileNotFoundException:
            pass
        with local_audit_path.open("r") as jf:
            run_data["crl_audit"] = json.load(jf)

        local_enrolled_path = audit_dir_local / f"{run_id}-enrolled.json"
        try:
            if not local_enrolled_path.is_file():
                download_from_google_cloud(
                    get_bucket_url(args),
                    Path(run_id) / "enrolled.json",
                    local_enrolled_path,
                )
        except FileNotFoundException:
            pass
        with local_enrolled_path.open("r") as jf:
            run_data["enrolled"] = json.load(jf)

        run_info[run_id] = run_data

    all_runs = sorted(run_info.keys(), key=normalize_identifier, reverse=True)

    size_table = Table(title="Recent Run Data", show_header=True)
    size_table.add_column("Run ID")
    size_table.add_column("Run Time")
    size_table.add_column("Filter")
    size_table.add_column("Filter Layers")
    size_table.add_column("Enrolled Issuers")
    size_table.add_column("Stash")
    size_table.add_column("Known Revoked")
    size_table.add_column("Known Not Revoked")
    size_table.add_column("Period Covered")
    previous_timestamp = None
    for run_id in all_runs:
        if "enrolled" in run_info[run_id]:
            enrolled_len = str(
                len(list(filter(lambda x: x["enrolled"], run_info[run_id]["enrolled"])))
            )
        else:
            enrolled_len = "n/a"

        size_table.add_row(
            run_id,
            f"{run_info[run_id]['timestamp']:%Y-%m-%d %H:%M}Z",
            run_info[run_id]["filter_size"],
            run_info[run_id]["filter_layers"],
            enrolled_len,
            f"{run_info[run_id]['stash_size']} ({run_info[run_id]['stash_num_issuers']} issuers)",
            run_info[run_id]["knownrevoked"],
            run_info[run_id]["knownnotrevoked"],
            run_info[run_id]["coverage_period"],
        )
    console.print(size_table)

    if args.crl or args.crl_details or args.crl_details_all:
        summary_tables = list()
        # Render details off-screen; they are saved only if requested below.
        detail_console = Console(file=io.StringIO(), record=True)

        for run_id in all_runs:
            # Group this run's audit entries by issuer subject.
            issuer_to_crl_audit = {}
            for entry in filter(
                is_important_crl_audit_entry, run_info[run_id]["crl_audit"]["Entries"]
            ):
                if entry["IssuerSubject"] not in issuer_to_crl_audit:
                    issuer_to_crl_audit[entry["IssuerSubject"]] = {
                        "crls": [],
                        "issuerPubKeyHash": entry["Issuer"],
                    }
                issuer_to_crl_audit[entry["IssuerSubject"]]["crls"].append(entry)

            table = Table(
                title=f"{run_id} CRL Audit Entries by Issuer/Status", show_header=True
            )
            table.add_column("Issuer")
            table.add_column("Enrolled")
            table.add_column("Number of Failed CRLs")
            table.add_column("Number of Recovered CRLs")
            table.add_column("Number of Updated CRLs")
            table.add_column("Number of CRLs")

            detail_console.rule(f"{run_id} CRL Audit Entries")

            for issuerSubject in issuer_to_crl_audit:
                enrolled = is_enrolled(
                    issuer_to_crl_audit[issuerSubject]["issuerPubKeyHash"],
                    runinfo=run_info[run_id],
                )

                # for kind, crls in itertools.groupby(
                #     sorted(
                #         issuer_to_crl_audit[issuerSubject]["crls"],
                #         key=lambda x: x["Kind"],
                #     ),
                #     key=lambda x: x["Kind"],
                # ):
                #     num_entries = len(list(crls))
                #     table.add_row(
                #         issuerSubject,
                #         kind,
                #         enrolled,
                #         str(num_entries),
                #     )

                num_failed = 0
                num_recovered = 0
                num_updated = 0
                num_total = 0

                issuer_table = Table(
                    title=f"{issuerSubject} CRLs - {enrolled}", show_header=True
                )
                issuer_table.add_column("URL")
                issuer_table.add_column("Statuses")
                issuer_table.add_column("Details")

                for url, entries_grp in itertools.groupby(
                    issuer_to_crl_audit[issuerSubject]["crls"], key=lambda x: x["Url"]
                ):
                    num_total += 1

                    entries = list(entries_grp)
                    statuses = list(map(lambda x: x["Kind"], entries))

                    # A lone "Valid, Processed" is a clean update; mixed with
                    # other statuses it means the CRL eventually recovered.
                    if "Valid, Processed" in statuses:
                        if len(statuses) == 1:
                            num_updated += 1
                        else:
                            num_recovered += 1
                    else:
                        num_failed += 1

                    if (
                        len(statuses) == 1
                        and "Valid, Processed" in statuses
                        and not args.crl_details_all
                    ):
                        continue

                    issuer_table.add_row(
                        url,
                        f"{statuses}",
                        f"{entries}",
                    )
                    detail_console.print(issuer_table)

                table.add_row(
                    issuerSubject,
                    enrolled,
                    f"{num_failed}",
                    f"{num_recovered}",
                    f"{num_updated}",
                    f"{num_total}",
                )

            console.print(table)
            summary_tables.append(table)

        if args.crl_details:
            console.log(f"Writing CRL details to {args.crl_details}")
            detail_console.rule(f"Summary Tables")
            for table in summary_tables:
                detail_console.print(table)
            detail_console.save_html(args.crl_details)
Пример #7
0
print_table()

# Export 1: plain text. Note: exporting flushes the console's recording
# buffer unless clear=False is passed.
file1 = "table_export_plaintext.txt"
text = console.export_text()
with open(file1, "w") as fh:
    fh.write(text)
print(f"Exported console output as plain text to {file1}")

# The first export flushed the buffer, so render the table again.
print_table()

# Export 2: HTML, keeping the buffer intact via clear=False.
file2 = "table_export_html.html"
html = console.export_html(clear=False)
with open(file2, "w") as fh:
    fh.write(html)
print(f"Exported console output as html to {file2}")

# Export 3: save plain text straight to a file, buffer still intact.
file3 = "table_export_plaintext2.txt"
console.save_text(file3, clear=False)
print(f"Exported console output as plain text to {file3}")

# Export 4: save HTML straight to a file (this flushes the buffer).
file4 = "table_export_html2.html"
console.save_html(file4)
print(f"Exported console output as html to {file4}")
Пример #8
0
# Build one table row per package: a check/cross per architecture, then
# the union of version strings. Packages available on at least one arch
# go in the upper half of the table, the rest in the bottom half.
for pkg_name, arch_map in availability.items():
    row = [pkg_name]
    found_versions = set()
    any_available = False
    for arch in archs:
        arch_versions = arch_map.get(arch)
        if arch_versions:
            row.append("[green]✔")
            any_available = True
            found_versions |= arch_map[arch]
        else:
            row.append("[red]✖")

    row.append(', '.join(sorted(found_versions)) if found_versions else '')

    (upper_rows if any_available else bottom_rows).append(row)

for row in upper_rows + bottom_rows:
    table.add_row(*row)

console.print(table)
console.save_html("version_compare.html")
Пример #9
0
# Rank schools by total cases and report the top NUMBER_OF_SCHOOLS.
top_schools = sorted(data, key=lambda i: i['total_cases'],
                     reverse=True)[:NUMBER_OF_SCHOOLS]
for school in track(top_schools):
    school_name = school['school_or_school_district']
    total_cases = school['total_cases']
    new_student_cases = school['student_cases_new']
    new_staff_cases = school['staff_cases_new']
    number_of_students = get_num_students(school_name)
    if number_of_students is None:
        number_of_students = "N/A"
    # Fix: isinstance() is the idiomatic type check (was: type(...) is int).
    if isinstance(number_of_students, int):
        percentage = "{:.2%}".format(total_cases / number_of_students)
    else:
        percentage = "N/A"
    table.add_row(str(school_name), str(total_cases), str(new_student_cases),
                  str(new_staff_cases), str(number_of_students),
                  str(percentage))

console = Console(record=True)
console.print(table)
console.save_html('index.html')

style_html = "<style>body { text-align: center; font-family: monospace; margin: 2%; }</style>"
graphs_link_html = "<a href='/ohio-covid19/'>graphs</a>"

file_prepend('index.html', style_html)

# Fix: append via a context manager so the file handle is always closed,
# even if writelines raises (was: manual open/writelines/close).
with open('index.html', "a") as html_file:
    html_file.writelines(graphs_link_html)
Пример #10
0
    ]

    for sp_class in create_subparsers:
        # Each subcommand class registers its own argparse subparser.
        sp_class.add_subparser(subparsers)

    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # Caching may be disabled explicitly (--no-cache) or by the chosen
    # output format's configuration.
    no_cache = args.no_cache or FORMAT_OPTS[args.format]["disable_cache"]
    if no_cache:
        requests_cache.core.clear()

    # NOTE(review): credentials file is assumed to be YAML loadable by
    # SafeLoader into YesPlatformAPI keyword arguments — confirm format.
    with console.status("Connecting to yes® platform ...") as status:
        api = YesPlatformAPI(
            **load(args.credentials_file.read(), Loader=SafeLoader))

    data: Dataset = args.sp_class.get(api, args)

    # Render the dataset with the formatter configured for this format.
    FORMAT_OPTS[args.format]["function"](
        data,
        formatter=FORMAT_OPTS[args.format]["formatter"],
        cache_disabled=no_cache,
    )
    if args.export:
        console.save_html(args.export)
        console.print(f"[red]Output exported to {args.export}.")

    requests_cache.core.remove_expired_responses()
# Console logging with a timestamp and the source line of each call (debug aid).
print("\nDebug con timestamp e linea del codice che fa il log\n")
import time

for i in range(10):
    console.log(f"I am about to sleep={i}")
    time.sleep(0.2)
    # Fix: the message has no placeholders, so the f-prefix was pointless.
    console.log("But I am briefly awake now.")

# Console logging with local variables, optionally saved to a separate file.
print("\nDebug con variabili locali (rich.traceback)")

from rich.traceback import install
install()

def add_two(n1, n2):
    """Log the current local variables, then return the sum of n1 and n2."""
    console.log("About to add two numbers.", log_locals=True)
    result = n1 + n2
    return result


console = Console(record=True)
try:
    for i in range(10):
        time.sleep(0.2)
        add_two(1, i)
    # This call raises TypeError (int + str) to demonstrate the traceback.
    add_two(1, 'a')
except Exception:
    # Fix: print_exception was referenced but never called, so no traceback
    # was ever printed. Also narrowed the bare `except:` so that
    # KeyboardInterrupt/SystemExit are not swallowed.
    console.print_exception()
console.save_html("traceback.html")
Пример #12
0
from rich.console import Console
from rich.table import Table

# Demo: build a table of films and export the rendered output as HTML.
table = Table(title="Star Wars Movies")

table.add_column("Released", justify="right", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")

movies = (
    ("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690"),
    ("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347"),
    ("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889"),
    ("Dec 16, 2016", "Rouge One: A Star Wars Story", "$1,332,439,889"),
)
for released, film_title, gross in movies:
    table.add_row(released, film_title, gross)

console = Console(record=True)
console.print(table)
console.save_html("table.html", inline_styles=True)
Пример #13
0
from rich.console import Console
from time import sleep

# Record the log output so it can be saved as HTML at the end.
rc = Console(record=True)

rc.log("[blue]Welcome[/blue] to [green]rich![/green]")

for i in range(1, 10):
    rc.log(":apple:" * i)
    try:
        if i > 8:
            raise ValueError("Too much!")
    except ValueError:
        # Fix: catch only the exception actually raised here, and drop the
        # unused `as e` binding (was: `except Exception as e`).
        rc.log("Can't eat that much!")

rc.save_html('richie.html')