Example #1
 def connect_to_devices(self):
     """Connect to all the devices"""
     print(Panel.fit(Text.from_markup(GREETING)))
Example #2
def test_stylize():
    test = Text("Hello, World!")
    test.stylize("bold", 7, 11)
    assert test._spans == [Span(7, 11, "bold")]
    test.stylize("bold", 20, 25)
    assert test._spans == [Span(7, 11, "bold")]
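
The second stylize call targets offsets past the end of the text, so it is silently dropped. A minimal standalone sketch of the same behavior (assuming rich is installed; the public spans property mirrors the private _spans checked above):

from rich.text import Text

text = Text("Hello, World!")
text.stylize("bold", 7, 11)   # spans "Worl"
text.stylize("bold", 20, 25)  # start is past the end of the text: ignored
print(text.spans)             # [Span(7, 11, 'bold')]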
Example #3
def test_console_width():
    console = Console()
    test = Text("Hello World!\nfoobarbaz")
    assert test.__rich_measure__(console, 80) == Measurement(9, 12)
    assert Text(" " * 4).__rich_measure__(console, 80) == Measurement(4, 4)
Example #4
 def sentence(self, sent):
     sent_text = Text(sent, style="bold white on dark_green")
     console.print(sent_text)
Example #5
    def output_profile_line(
        self,
        fname: Filename,
        line_no: LineNumber,
        line: SyntaxLine,
        console: Console,
        tbl: Table,
        stats: ScaleneStatistics,
        profile_this_code: Callable[[Filename, LineNumber], bool],
        force_print: bool = False,
        suppress_lineno_print: bool = False,
        is_function_summary: bool = False,
        profile_memory: bool = False,
        reduced_profile: bool = False,
    ) -> bool:
        """Print at most one line of the profile (true == printed one)."""
        if not force_print and not profile_this_code(fname, line_no):
            return False
        current_max = stats.max_footprint
        # Prepare output values.
        n_cpu_samples_c = stats.cpu_samples_c[fname][line_no]
        # Correct for negative CPU sample counts. This can happen
        # because of floating point inaccuracies, since we perform
        # subtraction to compute it.
        if n_cpu_samples_c < 0:
            n_cpu_samples_c = 0
        n_cpu_samples_python = stats.cpu_samples_python[fname][line_no]
        n_gpu_samples = stats.gpu_samples[fname][line_no]

        # Compute percentages of CPU time.
        if stats.total_cpu_samples != 0:
            n_cpu_percent_c = n_cpu_samples_c * 100 / stats.total_cpu_samples
            n_cpu_percent_python = (n_cpu_samples_python * 100 /
                                    stats.total_cpu_samples)
        else:
            n_cpu_percent_c = 0
            n_cpu_percent_python = 0

        if stats.total_gpu_samples != 0:
            n_gpu_percent = n_gpu_samples * 100 / stats.total_gpu_samples
        else:
            n_gpu_percent = 0

        # Now, memory stats.
        # Accumulate each one from every byte index.
        n_malloc_mb = 0.0
        n_python_malloc_mb = 0.0
        n_free_mb = 0.0
        for index in stats.bytei_map[fname][line_no]:
            mallocs = stats.memory_malloc_samples[fname][line_no][index]
            n_malloc_mb += mallocs
            n_python_malloc_mb += stats.memory_python_samples[fname][line_no][
                index]
            frees = stats.memory_free_samples[fname][line_no][index]
            n_free_mb += frees

        n_usage_fraction = (0 if not stats.total_memory_malloc_samples else
                            n_malloc_mb / stats.total_memory_malloc_samples)
        n_python_fraction = (
            0 if not n_malloc_mb else n_python_malloc_mb /
            stats.total_memory_malloc_samples  # was / n_malloc_mb
        )

        if False:
            # Currently disabled; possibly use in another column?
            # Normalize by number of samples ("net *average*")
            for bytei in stats.memory_malloc_count[fname][
                    line_no]:  # type: ignore
                count = stats.memory_malloc_count[fname][line_no][bytei]
                if count > 0:
                    n_malloc_mb /= count
                    n_python_malloc_mb /= count
            for bytei in stats.memory_free_count[fname][line_no]:
                count = stats.memory_free_count[fname][line_no][bytei]
                if count > 0:
                    n_free_mb /= count

        n_growth_mb = n_malloc_mb - n_free_mb
        if -1 < n_growth_mb < 1:
            # Don't print out "-0" or anything below 1.
            n_growth_mb = 0

        n_cpu_percent = n_cpu_percent_c + n_cpu_percent_python
        n_sys_percent = n_cpu_percent * (
            1.0 - (stats.cpu_utilization[fname][line_no].mean()))

        # Adjust CPU time by utilization.
        n_cpu_percent_python *= stats.cpu_utilization[fname][line_no].mean()
        n_cpu_percent_c *= stats.cpu_utilization[fname][line_no].mean()

        # Finally, print results.
        n_cpu_percent_c_str: str = ("" if n_cpu_percent_c < 1 else
                                    f"{n_cpu_percent_c:5.0f}%")

        n_gpu_percent_str: str = ("" if n_gpu_percent < 1 else
                                  f"{n_gpu_percent:3.0f}%")

        n_cpu_percent_python_str: str = ("" if n_cpu_percent_python < 1 else
                                         f"{n_cpu_percent_python:5.0f}%")
        n_growth_mem_str = ""
        if n_growth_mb < 1024:
            n_growth_mem_str = ("" if
                                (not n_growth_mb and not n_usage_fraction) else
                                f"{n_growth_mb:5.0f}M")
        else:
            n_growth_mem_str = ("" if
                                (not n_growth_mb and not n_usage_fraction) else
                                f"{(n_growth_mb / 1024):5.2f}G")

        n_usage_fraction_str: str = ("" if n_usage_fraction < 0.01 else
                                     f"{(100 * n_usage_fraction):4.0f}%")
        n_python_fraction_str: str = ("" if n_python_fraction < 0.01 else
                                      f"{(n_python_fraction * 100):4.0f}%")
        n_copy_b = stats.memcpy_samples[fname][line_no]
        n_copy_mb_s = n_copy_b / (1024 * 1024 * stats.elapsed_time)
        n_copy_mb_s_str: str = ("" if n_copy_mb_s < 0.5 else
                                f"{n_copy_mb_s:6.0f}")

        # Only report utilization where there is more than 1% CPU total usage,
        # and the standard error of the mean is low (meaning it's an accurate estimate).
        sys_str: str = (
            "" if n_sys_percent < 1
            # or stats.cpu_utilization[fname][line_no].size() <= 1
            # or stats.cpu_utilization[fname][line_no].sem() > 0.025
            # or stats.cpu_utilization[fname][line_no].mean() > 0.99
            else f"{n_sys_percent:4.0f}%")
        if not is_function_summary:
            print_line_no = "" if suppress_lineno_print else str(line_no)
        else:
            print_line_no = ("" if fname not in stats.firstline_map else str(
                stats.firstline_map[fname]))
        if profile_memory:
            spark_str: str = ""
            # Scale the sparkline by the usage fraction.
            samples = stats.per_line_footprint_samples[fname][line_no]
            for i in range(0, len(samples.get())):
                samples.get()[i] *= n_usage_fraction
            if samples.get():
                _, _, spark_str = sparkline.generate(
                    samples.get()[0:samples.len()], 0, current_max)

            # Red highlight
            ncpps: Any = ""
            ncpcs: Any = ""
            nufs: Any = ""
            ngpus: Any = ""

            if (n_usage_fraction >= self.highlight_percentage
                    or (n_cpu_percent_c + n_cpu_percent_python + n_gpu_percent)
                    >= self.highlight_percentage):
                ncpps = Text.assemble((n_cpu_percent_python_str, "bold red"))
                ncpcs = Text.assemble((n_cpu_percent_c_str, "bold red"))
                nufs = Text.assemble(
                    (spark_str + n_usage_fraction_str, "bold red"))
                ngpus = Text.assemble((n_gpu_percent_str, "bold red"))
            else:
                ncpps = n_cpu_percent_python_str
                ncpcs = n_cpu_percent_c_str
                ngpus = n_gpu_percent_str
                nufs = spark_str + n_usage_fraction_str

            if not reduced_profile or ncpps + ncpcs + nufs:
                if self.gpu:
                    tbl.add_row(
                        print_line_no,
                        ncpps,  # n_cpu_percent_python_str,
                        ncpcs,  # n_cpu_percent_c_str,
                        sys_str,
                        ngpus,
                        n_python_fraction_str,
                        n_growth_mem_str,
                        nufs,  # spark_str + n_usage_fraction_str,
                        n_copy_mb_s_str,
                        line,
                    )
                else:
                    tbl.add_row(
                        print_line_no,
                        ncpps,  # n_cpu_percent_python_str,
                        ncpcs,  # n_cpu_percent_c_str,
                        sys_str,
                        n_python_fraction_str,
                        n_growth_mem_str,
                        nufs,  # spark_str + n_usage_fraction_str,
                        n_copy_mb_s_str,
                        line,
                    )
                return True
            else:
                return False

        else:

            # Red highlight
            if (n_cpu_percent_c + n_cpu_percent_python +
                    n_gpu_percent) >= self.highlight_percentage:
                ncpps = Text.assemble((n_cpu_percent_python_str, "bold red"))
                ncpcs = Text.assemble((n_cpu_percent_c_str, "bold red"))
                ngpus = Text.assemble((n_gpu_percent_str, "bold red"))
            else:
                ncpps = n_cpu_percent_python_str
                ncpcs = n_cpu_percent_c_str
                ngpus = n_gpu_percent_str

            if not reduced_profile or ncpps + ncpcs:
                if self.gpu:
                    tbl.add_row(
                        print_line_no,
                        ncpps,  # n_cpu_percent_python_str,
                        ncpcs,  # n_cpu_percent_c_str,
                        sys_str,
                        ngpus,  # n_gpu_percent_str
                        line,
                    )
                else:
                    tbl.add_row(
                        print_line_no,
                        ncpps,  # n_cpu_percent_python_str,
                        ncpcs,  # n_cpu_percent_c_str,
                        sys_str,
                        line,
                    )

                return True
            else:
                return False
Example #6
    def generate(
        self,
        prompt: str = "",
        temperature: float = 0.7,
        max_tokens: int = 32,
        stop: str = "",
        model: str = "davinci",
        bg: tuple = (31, 36, 40),
        accent: tuple = (0, 64, 0),
        pngquant: bool = False,
        output_txt: Optional[str] = None,
        output_img: Optional[str] = None,
        include_prompt: bool = True,
        include_coloring: bool = True,
        watermark: str = "Generated using GPT-3 via OpenAI's API",
    ):

        assert isinstance(stop, str), "stop is not a str."

        data = {
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stop": stop,
            "stream": True,
            "logprobs": 1,
        }

        console = Console(record=True)
        console.clear()

        if include_prompt:
            prompt_text = Text(prompt, style="bold", end="")
            console.print(prompt_text, end="")

        with httpx.stream(
                "POST",
                f"https://api.openai.com/v1/engines/{model}/completions",
                headers=self.headers,
                data=json.dumps(data),
                timeout=None,
        ) as r:
            for chunk in r.iter_text():
                text = chunk[6:]  # JSON chunks are prepended with "data: "
                if len(text) < 10 and "[DONE]" in text:
                    break

                temp_token = None
                logprobs = json.loads(text)["choices"][0]["logprobs"]
                tokens = logprobs["tokens"]
                token_logprobs = logprobs["token_logprobs"]
                for i in range(len(tokens)):
                    token = tokens[i]
                    log_prob = token_logprobs[i]

                    if token == stop or token == "<|endoftext|>":
                        break

                    if token.startswith("bytes:") and not temp_token:
                        # We need to hold the 2-byte token to the next 1-byte token
                        # to get the full bytestring to decode
                        #
                        # The API-returned tokens are in the form:
                        # "bytes:\xe2\x80" and "bytes:\x9d"
                        temp_token = token[6:]
                        temp_prob = log_prob
                    else:
                        if temp_token:
                            bytestring = temp_token + token[6:]

                            # https://stackoverflow.com/a/37059682/9314418
                            token = codecs.escape_decode(
                                bytestring, "utf-8")[0].decode("utf-8")
                            temp_token = None
                            log_prob = temp_prob  # the true prob is the first one
                        text = Text(
                            token,
                            style=f"on {self.derive_token_bg(log_prob, bg, accent, include_coloring)}",
                            end="",
                        )
                        console.print(text, end="")

        # Export the generated text as HTML.
        raw_html = self.replace_hex_colors(
            console.export_html(inline_styles=True,
                                code_format="{code}",
                                clear=False))

        # Render the HTML as an image
        prompt_hash = hashlib.sha256(bytes(prompt, "utf-8")).hexdigest()[0:8]
        temp_string = str(temperature).replace(".", "_")
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if output_img:
            img_file_name = output_img
        else:
            if not os.path.exists("img_output"):
                os.makedirs("img_output")
            img_file_name = f"img_output/{timestamp}__{prompt_hash}__{temp_string}.png"

        if self.imgmaker:
            self.imgmaker.generate(
                "dark.html",
                {
                    "html": raw_html.replace("\n", "</br>"),
                    "accent": f"rgb({accent[0]},{accent[1]},{accent[2]})",
                    "watermark": watermark,
                },
                width=450,
                height=600,
                downsample=False,
                output_file=img_file_name,
                use_pngquant=pngquant,
            )

        # Save the generated text to a plain-text file
        if output_txt:
            txt_file_name = output_txt
        else:
            if not os.path.exists("txt_output"):
                os.makedirs("txt_output")
            txt_file_name = f"txt_output/{prompt_hash}__{temp_string}.txt"

        with open(txt_file_name, "a", encoding="utf-8") as f:
            f.write(console.export_text() + "\n" + "=" * 20 + "\n")

        console.line()
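
The two-byte token handling above is the subtle part: the API splits multi-byte UTF-8 glyphs across tokens, so the first fragment is held until its continuation arrives. A standalone sketch of just the decode step (the token values are hypothetical but follow the "bytes:\xe2\x80" shape described in the comments):

import codecs

first = r"bytes:\xe2\x80"   # held as temp_token until the next fragment
second = r"bytes:\x9d"
bytestring = first[6:] + second[6:]                 # "\xe2\x80\x9d" as escaped text
raw = codecs.escape_decode(bytestring, "utf-8")[0]  # b'\xe2\x80\x9d'
print(raw.decode("utf-8"))                          # -> ” (right double quotation mark)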
Example #7
 def word(self, word):
     word_text = Text(word, style="bold white on navy_blue")
     console.print(word_text)
Example #8
def render_tables():
    console = Console(
        width=60,
        force_terminal=True,
        file=io.StringIO(),
        legacy_windows=False,
        color_system=None,
        _environ={},
    )

    table = Table(title="test table", caption="table caption", expand=False)
    table.add_column("foo",
                     footer=Text("total"),
                     no_wrap=True,
                     overflow="ellipsis")
    table.add_column("bar", justify="center")
    table.add_column("baz", justify="right")

    table.add_row("Averlongwordgoeshere", "banana pancakes", None)

    assert Measurement.get(console, console.options,
                           table) == Measurement(41, 48)
    table.expand = True
    assert Measurement.get(console, console.options,
                           table) == Measurement(41, 48)

    for width in range(10, 60, 5):
        console.print(table, width=width)

    table.expand = False
    console.print(table, justify="left")
    console.print(table, justify="center")
    console.print(table, justify="right")

    assert table.row_count == 1

    table.row_styles = ["red", "yellow"]
    table.add_row("Coffee")
    table.add_row("Coffee", "Chocolate", None, "cinnamon")

    assert table.row_count == 3

    console.print(table)

    table.show_lines = True
    console.print(table)

    table.show_footer = True
    console.print(table)

    table.show_edge = False

    console.print(table)

    table.padding = 1
    console.print(table)

    table.width = 20
    assert Measurement.get(console, console.options,
                           table) == Measurement(20, 20)
    table.expand = False
    assert Measurement.get(console, console.options,
                           table) == Measurement(20, 20)
    table.expand = True
    console.print(table)

    table.columns[0].no_wrap = True
    table.columns[1].no_wrap = True
    table.columns[2].no_wrap = True

    console.print(table)

    table.padding = 0
    table.width = 60
    table.leading = 1
    console.print(table)

    return console.file.getvalue()
Example #9
import re
from textwrap import dedent
from typing import Tuple

from rich.console import Console
from rich.text import Text, Lines

from pyvem._util import shell_dimensions
from pyvem._config import rich_theme
from rich.theme import Theme

_DEFAULT_WRAP_WIDTH = 80
_DEFAULT_PAD_SIZE = 4

_text = Text(tab_size=4)
_console = Console(theme=rich_theme)


def _rich_themed(text: str, style: str, theme: Theme = rich_theme) -> str:
    """
    Wraps a string in rich-formatted syntax.

    Arguments:
        text -- The text to return as rich-formatted
        style -- The name of an existing style from the rich theme.
        theme -- The name of an existing rich.Theme.
            NOTE: This should be custom styles belonging to the theme

    Returns:
        The wrapped string, ready to pass to a rich Console for output.
    """
    # Assumed completion: the original body was truncated here. The docstring
    # implies simple markup wrapping, e.g. '[bold]text[/bold]'; style names
    # are resolved against the theme when the Console renders the string.
    return f'[{style}]{text}[/{style}]'
Example #10
    def live(self, callback):
        try:
            with Live(
                    self.layout,
                    refresh_per_second=2,
                    screen=False,
                    redirect_stderr=False,
                    redirect_stdout=False,
            ) as live:
                while True:
                    if self.stop_flag:
                        break

                    if callback:
                        callback(self)

                    if not self.resources_by_endpoint:
                        self.layout["endpoints"].update(
                            Panel(
                                Align.center(
                                    Text(
                                        "Waiting for endpoints to come alive"),
                                    vertical="middle",
                                )))
                    else:
                        self.endpoints_layout["data"].update(
                            EndpointMonitor(self.resources_by_endpoint))

                        self.endpoints_values = []
                        updated_keys = set()
                        max_value = 0
                        for (
                                endpoint_name,
                                endpoint_data,
                        ) in self.resources_by_endpoint.items():
                            updated_keys.add(endpoint_name)  # todo: finish

                            total_used = 0
                            total_max = 0
                            for entry in endpoint_data.values():
                                total_used += entry.used
                                total_max += entry.available

                            max_value = max(max_value, total_max)

                            # self.endpoints_values.append(data)
                            past_entries = self.endpoints_past_values.setdefault(
                                endpoint_name, [])
                            past_entries.append(total_used)

                            self.endpoints_values.append(past_entries)

                        self.endpoints_graph = AsciiGraph(
                            self.endpoints_values, max_value, BACKEND_COLORS)
                        self.endpoints_layout["graph"].update(
                            self.endpoints_graph)

                        self.layout["endpoints"].update(
                            Panel(self.endpoints_layout, title="Endpoints"))

                    uptime = datetime.datetime.now() - self.start_time
                    self.layout["header"]["info"].update(
                        Align.right(
                            Text(f"""Node ID: {self.node_id}
                                Uptime: {humanize.naturaldelta(uptime)}
                                https://discord.gg/94KqBcE"""),
                            vertical="middle",
                        ))

                    titles = []

                    table = Table.grid()
                    table.add_column(style="green")
                    table.add_column(no_wrap=True)

                    self.cpu_usage[0].append(psutil.cpu_percent(interval=None))
                    self.ram_usage[0].append(
                        int(round(psutil.virtual_memory().used / 1024**2)))

                    total_gpus_actual = py3nvml.nvmlDeviceGetCount()
                    for i in range(total_gpus_actual):
                        handle = py3nvml.nvmlDeviceGetHandleByIndex(i)
                        meminfo = py3nvml.nvmlDeviceGetMemoryInfo(handle)
                        utilization_info = py3nvml.nvmlDeviceGetUtilizationRates(
                            handle)

                        table.add_row(
                            py3nvml.nvmlDeviceGetName(handle),
                            str(round(meminfo.used / 1024**2)),
                        )
                        self.gpu_mem_usage[i].append(
                            round(meminfo.used / 1024**2))
                        self.gpu_usage[i].append(utilization_info.gpu)

                        color = RICH_COLORS[i]
                        titles.append(
                            f"[{color}]" + py3nvml.nvmlDeviceGetName(handle) +
                            f" {utilization_info.gpu}%, {humanize.naturalsize(meminfo.used)}/{humanize.naturalsize(meminfo.total)}"
                            + "[/]")

                    self.gpu_layout["utilization"].update(
                        self.gpu_usage_graph, )
                    self.gpu_layout["memory"].update(self.gpu_mem_usage_graph)

                    self.layout["gpu"].update(
                        Panel(self.gpu_layout, title=" ".join(titles)))

                    self.cpu_layout["utilization"].update(
                        Panel(self.cpu_usage_graph))
                    self.cpu_layout["memory"].update(
                        Panel(self.ram_usage_graph))

                    self.cpu_layout["utilization"].update(
                        self.cpu_usage_graph, )
                    self.cpu_layout["memory"].update(self.ram_usage_graph)

                    self.layout["cpu"].update(
                        Panel(self.cpu_layout, title=CPU_NAME))

                    self.layout["console"].update(self.tail)

                    sleep(1.0)
        except KeyboardInterrupt as e:
            py3nvml.nvmlShutdown()
            raise e
Example #11
 def c_update(self,
              args,
              addline=False,
              update=True,
              force=False,
              provider=False):
     if len(self.core.cfCache) > 0 or len(self.core.wowiCache) > 0:
         self.core.cfCache = {}
         self.core.wowiCache = {}
         self.core.checksumCache = {}
     if args:
         addons = self.parse_args(args)
         compacted = -1
     else:
         addons = sorted(self.core.config['Addons'],
                         key=lambda k: k['Name'].lower())
         compacted = 0
     exceptions = []
     with Progress(
             '{task.completed:.0f}/{task.total}',
             '|',
             BarColumn(bar_width=None),
             '|',
             auto_refresh=False,
             console=None if self.headless else self.console) as progress:
         task = progress.add_task('', total=len(addons))
         if not args:
             self.core.bulk_check(addons)
             self.core.bulk_check_checksum(addons, progress)
         while not progress.finished:
             for addon in addons:
                 try:
                     # noinspection PyTypeChecker
                     name, versionnew, versionold, modified, blocked, source = self.core.\
                         update_addon(addon if isinstance(addon, str) else addon['URL'], update, force)
                     if provider:
                         source = f' [bold white]{source}[/bold white]'
                     else:
                         source = ''
                     if versionold:
                         if versionold == versionnew:
                             if modified:
                                 self.table.add_row(
                                     f'[bold red]Modified[/bold red]{source}',
                                     Text(name, no_wrap=True),
                                     Text(versionold, no_wrap=True))
                             else:
                                 if self.core.config[
                                         'CompactMode'] and compacted > -1:
                                     compacted += 1
                                 else:
                                     self.table.add_row(
                                         f'[green]Up-to-date[/green]{source}',
                                         Text(name, no_wrap=True),
                                         Text(versionold, no_wrap=True))
                         else:
                             if modified or blocked:
                                 self.table.add_row(
                                     f'[bold red]Update suppressed[/bold red]{source}',
                                     Text(name, no_wrap=True),
                                     Text(versionold, no_wrap=True))
                             else:
                                 self.table.add_row(
                                     f'[yellow]{"Updated" if update else "Update available"}'
                                     f'[/yellow]{source}',
                                     Text(name, no_wrap=True),
                                     Text(versionnew,
                                          style='yellow',
                                          no_wrap=True))
                     else:
                         self.table.add_row(
                             f'[bold black]Not installed[/bold black]{source}',
                             Text(addon, no_wrap=True),
                             Text('', no_wrap=True))
                 except Exception as e:
                     exceptions.append(e)
                 progress.update(task,
                                 advance=1 if args else 0.5,
                                 refresh=True)
     if addline:
         self.console.print('')
     self.console.print(self.table)
     if compacted > 0:
         self.console.print(
             f'Additionally [green]{compacted}[/green] addons are up-to-date.'
         )
     if len(addons) == 0:
         self.console.print(
             'Apparently there are no addons installed by CurseBreaker.\n'
             'Command [green]import[/green] might be used to detect already installed addons.'
         )
     if len(exceptions) > 0:
         self.handle_exception(exceptions, False)
Example #12
    def __init__(self, node_id, resources_by_endpoint) -> None:
        self.node_id = node_id
        self.resources_by_endpoint = resources_by_endpoint
        self.start_time = datetime.datetime.now()

        self.layout = Layout(name="root")
        self.layout.split(
            Layout(name="header", size=6),
            Layout(name="gpu", size=15),
            Layout(name="cpu", size=15),
            Layout(name="endpoints", size=19),
            Layout(name="console", ratio=1),
        )

        self.layout["header"].split_row(
            Layout(name="logo", size=70),
            Layout(name="info", ratio=1),
        )

        self.gpu_layout = Layout(name="gpu")
        self.gpu_layout.split_row(
            Layout(name="utilization", ratio=1),
            Layout(name="memory", ratio=1),
        )

        self.cpu_layout = Layout(name="cpu")
        self.cpu_layout.split_row(
            Layout(name="utilization", ratio=1),
            Layout(name="memory", ratio=1),
        )

        self.endpoints_layout = Layout(name="cpu")
        self.endpoints_layout.split_row(
            Layout(name="data", ratio=1),
            Layout(name="graph", ratio=1),
        )

        py3nvml.nvmlInit()

        self.gpu_mem_usage = [
            collections.deque(maxlen=150),
            collections.deque(maxlen=150),
        ]
        self.gpu_usage = [
            collections.deque(maxlen=150),
            collections.deque(maxlen=150)
        ]
        self.cpu_usage = [collections.deque(maxlen=150)]
        self.ram_usage = [collections.deque(maxlen=150)]

        self.layout["header"]["logo"].update(
            Text(figlet_format("PurpleSmart", font="slant"), style="magenta"))

        width, _ = shutil.get_terminal_size()
        self.console = Console(
            file=io.StringIO(),
            force_terminal=True,
            color_system="truecolor",
            width=width - 4,
        )
        logging.basicConfig(
            level="INFO",
            format="%(message)s",
            datefmt="[%X]",
            handlers=[RichHandler(rich_tracebacks=True, console=self.console)],
        )

        self.tail = LogTail(self.console)

        self.gpu_usage_graph = AsciiGraph(self.gpu_usage, 100)
        max_mem = []
        for i in range(py3nvml.nvmlDeviceGetCount()):
            handle = py3nvml.nvmlDeviceGetHandleByIndex(i)
            meminfo = py3nvml.nvmlDeviceGetMemoryInfo(handle)
            max_mem.append(meminfo.total)
        max_mem = int(round(max(max_mem) / 1024**2))
        self.gpu_mem_usage_graph = AsciiGraph(self.gpu_mem_usage, max_mem)

        self.cpu_usage_graph = AsciiGraph(self.cpu_usage, 100)
        self.ram_usage_graph = AsciiGraph(
            self.ram_usage,
            int(round(psutil.virtual_memory().total / 1024**2)))

        self.endpoints_past_values = {}
        self.stop_flag = False
Example #13
 def __rich_measure__(self, console: Console,
                      max_width: int) -> Measurement:
     return measure_renderables(console, console.options,
                                [Text("⬛" * self.total)])
Example #14
    def parse(self, section, steps):
        """ Testcase Setup section """
        for device_alias in testbed['devices']:
            print(Panel.fit(Text.from_markup(RUNNING)))

            # VS
            with steps.start('Requesting VS API Information',
                             continue_=True) as step:
                try:
                    self.raw_vs = requests.get(
                        "https://%s/mgmt/tm/ltm/virtual" % device_alias,
                        headers=headers,
                        verify=False)
                    print(Panel.fit(Text.from_markup(CLOUD)))
                except Exception as e:
                    step.failed(
                        'There was a problem with the API\n{e}'.format(e=e))

            with steps.start('Requesting SSL API Information',
                             continue_=True) as step:
                try:
                    self.raw_ssl_cert = requests.get(
                        "https://%s/mgmt/tm/sys/file/ssl-cert" % device_alias,
                        headers=headers,
                        verify=False)
                    print(Panel.fit(Text.from_markup(CLOUD)))
                except Exception as e:
                    step.failed(
                        'There was a problem with the API\n{e}'.format(e=e))

            # ---------------------------------------
            # Generate CSV / MD / HTML / Mind Maps
            # ---------------------------------------

            with steps.start('Store data', continue_=True) as step:
                print(Panel.fit(Text.from_markup(WRITING)))

                # VS
                if hasattr(self, 'raw_vs'):
                    self.vs_json = self.raw_vs.json()
                    vs_template = env.get_template('virtual_servers.j2')

                    with open(
                            "Camelot/F5/Virtual_Servers/%s_virtual_servers.json"
                            % device_alias, "w") as fid:
                        json.dump(self.vs_json, fid, indent=4, sort_keys=True)

                    with open(
                            "Camelot/F5/Virtual_Servers/%s_virtual_servers.yaml"
                            % device_alias, "w") as yml:
                        yaml.dump(self.vs_json, yml, allow_unicode=True)

                    for filetype in filetype_loop:
                        parsed_output_vs = vs_template.render(
                            to_parse_vs=self.vs_json['items'],
                            filetype_loop_jinja2=filetype)

                        with open(
                                "Camelot/F5/Virtual_Servers/%s_virtual_servers.%s"
                                % (device_alias, filetype), "w") as fh:
                            fh.write(parsed_output_vs)

                        os.system(
                            "markmap Camelot/F5/Virtual_Servers/%s_virtual_servers.md --output Camelot/F5/Virtual_Servers/%s_virtual_servers_mind_map.html"
                            % (device_alias, device_alias))

                # SSL Certificates
                if hasattr(self, 'raw_ssl_cert'):
                    self.ssl_cert_json = self.raw_ssl_cert.json()
                    ssl_cert_template = env.get_template('ssl_certificates.j2')

                    with open(
                            "Camelot/F5/SSL_Certificates/%s_ssl_certificates.json"
                            % device_alias, "w") as fid:
                        json.dump(self.ssl_cert_json,
                                  fid,
                                  indent=4,
                                  sort_keys=True)

                    with open(
                            "Camelot/F5/SSL_Certificates/%s_ssl_certificates.yaml"
                            % device_alias, "w") as yml:
                        yaml.dump(self.ssl_cert_json, yml, allow_unicode=True)

                    for filetype in filetype_loop:
                        parsed_output_ssl_cert = ssl_cert_template.render(
                            to_parse_ssl_cert=self.ssl_cert_json['items'],
                            filetype_loop_jinja2=filetype)

                        with open(
                                "Camelot/F5/SSL_Certificates/%s_ssl_certificates.%s"
                                % (device_alias, filetype), "w") as fh:
                            fh.write(parsed_output_ssl_cert)

                        os.system(
                            "markmap Camelot/F5/SSL_Certificates/%s_ssl_certificates.md --output Camelot/F5/SSL_Certificates/%s_ssl_certificates_mind_map.html"
                            % (device_alias, device_alias))

        # ---------------------------------------
        # You Made It
        # ---------------------------------------
        print(Panel.fit(Text.from_markup(FINISHED)))
Example #15
def render(
    markup: str,
    style: Union[str, Style] = "",
    emoji: bool = True,
    emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
    """Render console markup in to a Text instance.

    Args:
        markup (str): A string containing console markup.
        emoji (bool, optional): Also render emoji code. Defaults to True.

    Raises:
        MarkupError: If there is a syntax error in the markup.

    Returns:
        Text: A test instance.
    """
    emoji_replace = _emoji_replace
    if "[" not in markup:
        return Text(
            emoji_replace(markup, default_variant=emoji_variant)
            if emoji else markup,
            style=style,
        )
    text = Text(style=style)
    append = text.append
    normalize = Style.normalize

    style_stack: List[Tuple[int, Tag]] = []
    pop = style_stack.pop

    spans: List[Span] = []
    append_span = spans.append

    _Span = Span
    _Tag = Tag

    def pop_style(style_name: str) -> Tuple[int, Tag]:
        """Pop tag matching given style name."""
        for index, (_, tag) in enumerate(reversed(style_stack), 1):
            if tag.name == style_name:
                return pop(-index)
        raise KeyError(style_name)

    for position, plain_text, tag in _parse(markup):
        if plain_text is not None:
            append(emoji_replace(plain_text) if emoji else plain_text)
        elif tag is not None:
            if tag.name.startswith("/"):  # Closing tag
                style_name = tag.name[1:].strip()

                if style_name:  # explicit close
                    style_name = normalize(style_name)
                    try:
                        start, open_tag = pop_style(style_name)
                    except KeyError:
                        raise MarkupError(
                            f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
                        ) from None
                else:  # implicit close
                    try:
                        start, open_tag = pop()
                    except IndexError:
                        raise MarkupError(
                            f"closing tag '[/]' at position {position} has nothing to close"
                        ) from None

                if open_tag.name.startswith("@"):
                    if open_tag.parameters:
                        try:
                            meta_params = literal_eval(open_tag.parameters)
                        except SyntaxError as error:
                            raise MarkupError(
                                f"error parsing {open_tag.parameters!r}; {error.msg}"
                            )
                        except Exception as error:
                            raise MarkupError(
                                f"error parsing {open_tag.parameters!r}; {error}"
                            ) from None

                    else:
                        meta_params = ()

                    append_span(
                        _Span(start, len(text),
                              Style(meta={open_tag.name: meta_params})))
                else:
                    append_span(_Span(start, len(text), str(open_tag)))

            else:  # Opening tag
                normalized_tag = _Tag(normalize(tag.name), tag.parameters)
                style_stack.append((len(text), normalized_tag))

    text_length = len(text)
    while style_stack:
        start, tag = style_stack.pop()
        style = str(tag)
        if style:
            append_span(_Span(start, text_length, style))

    text.spans = sorted(spans[::-1], key=attrgetter("start"))
    return text
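
A quick sanity check of render(), as a sketch assuming rich is importable: the markup is stripped from the plain text and comes back as spans.

from rich.markup import render

text = render("[bold]Hello[/bold], [i]World[/i]!")
print(repr(text.plain))  # 'Hello, World!'
print(text.spans)        # spans for "Hello" (bold) and "World" (italic)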
Example #16
 def render(self, task) -> RenderableType:
     total = task.total if task.total != float("inf") else "--"
     return Text(f"{int(task.completed)}/{total}", style=self.style)
Example #17
def output_console(
    all_cve_data: Dict[ProductInfo, CVEData], console=Console(theme=cve_theme)
):
    """ Output list of CVEs in a tabular format with color support """

    now = datetime.now().strftime("%Y-%m-%d  %H:%M:%S")

    console.print(
        Markdown(
            textwrap.dedent(
                f"""
                # CVE BINARY TOOL
                - cve-bin-tool Report Generated: {now}
                """
            )
        )
    )

    remarks_colors = {
        Remarks.Mitigated: "green",
        Remarks.Confirmed: "red",
        Remarks.NewFound: "blue",
        Remarks.Unexplored: "yellow",
        Remarks.Ignored: "white",
    }

    cve_by_remarks: DefaultDict[Remarks, List[Dict[str, str]]] = defaultdict(list)
    # group cve_data by its remarks
    for product_info, cve_data in all_cve_data.items():
        for cve in cve_data["cves"]:
            cve_by_remarks[cve.remarks].append(
                {
                    "vendor": product_info.vendor,
                    "product": product_info.product,
                    "version": product_info.version,
                    "cve_number": cve.cve_number,
                    "severity": cve.severity,
                }
            )

    for remarks in sorted(cve_by_remarks):
        color = remarks_colors[remarks]
        console.print(Panel(f"[{color}] {remarks.name} CVEs [/{color}]", expand=False))
        # table instance
        table = Table()

        # Add Head Columns to the Table
        table.add_column("Vendor")
        table.add_column("Product")
        table.add_column("Version")
        table.add_column("CVE Number")
        table.add_column("Severity")

        for cve_data in cve_by_remarks[remarks]:
            color = cve_data["severity"].lower()
            table.add_row(
                Text.styled(cve_data["vendor"], color),
                Text.styled(cve_data["product"], color),
                Text.styled(cve_data["version"], color),
                linkify_cve(Text.styled(cve_data["cve_number"], color)),
                Text.styled(cve_data["severity"], color),
            )
        # Print the table to the console
        console.print(table)
Example #18
 def render(self, task) -> RenderableType:
     task_speed = f"{task.speed:>.2f}" if task.speed is not None else "0.00"
     return Text(f"{task_speed}it/s", style=self.style)
Example #19
 def time_formatter(timestamp: datetime) -> Text:
     return Text("TIME")
Example #20
def model_detailed_view(model: MLModel):
    # TODO: update the print fields
    dim_color = 'grey62'

    grid = Table.grid(padding=(0, 2))
    # Basic Info
    grid.add_row('ID',
                 'Architecture',
                 'Framework',
                 'Version',
                 'Pretrained Dataset',
                 'Metric',
                 'Score',
                 'Task',
                 style='b')
    grid.add_row(
        str(model.id),
        model.architecture,
        model.framework.name,
        str(model.version),
        model.dataset,
        list(model.metric.keys())[0].name,  # TODO: display multiple metrics
        str(list(model.metric.values())[0]),
        model.task.name.replace('_', ' '))

    converted_grid = Table.grid(padding=(0, 2))
    for i in range(5):
        converted_grid.add_column(style=dim_color, justify='right')
        converted_grid.add_column()
    # Converted model info
    time_delta = humanize.naturaltime(datetime.now().astimezone() -
                                      model.create_time)
    converted_grid.add_row(
        Text('Converted Model Info', style='bold cyan3', justify='left'))
    converted_grid.add_row(
        'Serving Engine',
        model.engine.name,
        'Status',
        status_mapper[model.status.name],
        'Creator',
        model.creator,
        'Created',
        time_delta,
    )

    first = True
    for input_ in model.inputs:
        converted_grid.add_row(
            Text('Inputs', style=f'b {dim_color}') if first else '',
            '',
            'Name',
            input_.name,
            'Shape',
            str(input_.shape),
            'Data Type',
            input_.dtype.name.split('_')[-1],
            'Format',
            input_.format.name,
        )
        first = False
    first = True
    for output_ in model.outputs:
        converted_grid.add_row(
            Text('Outputs', style=f'b {dim_color}') if first else '',
            '',
            'Name',
            output_.name,
            'Shape',
            str(output_.shape),
            'Data Type',
            output_.dtype.name.split('_')[-1],
            'Format',
            output_.format.name,
        )
        first = False
    converted_grid.add_row()

    # Profiling results
    converted_grid.add_row(
        Text('Profiling Results', style='bold cyan3', justify='left'))
    if not model.profile_result:
        converted_grid.add_row('N.A.')
    else:
        spr = model.profile_result['static_result']
        dprs = model.profile_result['dynamic_results']

        # Static profiling result
        converted_grid.add_row(
            Text('Static Result', style='bold turquoise2', justify='left'))
        if spr == 'N.A.':
            converted_grid.add_row(Text('N.A.', justify='left'))
        else:
            pass
        converted_grid.add_row()

        # Dynamic profiling results
        converted_grid.add_row(
            Text('Dynamic Results', style='bold turquoise2', justify='left'))
        for dpr in dprs:
            create_time = datetime.strptime(dpr['create_time'],
                                            '%Y-%m-%dT%H:%M:%S.%f')
            time_delta = humanize.naturaltime(datetime.now() - create_time)
            converted_grid.add_row(
                'Device Name',
                dpr['device_name'],
                'IP',
                dpr['ip'],
                'Device ID',
                dpr['device_id'],
                'Batch Size',
                str(dpr['batch']),
                'Created',
                time_delta,  # already humanized above
            )

            memory = dpr['memory']
            converted_grid.add_row(Text('GPU', style='b', justify='left'))
            converted_grid.add_row(
                'Memory Used',
                f'{humanize.naturalsize(memory["memory_usage"], binary=True)} '
                f'/ {humanize.naturalsize(memory["total_memory"], binary=True)}',
                'Util', f'{memory["utilization"] * 100:.1f} %')
            latency = dpr['latency']['inference_latency']
            converted_grid.add_row(
                Text('Inference Latency', style='b', justify='left'))
            converted_grid.add_row(
                'Average',
                f'{latency["avg"]:.3f} ms',
                'P50',
                f'{latency["p50"]:.3f} ms',
                'P95',
                f'{latency["p95"]:.3f} ms',
                'P99',
                f'{latency["p99"]:.3f} ms',
            )

            converted_grid.add_row(
                Text('Throughput', style='b', justify='left'))
            throughput = dpr['throughput']['inference_throughput']
            converted_grid.add_row('Inference', f'{throughput:.3f} req/s')

    console.print(grid)
    console.print(converted_grid)
Example #21
 def words(self, words, delimiter):
     word_text = Text(delimiter.join(words),
                      style="bold white on navy_blue")
     console.print(word_text)
Example #22
class ACMERichLogHandler(RichHandler):
    def __init__(self,
                 level: int = logging.NOTSET,
                 console: Console = None) -> None:
        super().__init__(level=level)

        # Add own styles to the current console object's styles
        self.console._styles['repr.dim'] = Style(color='grey70', dim=True)
        self.console._styles['repr.request'] = Style(color='spring_green2')
        self.console._styles['repr.response'] = Style(color='magenta2')
        self.console._styles['repr.id'] = Style(color='light_sky_blue1')
        self.console._styles['repr.url'] = Style(color='sandy_brown',
                                                 underline=True)
        self.console._styles['logging.level.debug'] = Style(color='grey50')
        self.console._styles['logging.level.warning'] = Style(color='orange3')
        self.console._styles['logging.level.error'] = Style(color='red',
                                                            reverse=True)

        # Set own highlights
        self.highlighter.highlights = [
            r"(?P<brace>[\{\[\(\)\]\}])",
            #r"(?P<tag_start>\<)(?P<tag_name>\w*)(?P<tag_contents>.*?)(?P<tag_end>\>)",
            #r"(?P<attrib_name>\w+?)=(?P<attrib_value>\"?\w+\"?)",
            r"(?P<bool_true>True)|(?P<bool_false>False)|(?P<none>None)",
            r"(?P<id>(?<!\w)\-?[0-9]+\.?[0-9]*\b)",
            r"(?P<number>0x[0-9a-f]*)",
            #r"(?P<filename>\/\w*\.\w{3,4})\s",
            r"(?<!\\)(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
            r"(?P<id>[\w\-_.]+[0-9]+\.?[0-9])",  # ID
            r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\~\+\!`\(\)\,\.\?\/\;\:\&\=\%]*)",
            #r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})",
            r"(?P<dim>^[0-9]+\.?[0-9]*\b - )",  # thread ident at front
            r"(?P<request>==>.*:)",  # Incoming request or response
            r"(?P<response><== [^ :]+[ :]+)",  # outgoing response or request
            r"(?P<number>\(RSC: [0-9]+\.?[0-9]\))",  # Result code
            r"(?P<id> [\w/\-_]*/[\w/\-_]+)",  # ID
            #r"(?P<id>(acp|ae|bat|cin|cnt|csest|dvi|grp|la|mem|nod|ol|sub)[0-9]+\.?[0-9])",		# ID
        ]

    def emit(self, record: LogRecord) -> None:
        """Invoked by logging."""
        #path = Path(record.pathname).name
        log_style = f"logging.level.{record.levelname.lower()}"
        message = self.format(record)
        path = ''
        lineno = 0
        threadID = 0
        if len(messageElements := message.split('*', 3)) == 4:
            path = messageElements[0]
            lineno = int(messageElements[1])
            threadID = int(messageElements[2])
            message = messageElements[3]
        time_format = None if self.formatter is None else self.formatter.datefmt
        log_time = datetime.datetime.fromtimestamp(record.created)

        level = Text()
        level.append(record.levelname, log_style)
        # message_text = Text("%d - %s" %(threading.current_thread().native_id, message))
        message_text = Text("%s - %s" % (threadID, message))
        message_text = self.highlighter(message_text)

        # # find caller on the stack
        # caller = inspect.getframeinfo(inspect.stack()[8][0])

        self.console.print(
            self._log_render(
                self.console,
                [message_text],
                log_time=log_time,
                time_format=time_format,
                level=level,
                path=path,
                line_no=lineno,
            )
            # self._log_render(
            # 	self.console,
            # 	[message_text],
            # 	log_time=log_time,
            # 	time_format=time_format,
            # 	level=level,
            # 	path=os.path.basename(caller.filename),
            # 	line_no=caller.lineno,
            # )
        )
Example #23
    def output_profiles(
        self,
        stats: ScaleneStatistics,
        pid: int,
        profile_this_code: Callable[[Filename, LineNumber], bool],
        python_alias_dir_name: Filename,
        python_alias_dir: Filename,
        profile_memory: bool = True,
        reduced_profile: bool = False,
    ) -> bool:
        """Write the profile out."""
        # Get the children's stats, if any.
        if not pid:
            stats.merge_stats(python_alias_dir_name)
            try:
                shutil.rmtree(python_alias_dir)
            except BaseException:
                pass
        current_max: float = stats.max_footprint
        # If we've collected any samples, dump them.
        if (not stats.total_cpu_samples
                and not stats.total_memory_malloc_samples
                and not stats.total_memory_free_samples):
            # Nothing to output.
            return False
        # Collect all instrumented filenames.
        all_instrumented_files: List[Filename] = list(
            set(
                list(stats.cpu_samples_python.keys()) +
                list(stats.cpu_samples_c.keys()) +
                list(stats.memory_free_samples.keys()) +
                list(stats.memory_malloc_samples.keys())))
        if not all_instrumented_files:
            # We didn't collect samples in source files.
            return False
        title = Text()
        mem_usage_line: Union[Text, str] = ""
        growth_rate = 0.0
        if profile_memory:
            samples = stats.memory_footprint_samples
            if len(samples.get()) > 0:
                # Output a sparkline as a summary of memory usage over time.
                _, _, spark_str = sparkline.generate(
                    samples.get()[0:samples.len()], 0, current_max)
                # Compute growth rate (slope), between 0 and 1.
                if stats.allocation_velocity[1] > 0:
                    growth_rate = (100.0 * stats.allocation_velocity[0] /
                                   stats.allocation_velocity[1])
                # If memory used is > 1GB, use GB as the unit.
                if current_max > 1024:
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (f" (max: {(current_max / 1024):6.2f}GB, growth rate: {growth_rate:3.0f}%)\n"
                         ),
                    )
                else:
                    # Otherwise, use MB.
                    mem_usage_line = Text.assemble(
                        "Memory usage: ",
                        ((spark_str, "blue")),
                        (f" (max: {current_max:6.2f}MB, growth rate: {growth_rate:3.0f}%)\n"
                         ),
                    )

        null = open("/dev/null", "w")

        # Get column width of the terminal and adjust to fit.
        # Note that Scalene works best with at least 132 columns.
        column_width = 132
        if not self.html:
            try:
                # If we are in a Jupyter notebook, stick with 132
                if "ipykernel" in sys.modules:
                    column_width = 132
                else:
                    column_width = shutil.get_terminal_size().columns
            except:
                pass

        console = Console(width=column_width,
                          record=True,
                          force_terminal=True,
                          file=null)
        # Build a list of files we will actually report on.
        report_files: List[Filename] = []
        # Sort in descending order of CPU cycles, and then ascending order by filename
        for fname in sorted(
                all_instrumented_files,
                key=lambda f: (-(stats.cpu_samples[f]), f),
        ):
            fname = Filename(fname)
            try:
                percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                    stats.total_cpu_samples)
            except ZeroDivisionError:
                percent_cpu_time = 0

            # Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
            if (stats.malloc_samples[fname] < self.malloc_threshold
                    and percent_cpu_time < self.cpu_percent_threshold):
                continue
            report_files.append(fname)

        # Don't actually output the profile if we are a child process.
        # Instead, write info to disk for the main process to collect.
        if pid:
            stats.output_stats(pid, python_alias_dir_name)
            return True

        if len(report_files) == 0:
            return False

        for fname in report_files:

            # If the file was actually a Jupyter (IPython) cell,
            # restore its name, as in "[12]".
            fname_print = fname
            import re

            result = re.match("<ipython-input-([0-9]+)-.*>", fname_print)
            if result:
                fname_print = Filename("[" + result.group(1) + "]")

            # Print header.
            if not stats.total_cpu_samples:
                percent_cpu_time = 0
            else:
                percent_cpu_time = (100 * stats.cpu_samples[fname] /
                                    stats.total_cpu_samples)
            new_title = mem_usage_line + (
                "%s: %% of time = %6.2f%% out of %6.2fs." %
                (fname_print, percent_cpu_time, stats.elapsed_time))
            # Only display total memory usage once.
            mem_usage_line = ""

            tbl = Table(
                box=box.MINIMAL_HEAVY_HEAD,
                title=new_title,
                collapse_padding=True,
                width=column_width - 1,
            )

            tbl.add_column("Line",
                           style="dim",
                           justify="right",
                           no_wrap=True,
                           width=4)
            tbl.add_column(Markdown("Time  " + "\n" + "_Python_"),
                           no_wrap=True,
                           width=6)
            tbl.add_column(Markdown("––––––  \n_native_"),
                           no_wrap=True,
                           width=6)
            tbl.add_column(Markdown("––––––  \n_system_"),
                           no_wrap=True,
                           width=6)
            if self.gpu:
                tbl.add_column(Markdown("––––––  \n_GPU_"),
                               no_wrap=True,
                               width=6)

            other_columns_width = 0  # Size taken up by all columns BUT code

            if profile_memory:
                tbl.add_column(Markdown("Memory  \n_Python_"),
                               no_wrap=True,
                               width=7)
                tbl.add_column(Markdown("––––––  \n_net_"),
                               no_wrap=True,
                               width=6)
                tbl.add_column(
                    Markdown("–––––––––––  \n_timeline_/%"),
                    no_wrap=True,
                    width=14,
                )
                tbl.add_column(Markdown("Copy  \n_(MB/s)_"),
                               no_wrap=True,
                               width=6)
                other_columns_width = 75 + (6 if self.gpu else 0)
                tbl.add_column(
                    "\n" + fname_print,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )
            else:
                other_columns_width = 37 + (5 if self.gpu else 0)
                tbl.add_column(
                    "\n" + fname_print,
                    width=column_width - other_columns_width,
                    no_wrap=True,
                )

            # Print out the profile for the source, line by line.
            with open(fname, "r") as source_file:
                # We track whether we should put in ellipsis (for reduced profiles)
                # or not.
                did_print = True  # did we print a profile line last time?
                code_lines = source_file.read()
                # Generate syntax highlighted version for the whole file,
                # which we will consume a line at a time.
                # See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
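                # Console.render_lines() returns one list of Segments per line,
                # which output_profile_line() can then place into table rows.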
                theme = "default" if self.html else "vim"
                syntax_highlighted = Syntax(
                    code_lines,
                    "python",
                    theme=theme,
                    line_numbers=False,
                    code_width=None,
                )
                capture_console = Console(
                    width=column_width - other_columns_width,
                    force_terminal=True,
                )
                formatted_lines = [
                    SyntaxLine(segments) for segments in
                    capture_console.render_lines(syntax_highlighted)
                ]
                for line_no, line in enumerate(formatted_lines, start=1):
                    old_did_print = did_print
                    did_print = self.output_profile_line(
                        fname,
                        LineNumber(line_no),
                        line,
                        console,
                        tbl,
                        stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=False,
                        is_function_summary=False,
                        reduced_profile=reduced_profile,
                    )
                    if old_did_print and not did_print:
                        # We are skipping lines, so add an ellipsis.
                        tbl.add_row("...")

            # Potentially print a function summary.
            fn_stats = stats.build_function_stats(fname)
            print_fn_summary = any(
                fn_name != fname for fn_name in fn_stats.cpu_samples_python)

            if print_fn_summary:
                tbl.add_row(None, end_section=True)
                txt = Text.assemble(f"function summary for {fname}",
                                    style="bold italic")
                if profile_memory:
                    if self.gpu:
                        tbl.add_row("", "", "", "", "", "", "", "", "", txt)
                    else:
                        tbl.add_row("", "", "", "", "", "", "", "", txt)
                else:
                    if self.gpu:
                        tbl.add_row("", "", "", "", "", txt)
                    else:
                        tbl.add_row("", "", "", "", txt)

                for fn_name in sorted(
                        fn_stats.cpu_samples_python,
                        key=lambda k: stats.firstline_map[k],
                ):
                    if fn_name == fname:
                        continue
                    theme = "default" if self.html else "vim"
                    syntax_highlighted = Syntax(
                        fn_name,
                        "python",
                        theme=theme,
                        line_numbers=False,
                        code_width=None,
                    )
                    # force print, suppress line numbers
                    self.output_profile_line(
                        fn_name,
                        LineNumber(1),
                        syntax_highlighted,  # type: ignore
                        console,
                        tbl,
                        fn_stats,
                        profile_this_code,
                        profile_memory=profile_memory,
                        force_print=True,
                        suppress_lineno_print=True,
                        is_function_summary=True,
                        reduced_profile=reduced_profile,
                    )

            console.print(tbl)

            # Report top K lines (currently 5) in terms of net memory consumption.
            net_mallocs: Dict[LineNumber, float] = defaultdict(float)
            for line_no in stats.bytei_map[fname]:
                for bytecode_index in stats.bytei_map[fname][line_no]:
                    net_mallocs[line_no] += (stats.memory_malloc_samples[fname]
                                             [line_no][bytecode_index] -
                                             stats.memory_free_samples[fname]
                                             [line_no][bytecode_index])
            net_mallocs = OrderedDict(
                sorted(net_mallocs.items(), key=itemgetter(1), reverse=True))
            if len(net_mallocs) > 0:
                console.print("Top net memory consumption, by line:")
                number = 1
                for net_malloc_lineno in net_mallocs:
                    if net_mallocs[net_malloc_lineno] <= 1:
                        break
                    if number > 5:
                        break
                    output_str = ("(" + str(number) + ") " +
                                  ("%5.0f" % (net_malloc_lineno)) + ": " +
                                  ("%5.0f" %
                                   (net_mallocs[net_malloc_lineno])) + " MB")
                    console.print(output_str)
                    number += 1

            # Only report potential leaks if the allocation velocity (growth rate) is above some threshold
            # FIXME: fixed at 1% for now.
            # We only report potential leaks where the confidence interval is quite tight and includes 1.
            growth_rate_threshold = 0.01
            leak_reporting_threshold = 0.05
            leaks = []
            if growth_rate / 100 > growth_rate_threshold:
                keys = list(stats.leak_score[fname].keys())
                for index, item in enumerate(stats.leak_score[fname].values()):
                    # See https://en.wikipedia.org/wiki/Rule_of_succession
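                    # With a allocs and f frees observed, this Laplace estimate of
                    # P(next event is a free) is (f + 1) / (a + f + 2); values near
                    # zero mean allocations are rarely freed, i.e. a likely leak.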
                    frees = item[1]
                    allocs = item[0]
                    expected_leak = (frees + 1) / (frees + allocs + 2)
                    if expected_leak <= leak_reporting_threshold:
                        leaks.append((
                            keys[index],
                            1 - expected_leak,
                            net_mallocs[keys[index]],
                        ))
                if len(leaks) > 0:
                    # Report in descending order of leak likelihood.
                    for leak in sorted(leaks, key=itemgetter(1), reverse=True):
                        output_str = (
                            "Possible memory leak identified at line " +
                            str(leak[0]) + " (estimated likelihood: " +
                            ("%3.0f" %
                             (leak[1] * 100)) + "%" + ", velocity: " +
                            ("%3.0f MB/s" %
                             (leak[2] / stats.elapsed_time)) + ")")
                        console.print(output_str)

        if self.html:
            # Write HTML file.
            md = Markdown(
                "generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
            )
            console.print(md)
            if not self.output_file:
                self.output_file = "/dev/stdout"
            console.save_html(self.output_file, clear=False)
        else:
            if not self.output_file:
                # No output file specified: write to stdout.
                sys.stdout.write(console.export_text(styles=True))
            else:
                # Don't output styles to text file.
                console.save_text(self.output_file, styles=False, clear=False)
        return True
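
The line-by-line highlighting above hinges on Console.render_lines(), which yields one list of Segments per rendered line. A minimal standalone sketch of the same trick (SegmentLine is a hypothetical stand-in for Scalene's SyntaxLine; only rich is assumed):

from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table

SOURCE = "def add(a, b):\n    return a + b\n"

class SegmentLine:
    # Replays one pre-rendered line of Segments as a renderable.
    def __init__(self, segments):
        self._segments = segments

    def __rich_console__(self, console, options):
        yield from self._segments

console = Console()
syntax = Syntax(SOURCE, "python", theme="vim", line_numbers=False)
# render_lines() returns one list of Segments per line of the renderable.
rendered = Console(width=40).render_lines(syntax)
tbl = Table("Line", "Code")
for no, segments in enumerate(rendered, start=1):
    tbl.add_row(str(no), SegmentLine(segments))
console.print(tbl)
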
Example #24
0
 def __rich__(self) -> Panel:
     document, content = self.open_current_document()
     return Panel(Text(content, justify='left'), title=document)
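
This relies on Rich's __rich__ protocol: console.print(obj) calls obj.__rich__() and renders whatever it returns. A self-contained sketch with a hypothetical Document class standing in for the snippet's:

from rich.console import Console
from rich.panel import Panel
from rich.text import Text

class Document:
    # Hypothetical document holder for illustration.
    def __init__(self, title, body):
        self.title, self.body = title, body

    def __rich__(self) -> Panel:
        return Panel(Text(self.body, justify="left"), title=self.title)

# print() resolves the object through __rich__() before rendering.
Console().print(Document("notes.txt", "Hello from __rich__"))
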
Example #25
0
def test_rstrip_end():
    test = Text("Hello, World!    ")
    test.rstrip_end(14)
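    # rstrip_end(14) trims trailing whitespace so the text is at most 14 chars.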
    assert str(test) == "Hello, World! "
Example #26
0
def main():
    parser = argparse.ArgumentParser(description='shaku')
    parser.add_argument('--dir',
                        dest='dir',
                        type=str,
                        required=True,
                        help='Directory of documents to annotate')
    parser.add_argument('--tags',
                        dest='tags',
                        nargs='+',
                        required=True,
                        help='List of tags or categories')
    parser.add_argument('--urls', dest='urls', nargs='+', help='List of URLs')
    parser.add_argument('--json',
                        dest='json',
                        type=str,
                        help='Output JSON file')
    parser.add_argument('--yaml',
                        dest='yaml',
                        type=str,
                        help='Output yaml file')
    parser.add_argument('--numpy',
                        dest='numpy',
                        type=str,
                        help='Output numpy file')
    args = parser.parse_args()

    docLoader = DocumentLoader(args)

    console = Console()
    layout = Layout()

    layout.split(
        Layout(name="tags_banner", size=1),
        Layout(name="tags", size=3),
        Layout(name="main"),
        Layout(name="progressbar", size=3),
    )

    layout['tags_banner'].update(
        Align.center(Text('Categories', justify='center'), vertical='top'))

    layout['tags'].update(Align.center(docLoader.tagMgr))

    layout['progressbar'].update(Align.center(docLoader.progressMgr))

    layout['main'].update(docLoader)

    # On macOS, the keyboard module must be run with sudo.
    def handleLeftKey(e):
        docLoader.prevTag()

    def handleRightKey(e):
        docLoader.nextTag()

    def handleDownKey(e):
        docLoader.nextDoc()

    def handleUpKey(e):
        docLoader.prevDoc()

    def handleEnterKey(e):
        docLoader.saveDoc()

    def handleQuit():
        yaml_file = args.dir + '/.shaku_config.yaml'
        with open(yaml_file, 'w') as f:
            yaml.dump({'_last_doc': docLoader.currentDoc()}, f)

    keyboard.on_press_key("left", handleLeftKey)
    keyboard.on_press_key("right", handleRightKey)
    keyboard.on_press_key("down", handleDownKey)
    keyboard.on_press_key("up", handleUpKey)
    keyboard.on_press_key("enter", handleEnterKey)

    with Live(layout, screen=True):
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            handleQuit()
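
The screen-filling UI above combines named Layout regions with Live. A minimal sketch of that pattern, with placeholder content in place of the annotator's widgets:

from time import sleep
from rich.layout import Layout
from rich.live import Live
from rich.text import Text

layout = Layout()
layout.split(
    Layout(name="banner", size=1),
    Layout(name="main"),
)
layout["banner"].update(Text("Categories", justify="center"))

# screen=True takes over the whole terminal for the duration of the block.
with Live(layout, screen=True):
    for i in range(3):
        layout["main"].update(Text(f"document {i}"))
        sleep(1)
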
Example #27
0
def test_stylize_negative_index():
    test = Text("Hello, World!")
    test.stylize("bold", -6, -1)
    assert test._spans == [Span(7, 12, "bold")]
Example #28
0
def make_test_card() -> Table:
    """Get a renderable that demonstrates a number of features."""
    table = Table.grid(padding=1, pad_edge=True)
    table.title = "Rich features"
    table.add_column("Feature",
                     no_wrap=True,
                     justify="center",
                     style="bold red")
    table.add_column("Demonstration")

    color_table = Table(
        box=None,
        expand=False,
        show_header=False,
        show_edge=False,
        pad_edge=False,
    )
    color_table.add_row(
        # "[bold yellow]256[/] colors or [bold green]16.7 million[/] colors [blue](if supported by your terminal)[/].",
        ("✓ [bold green]4-bit color[/]\n"
         "✓ [bold blue]8-bit color[/]\n"
         "✓ [bold magenta]Truecolor (16.7 million)[/]\n"
         "✓ [bold yellow]Dumb terminals[/]\n"
         "✓ [bold cyan]Automatic color conversion"),
        ColorBox(),
    )

    table.add_row("Colors", color_table)

    table.add_row(
        "Styles",
        "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
    )

    lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
    lorem_table = Table.grid(padding=1, collapse_padding=True)
    lorem_table.pad_edge = False
    lorem_table.add_row(
        Text(lorem, justify="left", style="green"),
        Text(lorem, justify="center", style="yellow"),
        Text(lorem, justify="right", style="blue"),
        Text(lorem, justify="full", style="red"),
    )
    table.add_row(
        "Text",
        RenderGroup(
            Text.from_markup(
                """Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
            ),
            lorem_table,
        ),
    )

    def comparison(renderable1, renderable2) -> Table:
        table = Table(show_header=False, pad_edge=False, box=None, expand=True)
        table.add_column("1", ratio=1)
        table.add_column("2", ratio=1)
        table.add_row(renderable1, renderable2)
        return table

    table.add_row(
        "Asian\nlanguage\nsupport",
        ":flag_for_china:  该库支持中文,日文和韩文文本!\n:flag_for_japan:  ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea:  이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
    )

    markup_example = (
        "[bold magenta]Rich[/] supports a simple [i]bbcode[/i] like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
        ":+1: :apple: :ant: :bear: :baguette_bread: :bus: ")
    table.add_row("Markup", markup_example)

    example_table = Table(
        show_edge=False,
        show_header=True,
        expand=False,
        row_styles=["none", "dim"],
        box=box.SIMPLE,
    )
    example_table.add_column("[green]Date", style="green", no_wrap=True)
    example_table.add_column("[blue]Title", style="blue")
    example_table.add_column(
        "[cyan]Production Budget",
        style="cyan",
        justify="right",
        no_wrap=True,
    )
    example_table.add_column(
        "[magenta]Box Office",
        style="magenta",
        justify="right",
        no_wrap=True,
    )
    example_table.add_row(
        "Dec 20, 2019",
        "Star Wars: The Rise of Skywalker",
        "$275,000,000",
        "$375,126,118",
    )
    example_table.add_row(
        "May 25, 2018",
        "[b]Solo[/]: A Star Wars Story",
        "$275,000,000",
        "$393,151,347",
    )
    example_table.add_row(
        "Dec 15, 2017",
        "Star Wars Ep. VIII: The Last Jedi",
        "$262,000,000",
        "[bold]$1,332,539,889[/bold]",
    )
    example_table.add_row(
        "May 19, 1999",
        "Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
        "$115,000,000",
        "$1,027,044,677",
    )

    table.add_row("Tables", example_table)

    code = '''\
def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
    """Iterate and generate a tuple with a flag for last value."""
    iter_values = iter(values)
    try:
        previous_value = next(iter_values)
    except StopIteration:
        return
    for value in iter_values:
        yield False, previous_value
        previous_value = value
    yield True, previous_value'''

    pretty_data = {
        "foo": [
            3.1427,
            (
                "Paul Atriedies",
                "Vladimir Harkonnen",
                "Thufir Haway",
            ),
        ],
        "atomic": (False, True, None),
    }
    table.add_row(
        "Syntax\nhighlighting\n&\npretty\nprinting",
        comparison(
            Syntax(code, "python3", line_numbers=True, indent_guides=True),
            Pretty(pretty_data, indent_guides=True),
        ),
    )

    markdown_example = """\
# Markdown

Supports much of the *markdown* __syntax__!

- Headers
- Basic formatting: **bold**, *italic*, `code`
- Block quotes
- Lists, and more...
    """
    table.add_row(
        "Markdown",
        comparison("[cyan]" + markdown_example, Markdown(markdown_example)))

    table.add_row(
        "+more!",
        """Progress bars, columns, styled logging handler, tracebacks, etc...""",
    )
    return table
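
The comparison() helper above works because columns with equal ratio split an expanded table's width evenly. The same idea in isolation:

from rich.console import Console
from rich.table import Table

def comparison(left, right) -> Table:
    # Two columns with ratio=1 each take exactly half the expanded width.
    table = Table(show_header=False, pad_edge=False, box=None, expand=True)
    table.add_column(ratio=1)
    table.add_column(ratio=1)
    table.add_row(left, right)
    return table

Console().print(comparison("[green]left half", "[blue]right half"))
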
Example #29
0
def test_join():
    test = Text("bar").join([Text("foo", "red"), Text("baz", "blue")])
    assert str(test) == "foobarbaz"
    assert test._spans == [Span(0, 3, "red"), Span(6, 9, "blue")]
Example #30
0
        self.set_ENABLE(True)
        self.set_STOP(True)
        self.set_HALT(True)
        self.read()
        # time.sleep(3)
        self.write()
        # time.sleep(3)
        self.read()
        # time.sleep(3)
        test_positions = [1, 3, 6, 1, 6, 2, 5, 1, 6]
        for p in test_positions:
            self.position = p
            self.set_START(True)
            self.write()
            # while not self.is_ACK():
            #     self.read()
            self.set_START(False)
            self.write()
            self.read()
            while not self.is_MC():
                self.read()
            # time.sleep(10)
            # while not self.is_HALT():
            #     await self.read(con)


lama_1 = LamaTest('192.168.25.101', 502)  # Modbus/TCP endpoint (port 502)
print(Text.from_markup(str(lama_1)))
lama_1.move_to_pos(1)
lama_1.exit_lama()
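
The test loop above is a pulse-and-poll handshake: raise START, write, clear START, then poll until the motion-complete flag is set. A schematic sketch with a stub in place of the Modbus transport (LamaTest's internals are not shown, so every name here is hypothetical):

import time

class StubDrive:
    # Hypothetical stand-in for LamaTest's Modbus-backed flag interface.
    def __init__(self):
        self.position = 0
        self._polls = 0

    def set_START(self, value):
        self.start = value

    def write(self):
        pass  # would push coil/register state over Modbus/TCP here

    def read(self):
        self._polls += 1  # would refresh inputs over Modbus/TCP here

    def is_MC(self):
        return self._polls >= 3  # pretend motion completes after 3 polls

def move_to(drive, position):
    drive.position = position
    drive.set_START(True)   # pulse START high...
    drive.write()
    drive.set_START(False)  # ...then low again
    drive.write()
    drive.read()
    while not drive.is_MC():  # poll until Motion Complete
        drive.read()
        time.sleep(0.1)

move_to(StubDrive(), 3)
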