Example #1
0
def test_rich_print_json():
    """rich.print_json should honor ``indent=`` when given a JSON string."""
    console = rich.get_console()
    with console.capture() as capture:
        rich.print_json('[false, true, null, "foo"]', indent=4)
    captured = capture.get()
    print(repr(captured))
    assert captured == '[\n    false,\n    true,\n    null,\n    "foo"\n]\n'
Example #2
0
def test_rich_print_json_no_truncation():
    """Long strings and very large ints must survive print_json unmodified."""
    long_str = "x" * 100
    big_int = int(2e128)
    console = rich.get_console()
    with console.capture() as capture:
        rich.print_json(f'["{long_str}", {big_int}]', indent=4)
    output = capture.get()
    print(repr(output))
    assert long_str in output
    assert str(big_int) in output
Example #3
0
def test_rich_print_json_round_trip():
    """Data passed via ``data=`` must round-trip through json.loads."""
    payload = ["x" * 100, 2e128]
    console = rich.get_console()
    with console.capture() as capture:
        rich.print_json(data=payload, indent=4)
    rendered = capture.get()
    print(repr(rendered))
    assert json.loads(rendered) == payload
def display_output(output, save=None):
    """Render an API response dict as highlighted JSON.

    Exits the process with status 1 on any non-200 ``code``.  When *save*
    is truthy, the payload is also dumped to
    ``<save_output_dir>/<calling command>.json``.

    :param output: response dict with ``code`` and ``msg`` keys
    :param save: whether to persist ``msg`` to disk before printing
    """
    # Guard clause: error responses print a (rich-markup) message and exit.
    if output["code"] != 200:
        print(
            f"\n\n[red]Error Status Code: {output['code']} : {output['msg']}[/red]"
        )
        exit(1)
    # Name of the CLI command two frames up names the saved file.
    command_called = inspect.stack()[2][3]
    if save:
        with open(f"{save_output_dir}/{command_called}.json",
                  "w+") as file:
            json.dump(output["msg"], file, indent=4)
    from rich import print_json

    print_json(json.dumps(output["msg"]))
Example #5
0
 def print_json(
     cls,
     data: Any,
     file: IO | None = None,
     **kwargs: Any,
 ) -> Any:
     '''Print highlighted json to stdout depending if we are in a TTY'''
     file = file if file is not None else sys.stdout
     if file.isatty():
         rich.print_json(
             data=data,
             highlight=cls.config.color,
             **kwargs,
         )
     else:
         print(json.dumps(data, **kwargs), file=file)
def render_single_result(
    ctx: JobbergateContext,
    result: Union[Dict[str, Any], pydantic.BaseModel],
    hidden_fields: Optional[List[str]] = None,
    title: str = "Result",
):
    """
    Render a single data item in a ``rich`` ``Table``.

    :param ctx:           The JobbergateContext. This is needed to detect if ``full`` or ``raw`` output is needed
    :param result:        The data item to display. May be a dict or a pydantic model.
    :param hidden_fields: Rows that should (if not using ``full`` mode) be hidden in the ``Table`` output
    :param title:         The title header to include above the ``Table`` output
    """
    if isinstance(result, pydantic.BaseModel):
        # pydantic v1 API; v2 renames this to model_dump()
        result = result.dict()
    if ctx.raw_output:
        # Raw mode: syntax-highlighted JSON instead of a table.
        print_json(json.dumps(result))
    else:
        if ctx.full_output or hidden_fields is None:
            hidden_fields = []
        render_dict(result, hidden_fields=hidden_fields, title=title)
Example #7
0
def lookup(ctx, resource, fields, api_key, pretty_print, raw, copy, exclude):
    """
    Lookup resources by using the IPData class methods.

    :param resource: The resource to lookup
    :param fields: A list of supported fields passed as multiple parameters eg. "... -f ip -f country_name"
    :param api_key: A valid API key
    :param pretty_print: Whether to pretty print the response with panels
    :param raw: Whether to print raw unformatted but syntax-highlighted JSON
    :param copy: Copy the response to the clipboard
    :param exclude: Fields to omit from the response (mutually exclusive with ``fields``)
    """
    api_key = api_key if api_key else ctx.obj["api-key"]
    ipdata = IPData(api_key)

    # enforce mutual exclusivity of fields and exclude
    if exclude and fields:
        raise click.ClickException(
            "'--fields / -f' and '--exclude / -e' are mutually exclusive.")

    # if the user wants to exclude some fields, get all the fields in fields that are not in exclude
    if exclude:
        fields = set(ipdata.valid_fields).difference(set(exclude))

    with console.status(
            f"""Looking up {resource if resource else "this device's IP address"}""",
            spinner="dots12",
    ):
        data = _lookup(ipdata, resource, fields=fields)

    if copy:
        pyperclip.copy(json.dumps(data))
        # fix: was an f-string with no placeholders (ruff F541)
        print("📋️ Copied result to clipboard!")
    elif raw:
        print(data)
    elif pretty_print:
        pretty_print_data(data)
    else:
        print_json(data=data)
Example #8
0
 def __str__(self):
     """Return the pretty-printed, key-sorted JSON rendering of this model.

     Bug fix: ``rich.print_json`` returns ``None`` (it prints as a side
     effect), so the old ``str(rich.print_json(...))`` always returned
     the literal string ``"None"`` while printing to the console as a
     side effect.  Capture the console output and return it instead.
     """
     console = rich.get_console()
     with console.capture() as capture:
         rich.print_json(data=json.loads(self.json()), sort_keys=True)
     return capture.get()
Example #9
0
 def json_print(self, obj):
     """Pretty-print *obj* as syntax-highlighted JSON.

     Best-effort: if *obj* cannot be rendered as JSON (e.g. it is not
     JSON-serializable), fall back to a plain ``pprint``.  The broad
     ``except`` is deliberate — any rendering failure should degrade
     gracefully rather than crash.
     """
     try:
         print_json(data=obj)
     except Exception:
         pprint(obj)
Example #10
0
    def _run_flow(
        self,
        flow_class: Union[str, Type[Flow]],
        design: Design,
        flow_settings: Union[None, Dict[str, Any], Flow.Settings],
        depender: Optional[Flow],
    ) -> Flow:
        """Instantiate and execute a flow (and its dependency flows) for *design*.

        :param flow_class: a ``Flow`` subclass, or its registered name (resolved
            via ``get_flow_class``).
        :param design: the design to run the flow on.
        :param flow_settings: ``None`` (defaults), a plain dict, or a
            ``Flow.Settings`` instance; normalized to ``flow_class.Settings``.
        :param depender: the flow that depends on this run, if any.  When set
            (and ``self.cached_dependencies`` is enabled), results from a
            matching previous run may be reused instead of re-running.
        :returns: the completed (or cache-restored) ``Flow`` instance.
        """
        if self.run_in_existing_dir:
            log.error(
                "run_in_existing_dir should only be used during Xeda's development!"
            )
        if isinstance(flow_class, str):
            flow_class = get_flow_class(flow_class)
        # Normalize settings to a dict, then validate via the flow's Settings model.
        if flow_settings is None:
            flow_settings = {}
        elif isinstance(flow_settings, Flow.Settings):
            flow_settings = asdict(flow_settings)
        if self.debug:
            print("flow_settings: ", flow_settings)
        flow_settings = flow_class.Settings(**flow_settings)
        if self.debug:
            flow_settings.debug = True

        flow_name = flow_class.name
        # GOTCHA: design contains tb settings even for simulation flows
        # removing tb from hash for sim flows creates a mismatch for different flows of the same design
        design_hash = _semantic_hash(design)
        flowrun_hash = _semantic_hash(
            dict(
                flow_name=flow_name,
                flow_settings=flow_settings,
                xeda_version=__version__,
            ), )
        run_path = self._get_flow_run_path(
            design.name,
            flow_name,
            design_hash,
            flowrun_hash,
        )

        settings_json = run_path / "settings.json"
        results_json = run_path / "results.json"

        # Cache lookup: reuse a previous successful run only for dependency
        # flows (depender set) when dependency caching is enabled.
        previous_results = None
        if (depender and self.cached_dependencies and run_path.exists()
                and settings_json.exists() and results_json.exists()):
            prev_results, prev_settings = None, None
            try:
                with open(settings_json) as f:
                    prev_settings = json.load(f)
                with open(results_json) as f:
                    prev_results = json.load(f)
            # NOTE(review): unreadable/corrupt cache JSON is silently ignored
            # (best-effort cache); the flow is then simply re-run.
            except TypeError:
                pass
            except ValueError:
                pass
            if prev_results and prev_results.get("success") and prev_settings:
                # A cache hit requires flow name, design hash, and run hash to all match.
                if (prev_settings.get("flow_name") == flow_name
                        and prev_settings.get("design_hash") == design_hash
                        and prev_settings.get("flowrun_hash") == flowrun_hash):
                    previous_results = prev_results
                else:
                    log.warning("%s does not contain the expected settings",
                                prev_settings)
            else:
                log.warning(
                    "Could not find valid results/settings from a previous run in %s",
                    run_path,
                )

        # No usable cached run: prepare a fresh run directory (backing up any
        # existing one) and optionally persist the effective settings.
        if not previous_results:
            if not self.run_in_existing_dir and run_path.exists():
                backup_existing(run_path)
            run_path.mkdir(parents=True)

            if self.dump_settings_json:
                log.info("dumping effective settings to %s", settings_json)
                all_settings = dict(
                    design=design,
                    design_hash=design_hash,
                    flow_name=flow_name,
                    flow_settings=flow_settings,
                    xeda_version=__version__,
                    flowrun_hash=flowrun_hash,
                )
                dump_json(all_settings, settings_json)

        with WorkingDirectory(run_path):
            log.debug("Instantiating flow from %s", flow_class)
            flow = flow_class(flow_settings, design, run_path)
            flow.design_hash = design_hash
            flow.flow_hash = flowrun_hash
            flow.timestamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
            # flow execution time includes init() as well as execution of all its dependency flows
            flow.init_time = time.monotonic()
            flow.init()

        # Recursively run dependency flows first; any failure aborts this run.
        for dep_cls, dep_settings in flow.dependencies:
            # merge with existing self.flows[dep].settings
            # NOTE this allows dependency flow to make changes to 'design'
            log.info(
                "Running dependency: %s (%s.%s)",
                dep_cls.name,
                dep_cls.__module__,
                dep_cls.__qualname__,
            )
            completed_dep = self._run_flow(dep_cls,
                                           design,
                                           dep_settings,
                                           depender=flow)
            if not completed_dep.succeeded:
                log.critical("Dependency flow: %s failed!", dep_cls.name)
                raise FlowDependencyFailure()
            flow.completed_dependencies.append(completed_dep)

        flow.results["design"] = flow.design.name
        flow.results["flow"] = flow.name
        success = True

        if previous_results:
            log.warning("Using previous run results and artifacts from %s",
                        run_path)
            flow.results = previous_results
        else:
            with WorkingDirectory(run_path):
                try:
                    flow.run()
                except NonZeroExitCode as e:
                    log.critical("Execution of %s returned %d",
                                 e.command_args[0], e.exit_code)
                    success = False
                if flow.init_time is not None:
                    flow.results.runtime = time.monotonic() - flow.init_time
                try:
                    # assumes parse_reports() returns a bool; a None return
                    # would make `&=` raise TypeError — TODO confirm
                    success &= flow.parse_reports()
                except Exception as e:  # pylint: disable=broad-except
                    log.critical("parse_reports throw an exception: %s", e)
                    if success:  # if so far so good this is a bug!
                        raise e from None
                    success = False
                if not success:
                    log.error("Failure was reported in the parsed results.")
                flow.results.success = success

        if flow.artifacts and flow.succeeded:

            # Serialize paths relative to the run directory; everything else
            # falls back to str().
            def default_encoder(x: Any) -> str:
                if isinstance(x, (PosixPath, os.PathLike)):
                    return str(os.path.relpath(x, flow.run_path))
                return str(x)

            print(f"Generated artifacts in {flow.run_path}:")  # FIXME
            print_json(data=flow.artifacts, default=default_encoder)  # FIXME

        flow.results.success = success
        if not success:
            # set success=false if execution failed
            log.critical("%s failed!", flow.name)

        if self.dump_results_json:
            dump_json(flow.results, results_json)
            log.info("Results written to %s", results_json)

        if self.display_results:
            print_results(
                flow,
                title=f"{flow.name} Results",
                skip_if_empty={"artifacts", "reports"},
            )
        return flow