Example 1
def test_temporary_config_resets_on_exception(caplog):
    with pytest.raises(ValueError):
        with temporary_logger_config(level=logging.CRITICAL):
            raise ValueError()

    logger = get_logger()
    assert logger.level == logging.DEBUG
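This test checks that the temporary configuration is rolled back even when the with-body raises. As a minimal sketch (a hypothetical illustration, not the actual Prefect implementation), a reset-on-exit manager of this kind can be built with contextlib so the restore step sits in a finally block:

import contextlib
import logging

@contextlib.contextmanager
def sketch_temporary_logger_config(level=None):
    # Hypothetical stand-in for the manager under test.
    logger = logging.getLogger("sketch")
    previous_level = logger.level
    try:
        if level is not None:
            logger.setLevel(level)
        yield logger
    finally:
        # The previous level is restored even if the with-body raised,
        # which is what the assertion on logging.DEBUG above relies on.
        logger.setLevel(previous_level)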
Example 2
def test_temporary_config_sets_and_resets(caplog):
    with temporary_logger_config(
            level=logging.CRITICAL,
            stream_fmt="%(message)s",
            stream_datefmt="%H:%M:%S",
    ):
        logger = get_logger()
        assert logger.level == logging.CRITICAL
        for handler in logger.handlers:
            if isinstance(handler, logging.StreamHandler):
                assert handler.formatter._fmt == "%(message)s"
                assert handler.formatter.datefmt == "%H:%M:%S"
        logger.info("Info log not shown")
        logger.critical("Critical log shown")

    logger.info("Info log shown")
    for handler in logger.handlers:
        handler.flush()

    output = caplog.text
    assert "Info log not shown" not in output
    assert "Critical log shown" in output
    assert "Info log shown" in output

    assert logger.level == logging.DEBUG
    for handler in logger.handlers:
        if isinstance(handler, logging.StreamHandler):
            assert handler.formatter._fmt != "%(message)s"
            assert handler.formatter.datefmt != "%H:%M:%S"
Example 3
def test_temporary_config_does_not_require_all_args(caplog, level, stream_fmt,
                                                    stream_datefmt):
    with temporary_logger_config(
            level=level,
            stream_fmt=stream_fmt,
            stream_datefmt=stream_datefmt,
    ):
        pass
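The `level`, `stream_fmt`, and `stream_datefmt` arguments of this test are supplied by a pytest parametrization that is not shown in the listing above. A hedged sketch of what such a decorator could look like (the argument combinations below are assumptions, not taken from the source):

import logging

import pytest


@pytest.mark.parametrize(
    "level,stream_fmt,stream_datefmt",
    [
        (logging.CRITICAL, None, None),  # only the level is overridden
        (None, "%(message)s", None),     # only the stream format is overridden
        (None, None, "%H:%M:%S"),        # only the date format is overridden
        (None, None, None),              # no overrides at all
    ],
)
def test_temporary_config_does_not_require_all_args(caplog, level, stream_fmt,
                                                    stream_datefmt):
    ...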
Example 4
def run(
    ctx,
    flow_or_group_id,
    project,
    path,
    module,
    name,
    labels,
    context_vars,
    params,
    execute,
    idempotency_key,
    schedule,
    log_level,
    param_file,
    run_name,
    quiet,
    no_logs,
    watch,
):
    """Run a flow"""
    # Since the old command was a subcommand of this, we have to do some
    # mucking to smoothly deprecate it. Can be removed when `prefect run flow`
    # is removed.
    if ctx.invoked_subcommand is not None:
        if any([params, no_logs, quiet, flow_or_group_id]):
            # These options are not supported by `prefect run flow`
            raise ClickException("Got unexpected extra argument (%s)" %
                                 ctx.invoked_subcommand)
        return

    # Define a simple function so we don't have to have a lot of `if not quiet` logic
    quiet_echo = ((lambda *_, **__: None) if quiet else
                  lambda *args, **kwargs: click.secho(*args, **kwargs))

    # Cast labels to a list instead of a tuple so we can extend it
    labels = list(labels)

    # Ensure that the user has not passed conflicting options
    given_lookup_options = {
        key
        for key, option in {
            "--id": flow_or_group_id,
            "--project": project,
            "--path": path,
            "--module": module,
        }.items() if option is not None
    }
    # Since `name` can be passed in conjunction with several options and also alone
    # it requires a special case here
    if not given_lookup_options and not name:
        raise ClickException("Received no options to look up the flow." +
                             FLOW_LOOKUP_MSG)
    if "--id" in given_lookup_options and name:
        raise ClickException("Received too many options to look up the flow; "
                             "cannot specifiy both `--name` and `--id`" +
                             FLOW_LOOKUP_MSG)
    if len(given_lookup_options) > 1:
        raise ClickException("Received too many options to look up the flow: "
                             f"{', '.join(given_lookup_options)}" +
                             FLOW_LOOKUP_MSG)

    # Load parameters and context ------------------------------------------------------
    context_dict = load_json_key_values(context_vars, "context")

    file_params = {}
    if param_file:
        try:
            with open(param_file) as fp:
                file_params = json.load(fp)
        except FileNotFoundError:
            raise TerminalError(
                f"Parameter file does not exist: {os.path.abspath(param_file)!r}"
            )
        except ValueError as exc:
            raise TerminalError(
                f"Failed to parse JSON at {os.path.abspath(param_file)!r}: {exc}"
            )

    cli_params = load_json_key_values(params, "parameter")
    conflicting_keys = set(cli_params.keys()).intersection(file_params.keys())
    if conflicting_keys:
        quiet_echo(
            "The following parameters were specified by both file and CLI; the CLI "
            f"value will be used: {conflicting_keys}")
    params_dict = {**file_params, **cli_params}

    # Local flow run -------------------------------------------------------------------

    if path or module:
        # We can load a flow for local execution immediately if given a path or module,
        # otherwise, we'll lookup the flow then pull from storage for a local run
        with try_error_done("Retrieving local flow...",
                            quiet_echo,
                            traceback=True):
            flow = get_flow_from_path_or_module(path=path,
                                                module=module,
                                                name=name)

        # Set the desired log level
        if no_logs:
            log_level = 100  # CRITICAL is 50 so this should do it

        run_info = ""
        if params_dict:
            run_info += f"└── Parameters: {params_dict}\n"
        if context_dict:
            run_info += f"└── Context: {context_dict}\n"

        if run_info:
            quiet_echo("Configured local flow run")
            quiet_echo(run_info, nl=False)

        quiet_echo("Running flow locally...")
        with temporary_logger_config(
                level=log_level,
                stream_fmt="└── %(asctime)s | %(levelname)-7s | %(message)s",
                stream_datefmt="%H:%M:%S",
        ):
            with prefect.context(**context_dict):
                try:
                    result_state = flow.run(parameters=params_dict,
                                            run_on_schedule=schedule)
                except Exception as exc:
                    quiet_echo("Flow runner encountered an exception!")
                    log_exception(exc, indent=2)
                    raise TerminalError("Flow run failed!")

        if result_state.is_failed():
            quiet_echo("Flow run failed!", fg="red")
            sys.exit(1)
        else:
            quiet_echo("Flow run succeeded!", fg="green")

        return

    # Backend flow run -----------------------------------------------------------------

    if schedule:
        raise ClickException(
            "`--schedule` can only be specified for local flow runs")

    client = Client()

    # Validate the flow look up options we've been given and get the flow from the
    # backend
    with try_error_done("Looking up flow metadata...", quiet_echo):
        flow_view = get_flow_view(
            flow_or_group_id=flow_or_group_id,
            project=project,
            name=name,
        )

    if log_level:
        run_config = flow_view.run_config
        if not run_config.env:
            run_config.env = {}
        run_config.env["PREFECT__LOGGING__LEVEL"] = log_level
    else:
        run_config = None

    if execute:
        # Add a random label to prevent an agent from picking up this run
        labels.append(f"agentless-run-{str(uuid.uuid4())[:8]}")

    try:  # Handle keyboard interrupts during creation
        flow_run_id = None

        # Create a flow run in the backend
        with try_error_done(
                f"Creating run for flow {flow_view.name!r}...",
                quiet_echo,
                traceback=True,
                # Display 'Done' manually after querying for data to display so there is not
                # a lag
                skip_done=True,
        ):
            flow_run_id = client.create_flow_run(
                flow_id=flow_view.flow_id,
                parameters=params_dict,
                context=context_dict,
                # If labels is an empty list pass `None` to get defaults
                # https://github.com/PrefectHQ/server/blob/77c301ce0c8deda4f8771f7e9991b25e7911224a/src/prefect_server/api/runs.py#L136
                labels=labels or None,
                run_name=run_name,
                # We only use the run config for setting logging levels right now
                run_config=run_config,
                idempotency_key=idempotency_key,
            )

        if quiet:
            # Just display the flow run id in quiet mode
            click.echo(flow_run_id)
            flow_run = None
        else:
            # Grab information about the flow run (if quiet we can skip this query)
            flow_run = FlowRunView.from_flow_run_id(flow_run_id)
            run_url = client.get_cloud_url("flow-run", flow_run_id)

            # Display "Done" for creating flow run after pulling the info so there
            # isn't a weird lag
            quiet_echo(" Done", fg="green")
            quiet_echo(
                textwrap.dedent(f"""
                        └── Name: {flow_run.name}
                        └── UUID: {flow_run.flow_run_id}
                        └── Labels: {flow_run.labels}
                        └── Parameters: {flow_run.parameters}
                        └── Context: {flow_run.context}
                        └── URL: {run_url}
                        """).strip())

    except KeyboardInterrupt:
        # If the user interrupts here, they will expect the flow run to be cancelled
        quiet_echo("\nKeyboard interrupt detected! Aborting...", fg="yellow")
        if flow_run_id:
            client.cancel_flow_run(flow_run_id=flow_run_id)
            quiet_echo("Cancelled flow run.")
        else:
            # The flow run was not created so we can just exit
            quiet_echo("Aborted.")
        return

    # Handle agentless execution
    if execute:
        quiet_echo("Executing flow run...")
        try:
            with temporary_logger_config(
                    # Disable logging entirely if asked to be quiet or skip logs
                    level=100 if no_logs or quiet else log_level,
                    stream_fmt="└── %(asctime)s | %(levelname)-7s | %(message)s",
                    stream_datefmt="%H:%M:%S",
            ):
                execute_flow_run_in_subprocess(flow_run_id)
        except KeyboardInterrupt:
            quiet_echo("Keyboard interrupt detected! Aborting...", fg="yellow")
            pass

    elif watch:
        try:
            quiet_echo("Watching flow run execution...")
            for log in watch_flow_run(
                    flow_run_id=flow_run_id,
                    stream_logs=not no_logs,
            ):
                level_name = logging.getLevelName(log.level)
                timestamp = log.timestamp.in_tz(tz="local")
                echo_with_log_color(
                    log.level,
                    f"└── {timestamp:%H:%M:%S} | {level_name:<7} | {log.message}",
                )

        except KeyboardInterrupt:
            quiet_echo("Keyboard interrupt detected!", fg="yellow")
            try:
                cancel = click.confirm(
                    "On exit, we can leave your flow run executing or cancel it.\n"
                    "Do you want to cancel this flow run?",
                    default=True,
                )
            except click.Abort:
                # A second keyboard interrupt will exit without cancellation
                pass
            else:
                if cancel:
                    client.cancel_flow_run(flow_run_id=flow_run_id)
                    quiet_echo("Cancelled flow run.", fg="green")
                    return

            quiet_echo("Exiting without cancelling flow run!", fg="yellow")
            raise  # Re-raise the interrupt

    else:
        # If not watching or executing, exit without checking state
        return

    # Get the final flow run state
    flow_run = FlowRunView.from_flow_run_id(flow_run_id)

    # Wait for the flow run to be done up to 3 seconds
    elapsed_time = 0
    while not flow_run.state.is_finished() and elapsed_time < 3:
        time.sleep(1)
        elapsed_time += 1
        flow_run = flow_run.get_latest()

    # Display the final state
    if flow_run.state.is_failed():
        quiet_echo("Flow run failed!", fg="red")
        sys.exit(1)
    elif flow_run.state.is_successful():
        quiet_echo("Flow run succeeded!", fg="green")
    else:
        quiet_echo(f"Flow run is in unexpected state: {flow_run.state}",
                   fg="yellow")
        sys.exit(1)
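For context, a hedged sketch of driving this command through click's CliRunner in a test; the import path, flow file, and option spellings are assumptions inferred from the function signature above rather than confirmed by this listing:

from click.testing import CliRunner

from prefect.cli.run import run  # assumed import path

runner = CliRunner()
# Exercise the local-execution branch: load a flow from a file and run it immediately.
result = runner.invoke(run, ["--path", "./flows/my_flow.py", "--log-level", "DEBUG"])
print(result.exit_code, result.output)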