def test_recommendation_type_enum():
    """Verify RecommendationType members resolve by name and the enum is closed.

    Checks that every known name maps to its member, that an unknown name
    raises ``ValueError``, and that no extra members exist.
    """
    expected_members = {
        "stable": RecommendationType.STABLE,
        "testing": RecommendationType.TESTING,
        "latest": RecommendationType.LATEST,
    }

    for name, member in expected_members.items():
        assert RecommendationType.by_name(name) == member

    with pytest.raises(ValueError):
        RecommendationType.by_name("unknown")

    # The enum carries exactly the three members checked above.
    assert len(RecommendationType) == 3
def _load_json_cli_parameter(value, description):
    """Parse a JSON CLI option supplied either as a file path or as an inline JSON string.

    :param value: path to a JSON file, or an inline JSON document.
    :param description: human-readable option name used in the error log message.
    :return: the parsed JSON content.
    :raises Exception: re-raises any load/parse error after logging when ``value`` is a file.
    """
    if os.path.isfile(value):
        try:
            with open(value, "r") as f:
                return json.load(f)
        except Exception:
            _LOGGER.error("Failed to load %s file %r", description, value)
            raise

    return json.loads(value)


def advise(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    limit: int,
    output: str,
    recommendation_type: str,
    requirements_format: str,
    requirements: str,
    predictor: str,
    predictor_config: Optional[str] = None,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    requirements_locked: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    prescription: Optional[str] = None,
    constraints: Optional[str] = None,
    user_stack_scoring: bool = True,
    dev: bool = False,
    labels: Optional[str] = None,
):
    """Advise package and package versions in the given stack or on solely package only.

    Builds a resolver from the CLI options, runs it in a subprocess, prints the
    result and exits the Click context with 0 on success, 1 otherwise.  When a
    Prometheus pushgateway is configured, metrics are pushed on a best-effort
    basis after the run.
    """
    # Snapshot of the raw CLI arguments; reported back in the final result.
    parameters = locals()
    parameters.pop("click_ctx")

    if pipeline and prescription:
        sys.exit("Options --pipeline/--prescription are disjoint")

    if library_usage:
        library_usage = _load_json_cli_parameter(library_usage, "library usage")
        # Show library usage in the final report.
        parameters["library_usage"] = library_usage

    labels_dict = {}
    if labels:
        labels_dict = _load_json_cli_parameter(labels, "labels")
        # Show labels in the final report.
        parameters["labels"] = labels_dict

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    _LOGGER.info("Using recommendation type %s", recommendation_type.name.lower())
    # Validates the requested output format; raises on an unknown name.
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)

    project = _instantiate_project(
        requirements,
        requirements_locked,
        runtime_environment=runtime_environment,
        constraints=constraints,
    )

    pipeline_config = None
    if pipeline:
        pipeline_config = PipelineBuilder.load(pipeline)

    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    prescription_instance = None
    if prescription:
        if len(prescription) == 1:
            # Click does not support multiple parameters when supplied via env vars. Perform split on delimiter.
            prescription_instance = Prescription.load(*prescription[0].split(","))
        else:
            prescription_instance = Prescription.load(*prescription)

    predictor_class, predictor_kwargs = _get_adviser_predictor(predictor, recommendation_type)
    # An explicit predictor configuration overrides the derived defaults.
    predictor_kwargs = _get_predictor_kwargs(predictor_config) or predictor_kwargs
    # History is only kept when it will be plotted.
    predictor_instance = predictor_class(**predictor_kwargs, keep_history=plot is not None)

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using %r predictor with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_adviser_instance(
        predictor=predictor_instance,
        project=project,
        labels=labels_dict,
        library_usage=library_usage,
        recommendation_type=recommendation_type,
        limit=limit,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        pipeline_config=pipeline_config,
        prescription=prescription_instance,
        cli_parameters=parameters,
    )

    del prescription  # No longer needed, garbage collect it.

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=output,
            pretty=not no_pretty,
        )
    )

    exit_code = subprocess_run(
        resolver,
        print_func,
        plot=plot,
        result_dict={"parameters": parameters},
        with_devel=dev,
        user_stack_scoring=user_stack_scoring,
        verbose=click_ctx.parent.params.get("verbose", False),
    )

    # Push metrics; failures here are logged but never fail the advise run.
    if _THOTH_METRICS_PUSHGATEWAY_URL:
        _METRIC_INFO.labels(_THOTH_DEPLOYMENT_NAME, analyzer_version).inc()
        _METRIC_DATABASE_SCHEMA_SCRIPT.labels(
            analyzer_name,
            resolver.graph.get_script_alembic_version_head(),
            _THOTH_DEPLOYMENT_NAME,
        ).inc()
        try:
            _LOGGER.debug("Submitting metrics to Prometheus pushgateway %s", _THOTH_METRICS_PUSHGATEWAY_URL)
            push_to_gateway(_THOTH_METRICS_PUSHGATEWAY_URL, job="adviser", registry=prometheus_registry)
        except Exception:
            _LOGGER.exception("An error occurred when pushing metrics")

    click_ctx.exit(int(exit_code != 0))
# NOTE(review): this file contains multiple definitions of ``advise`` (apparently
# concatenated versions); at import time the last one wins — confirm intent.
def advise(
    click_ctx,
    requirements,
    requirements_format=None,
    requirements_locked=None,
    recommendation_type=None,
    runtime_environment=None,
    output=None,
    no_pretty=False,
    files=False,
    count=None,
    limit=None,
    library_usage=None,
    limit_latest_versions=None,
):
    """Advise package and package versions in the given stack or on solely package only.

    Runs ``Adviser.compute_on_project`` on the instantiated project and builds a
    serializable result dict (error flag, report entries, stack info, advised and
    pipeline configuration, echoed parameters), prints it via
    ``print_command_result`` and returns 1 on error, 0 otherwise.
    """
    _LOGGER.debug("Passed arguments: %s", locals())
    # Click delivers numeric options as strings; normalize to int or None.
    limit = int(limit) if limit else None
    count = int(count) if count else None

    # A special value of -1 signalizes no limit/count, this is a workaround for Click's option parser.
    if count == -1:
        count = None
    if limit == -1:
        limit = None
    if limit_latest_versions == -1:
        limit_latest_versions = None

    # library_usage may be a path to a JSON file or an inline JSON string.
    if library_usage:
        if os.path.isfile(library_usage):
            try:
                library_usage = json.loads(Path(library_usage).read_text())
            except Exception as exc:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)

    # Result skeleton; filled in below and printed/serialized at the end.
    result = {
        "error": None,
        "report": [],
        "stack_info": None,
        "advised_configuration": None,
        "pipeline_configuration": None,
        "parameters": {
            "runtime_environment": runtime_environment.to_dict(),
            "recommendation_type": recommendation_type.name,
            "library_usage": library_usage,
            "requirements_format": requirements_format.name,
            "limit": limit,
            "limit_latest_versions": limit_latest_versions,
            "count": count,
            "no_pretty": no_pretty,
        },
        "input": None,
    }

    try:
        project = _instantiate_project(
            requirements, requirements_locked, files, runtime_environment
        )
        result["input"] = project.to_dict()
        if runtime_environment:
            _LOGGER.info(
                "Runtime environment configuration:\n%s",
                json.dumps(runtime_environment.to_dict(), sort_keys=True, indent=2),
            )
        else:
            _LOGGER.info("No runtime environment configuration supplied")
        if library_usage:
            _LOGGER.info(
                "Library usage:\n%s",
                json.dumps(library_usage, sort_keys=True, indent=2),
            )
        else:
            _LOGGER.info("No library usage supplied")
        stack_info, advised_configuration, report, pipeline_configuration = Adviser.compute_on_project(
            project,
            recommendation_type=recommendation_type,
            library_usage=library_usage,
            count=count,
            limit=limit,
            limit_latest_versions=limit_latest_versions,
        )
    except ThothAdviserException as exc:
        # TODO: we should extend exceptions so they are capable of storing more info.
        if isinstance(exc, InternalError):
            # Re-raise internal exceptions that shouldn't occur here.
            raise
        _LOGGER.exception("Error during computing recommendation: %s", str(exc))
        result["error"] = True
        result["report"] = [([{"justification": f"{str(exc)}", "type": "ERROR"}], None)]
    except NoReleasesFound as exc:
        result["error"] = True
        result["report"] = [([{
            "justification": f"{str(exc)}; analysis of the missing package will be "
            f"automatically scheduled by the system",
            "type": "ERROR"
        }], None)]
    except (SolverException, UnableLock) as exc:
        result["error"] = True
        result["report"] = [([{"justification": str(exc), "type": "ERROR"}], None)]
    else:
        # Convert report to a dict so its serialized.
        result["report"] = [
            (justification, project.to_dict(), overall_score)
            for justification, project, overall_score in report
        ]
        # Report error if we did not find any recommendation to the user, the
        # stack_info carries information on why it hasn't been found.
        result["error"] = len(result["report"]) == 0
        result["stack_info"] = stack_info
        if result["error"]:
            result["stack_info"].append({
                "type": "ERROR",
                "justification": "Recommendation engine did not produce any stacks"
            })
        result["advised_configuration"] = advised_configuration
        result["pipeline_configuration"] = pipeline_configuration

    print_command_result(
        click_ctx,
        result,
        analyzer=analyzer_name,
        analyzer_version=analyzer_version,
        output=output,
        pretty=not no_pretty,
    )
    # Exit status for the caller: 1 when an error was recorded, 0 otherwise.
    return int(result["error"] is True)
def advise(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    limit: int,
    output: str,
    recommendation_type: str,
    requirements_format: str,
    requirements: str,
    predictor: str,
    predictor_config: Optional[str] = None,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    requirements_locked: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    user_stack_scoring: bool = True,
    dev: bool = False,
):
    """Advise package and package versions in the given stack or on solely package only.

    Builds a resolver from the CLI options, runs it in a subprocess, prints the
    result and exits the Click context with 0 on success, 1 otherwise.
    """
    # Snapshot of the raw CLI arguments; reported back in the final result.
    parameters = locals()
    parameters.pop("click_ctx")

    # library_usage may be a path to a JSON file or an inline JSON string.
    if library_usage:
        if os.path.isfile(library_usage):
            try:
                library_usage = json.loads(Path(library_usage).read_text())
            except Exception:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    # Validates the requested output format; raises on an unknown name.
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(requirements, requirements_locked, runtime_environment)
    pipeline_config = None if pipeline is None else PipelineBuilder.load(pipeline)

    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    predictor_class, predictor_kwargs = _get_adviser_predictor(predictor, recommendation_type)
    # An explicit predictor configuration overrides the derived defaults.
    predictor_kwargs = _get_predictor_kwargs(predictor_config) or predictor_kwargs
    # History is only kept when it will be plotted.
    predictor_instance = predictor_class(**predictor_kwargs, keep_history=plot is not None)

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using %r predictor with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_adviser_instance(
        predictor=predictor_instance,
        project=project,
        library_usage=library_usage,
        recommendation_type=recommendation_type,
        limit=limit,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        pipeline_config=pipeline_config,
        cli_parameters=parameters,
    )

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=output,
            pretty=not no_pretty,
        )
    )

    # Resolution runs in a subprocess; exit code reflects its success.
    exit_code = subprocess_run(
        resolver,
        print_func,
        plot=plot,
        result_dict={"parameters": parameters},
        with_devel=dev,
        user_stack_scoring=user_stack_scoring,
    )

    click_ctx.exit(int(exit_code != 0))