def dependency_monkey(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    decision_type: str,
    predictor: str,
    report_output: str,
    requirements: str,
    requirements_format: str,
    stack_output: str,
    predictor_config: Optional[str] = None,
    context: Optional[str] = None,
    dry_run: bool = False,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    prescription: Optional[str] = None,
    dev: bool = False,
):
    """Generate software stacks based on all valid resolutions that conform to version ranges."""
    parameters = locals()
    parameters.pop("click_ctx")

    if pipeline and prescription:
        sys.exit("Options --pipeline/--prescription are disjoint")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                with open(library_usage, "r") as f:
                    library_usage = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

        # Show library usage in the final report.
        parameters["library_usage"] = library_usage

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    parameters["runtime_environment"] = runtime_environment.to_dict()
    decision_type = DecisionType.by_name(decision_type)
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(requirements, runtime_environment=runtime_environment)
    parameters["requirements"] = project.pipfile.to_dict()
    parameters["project"] = project.to_dict()

    pipeline_config = None if pipeline is None else PipelineBuilder.load(pipeline)
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    prescription_instance = None
    if prescription:
        if len(prescription) == 1:
            # Click does not support multiple parameters when supplied via env vars. Perform split on delimiter.
            prescription_instance = Prescription.load(*prescription[0].split(","))
        else:
            prescription_instance = Prescription.load(*prescription)

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())

    predictor_class = _get_dependency_monkey_predictor(predictor, decision_type)
    predictor_kwargs = _get_predictor_kwargs(predictor_config)
    predictor_instance = predictor_class(**predictor_kwargs, keep_history=plot is not None)

    _LOGGER.info(
        "Starting resolver using predictor %r with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_dependency_monkey_instance(
        predictor=predictor_instance,
        project=project,
        library_usage=library_usage,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        decision_type=decision_type,
        pipeline_config=pipeline_config,
        prescription=prescription_instance,
        cli_parameters=parameters,
    )
    del prescription  # No longer needed, garbage collect it.

    context_content = {}
    try:
        with open(context) as f:
            context_content = json.load(f)
    except (FileNotFoundError, IOError):
        # IOError raised if context is too large to be handled with open.
        context_content = json.loads(context)

    parameters["context"] = context_content

    dependency_monkey_runner = DependencyMonkey(
        resolver=resolver,
        stack_output=stack_output,
        context=context_content,
        dry_run=dry_run,
        decision_type=decision_type,
    )

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=report_output,
            pretty=not no_pretty,
        )
    )

    exit_code = subprocess_run(
        dependency_monkey_runner,
        print_func,
        result_dict={"parameters": parameters},
        plot=plot,
        with_devel=dev,
        user_stack_scoring=False,
        # Keep verbose output (stating pipeline units run) in dependency-monkey.
        verbose=True,
    )

    click_ctx.exit(int(exit_code != 0))
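# _get_dependency_monkey_predictor and _get_predictor_kwargs are helpers defined elsewhere in
# this module and are not shown in this excerpt. Purely as a hypothetical illustration (not the
# actual implementation), a predictor configuration passed on the command line could be turned
# into keyword arguments the same way the library_usage value is handled above: accept either a
# path to a JSON file or an inline JSON string.
def _example_parse_predictor_config(predictor_config: Optional[str]) -> dict:
    """Hypothetical sketch only; the real _get_predictor_kwargs may differ."""
    if not predictor_config:
        return {}
    if os.path.isfile(predictor_config):
        # The value points to a file on disk holding the JSON configuration.
        with open(predictor_config, "r") as f:
            return json.load(f)
    # Otherwise treat the value itself as inline JSON.
    return json.loads(predictor_config)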
def advise(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    limit: int,
    output: str,
    recommendation_type: str,
    requirements_format: str,
    requirements: str,
    predictor: str,
    predictor_config: Optional[str] = None,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    requirements_locked: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    user_stack_scoring: bool = True,
    dev: bool = False,
):
    """Advise packages and package versions in the given stack or for a single package only."""
    parameters = locals()
    parameters.pop("click_ctx")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                library_usage = json.loads(Path(library_usage).read_text())
            except Exception:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(requirements, requirements_locked, runtime_environment)
    pipeline_config = None if pipeline is None else PipelineBuilder.load(pipeline)
    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    predictor_class, predictor_kwargs = _get_adviser_predictor(predictor, recommendation_type)
    predictor_kwargs = _get_predictor_kwargs(predictor_config) or predictor_kwargs
    predictor_instance = predictor_class(**predictor_kwargs, keep_history=plot is not None)

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using %r predictor with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_adviser_instance(
        predictor=predictor_instance,
        project=project,
        library_usage=library_usage,
        recommendation_type=recommendation_type,
        limit=limit,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        pipeline_config=pipeline_config,
        cli_parameters=parameters,
    )

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=output,
            pretty=not no_pretty,
        )
    )

    exit_code = subprocess_run(
        resolver,
        print_func,
        plot=plot,
        result_dict={"parameters": parameters},
        with_devel=dev,
        user_stack_scoring=user_stack_scoring,
    )

    click_ctx.exit(int(exit_code != 0))
def advise(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    limit: int,
    output: str,
    recommendation_type: str,
    requirements_format: str,
    requirements: str,
    predictor: str,
    predictor_config: Optional[str] = None,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    requirements_locked: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    prescription: Optional[str] = None,
    constraints: Optional[str] = None,
    user_stack_scoring: bool = True,
    dev: bool = False,
    labels: Optional[str] = None,
):
    """Advise packages and package versions in the given stack or for a single package only."""
    parameters = locals()
    parameters.pop("click_ctx")

    if pipeline and prescription:
        sys.exit("Options --pipeline/--prescription are disjoint")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                with open(library_usage, "r") as f:
                    library_usage = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

        # Show library usage in the final report.
        parameters["library_usage"] = library_usage

    labels_dict = {}
    if labels:
        if os.path.isfile(labels):
            try:
                with open(labels, "r") as f:
                    labels_dict = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load labels file %r", labels)
                raise
        else:
            labels_dict = json.loads(labels)

        # Show labels in the final report.
        parameters["labels"] = labels_dict

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    _LOGGER.info("Using recommendation type %s", recommendation_type.name.lower())
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(
        requirements,
        requirements_locked,
        runtime_environment=runtime_environment,
        constraints=constraints,
    )

    pipeline_config = None
    if pipeline:
        pipeline_config = PipelineBuilder.load(pipeline)

    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    prescription_instance = None
    if prescription:
        if len(prescription) == 1:
            # Click does not support multiple parameters when supplied via env vars. Perform split on delimiter.
            prescription_instance = Prescription.load(*prescription[0].split(","))
        else:
            prescription_instance = Prescription.load(*prescription)

    predictor_class, predictor_kwargs = _get_adviser_predictor(predictor, recommendation_type)
    predictor_kwargs = _get_predictor_kwargs(predictor_config) or predictor_kwargs
    predictor_instance = predictor_class(**predictor_kwargs, keep_history=plot is not None)

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using %r predictor with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_adviser_instance(
        predictor=predictor_instance,
        project=project,
        labels=labels_dict,
        library_usage=library_usage,
        recommendation_type=recommendation_type,
        limit=limit,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        pipeline_config=pipeline_config,
        prescription=prescription_instance,
        cli_parameters=parameters,
    )
    del prescription  # No longer needed, garbage collect it.

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=output,
            pretty=not no_pretty,
        )
    )

    exit_code = subprocess_run(
        resolver,
        print_func,
        plot=plot,
        result_dict={"parameters": parameters},
        with_devel=dev,
        user_stack_scoring=user_stack_scoring,
        verbose=click_ctx.parent.params.get("verbose", False),
    )

    # Push metrics.
    if _THOTH_METRICS_PUSHGATEWAY_URL:
        _METRIC_INFO.labels(_THOTH_DEPLOYMENT_NAME, analyzer_version).inc()
        _METRIC_DATABASE_SCHEMA_SCRIPT.labels(
            analyzer_name,
            resolver.graph.get_script_alembic_version_head(),
            _THOTH_DEPLOYMENT_NAME,
        ).inc()

        try:
            _LOGGER.debug(
                "Submitting metrics to Prometheus pushgateway %s",
                _THOTH_METRICS_PUSHGATEWAY_URL,
            )
            push_to_gateway(
                _THOTH_METRICS_PUSHGATEWAY_URL,
                job="adviser",
                registry=prometheus_registry,
            )
        except Exception:
            _LOGGER.exception("An error occurred when pushing metrics")

    click_ctx.exit(int(exit_code != 0))
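# The metrics push above relies on module-level objects (prometheus_registry, _METRIC_INFO,
# _METRIC_DATABASE_SCHEMA_SCRIPT, _THOTH_METRICS_PUSHGATEWAY_URL) defined elsewhere in this
# module. As a standalone, hypothetical sketch of the same prometheus_client pattern
# (illustrative only; the metric name below is an assumption, not one of the module's metrics):
def _example_push_metrics(pushgateway_url: str) -> None:
    """Hypothetical sketch: register a counter and push it to a Prometheus pushgateway."""
    from prometheus_client import CollectorRegistry, Counter

    registry = CollectorRegistry()
    runs = Counter(
        "adviser_example_runs_total",  # assumed metric name, for illustration only
        "Example counter incremented once per adviser run.",
        registry=registry,
    )
    runs.inc()
    # push_to_gateway is used by the code above, so it is assumed to be imported at module level.
    push_to_gateway(pushgateway_url, job="adviser", registry=registry)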
def dependency_monkey(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    decision_type: str,
    predictor: str,
    report_output: str,
    requirements: str,
    requirements_format: str,
    stack_output: str,
    context: Optional[str] = None,
    dry_run: bool = False,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    dev: bool = False,
):
    """Generate software stacks based on all valid resolutions that conform to version ranges."""
    parameters = locals()
    parameters.pop("click_ctx")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                library_usage = json.loads(Path(library_usage).read_text())
            except Exception:
                _LOGGER.error("Failed to load library usage file %r", library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(requirements, runtime_environment=runtime_environment)
    pipeline_config = None if pipeline is None else PipelineBuilder.load(pipeline)
    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    # Use current time to make sure we have possibly reproducible runs - the seed is reported.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using predictor %r with random seed set to %r",
        predictor,
        seed,
    )
    random.seed(seed)

    resolver = Resolver.get_dependency_monkey_instance(
        predictor=getattr(predictors, predictor)(keep_history=plot is not None),
        project=project,
        library_usage=library_usage,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        decision_type=decision_type,
        pipeline_config=pipeline_config,
        cli_parameters=parameters,
    )

    context_content = {}
    try:
        with open(context) as f:
            context_content = json.load(f)
    except (FileNotFoundError, IOError):
        # IOError raised if context is too large to be handled with open.
        context_content = json.loads(context)

    dependency_monkey_runner = DependencyMonkey(
        resolver=resolver,
        stack_output=stack_output,
        context=context_content,
        dry_run=dry_run,
        decision_type=decision_type,
    )

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=report_output,
            pretty=not no_pretty,
        )
    )

    exit_code = subprocess_run(
        dependency_monkey_runner,
        print_func,
        result_dict={"parameters": parameters},
        plot=plot,
        with_devel=dev,
        user_stack_scoring=False,
    )

    click_ctx.exit(int(exit_code != 0))