Example #1
    def test_from_dict_duplicate_name_error(self) -> None:
        """Test raising duplicate error if unit names clash."""
        unit = {
            "name": "BootUnit",
            "type": "boot",
            "should_include": {
                "adviser_pipeline": True
            },
            "run": {
                "log": {
                    "message": "Some text printed",
                    "type": "INFO"
                }
            },
        }

        prescription = {
            "units": {
                "boots": [unit, dict(unit)],
            },
        }

        with pytest.raises(PrescriptionDuplicateUnitNameError):
            Prescription.from_dict(prescription,
                                   prescription_name="thoth",
                                   prescription_release="2021.06.15")
Example #2
    def test_from_dict_duplicate_name_error(self) -> None:
        """Test raising duplicate error if unit names clash."""
        unit = {
            "name": "BootUnit",
            "type": "boot",
            "should_include": {
                "adviser_pipeline": True
            },
            "run": {
                "log": {
                    "message": "Some text printed",
                    "type": "INFO"
                }
            },
        }

        prescription = {
            "apiVersion": "thoth-station.ninja/v1",
            "kind": "prescription",
            "spec": {
                "name": "thoth",
                "release": "2021.03.30",
                "units": {
                    "boots": [unit, dict(unit)],
                },
            },
        }

        with pytest.raises(PrescriptionDuplicateUnitNameError):
            Prescription.from_dict(prescription)
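
Written out as a standalone YAML document, the dictionary built in Example #2 corresponds to the following sketch (assembled directly from the test data above, not a file shipped with the project):

apiVersion: thoth-station.ninja/v1
kind: prescription
spec:
  name: thoth
  release: 2021.03.30
  units:
    boots:
      - name: BootUnit
        type: boot
        should_include:
          adviser_pipeline: true
        run:
          log:
            message: Some text printed
            type: INFO
      # The test appends a second, identical unit here; the clashing
      # "BootUnit" name is what raises PrescriptionDuplicateUnitNameError.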
Example #3
def validate_prescription(prescription: str) -> None:
    """Validate the given prescription."""
    _LOGGER.info("Loading prescription %r", prescription)
    with open(prescription, "r") as prescription_file:
        content = yaml.safe_load(prescription_file)

    Prescription.validate(content)
    _LOGGER.info("Prescription %r validated successfully", prescription)
Example #4
    def test_verify_multiple_should_include(
        self, builder_context: PipelineBuilderContext
    ) -> None:
        """Verify multiple should_include calls do not loop endlessly."""
        builder_context.recommendation_type = RecommendationType.LATEST
        prescription_path = str(self.data_dir / "prescriptions" / "basic.yaml")
        builder_context.prescription = Prescription.load(prescription_path)
        self.verify_multiple_should_include(builder_context)
Example #5
    def test_run(self, context: Context) -> None:
        """Test remapping UBI to RHEL."""
        assert not context.stack_info

        prescription_path = str(self.data_dir / "prescriptions" / "basic.yaml")
        context.prescription = Prescription.load(prescription_path)

        unit = self.UNIT_TESTED()
        unit.pre_run()
        with self.UNIT_TESTED.assigned_context(context):
            unit.run()

        assert context.stack_info, "No stack info provided"
        assert len(context.stack_info) == 1
        assert context.stack_info[0]["type"] == "INFO"
        assert (
            context.stack_info[0]["message"]
            == "Using prescription 'thoth' release '2020.03.18'"
        )
        assert self.verify_justification_schema(context.stack_info) is True
Example #6
def validate_prescription(prescriptions: str, show_unit_names: bool,
                          output: str) -> None:
    """Validate the given prescription."""
    _LOGGER.info("Validating prescriptions in %r", prescriptions)
    prescription = Prescription.validate(prescriptions)
    _LOGGER.info("Prescriptions %r validated successfully", prescriptions)

    result = {
        "prescriptions": [
            {"name": p[0], "release": p[1]} for p in prescription.prescriptions
        ],
        "count": {
            "boots_count": sum(1 for _ in prescription.iter_boot_units()),
            "pseudonyms_count": sum(1 for _ in prescription.iter_pseudonym_units()),
            "sieves_count": sum(1 for _ in prescription.iter_sieve_units()),
            "steps_count": sum(1 for _ in prescription.iter_step_units()),
            "strides_count": sum(1 for _ in prescription.iter_stride_units()),
            "wraps_count": sum(1 for _ in prescription.iter_wrap_units()),
        },
        "count_all": sum(1 for _ in prescription.units),
    }

    if show_unit_names:
        result["boots"] = sorted(prescription.boots_dict.keys())
        result["pseudonyms"] = sorted(prescription.pseudonyms_dict.keys())
        result["sieves"] = sorted(prescription.sieves_dict.keys())
        result["steps"] = sorted(prescription.steps_dict.keys())
        result["strides"] = sorted(prescription.strides_dict.keys())
        result["wraps"] = sorted(prescription.wraps_dict.keys())

    yaml.safe_dump(result, sys.stdout)

    if output:
        _LOGGER.info("Writing validated prescriptions to %r", output)
        with open(output, "wb") as fp:
            pickle.dump(prescription, fp)
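
For a prescription declaring one unit of each type and show_unit_names left off, the YAML dumped to stdout would have roughly the following shape. This is an illustrative sketch: the keys come straight from the result dictionary above (alphabetized, since yaml.safe_dump sorts keys by default), while the counts and the release string are example values borrowed from the tests in this section:

count:
  boots_count: 1
  pseudonyms_count: 1
  sieves_count: 1
  steps_count: 1
  strides_count: 1
  wraps_count: 1
count_all: 6
prescriptions:
- name: thoth
  release: '2021.06.15'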
Example #7
    def test_load(self) -> None:
        """Test loading prescription."""
        prescription_path = str(self.data_dir / "prescriptions" / "basic.yaml")
        instance = Prescription.load(prescription_path)
        assert instance is not None
        assert list(instance.boots_dict) == ["thoth.BootUnit"]
        assert list(instance.pseudonyms_dict) == ["thoth.PseudonymUnit"]
        assert list(instance.sieves_dict) == ["thoth.SieveUnit"]
        assert list(instance.steps_dict) == ["thoth.StepUnit"]
        assert list(instance.strides_dict) == ["thoth.StrideUnit"]
        assert list(instance.wraps_dict) == ["thoth.WrapUnit"]

        assert [u.get_unit_name() for u in instance.iter_boot_units()] == ["thoth.BootUnit"]
        assert [u.get_unit_name() for u in instance.iter_pseudonym_units()] == ["thoth.PseudonymUnit"]
        assert [u.get_unit_name() for u in instance.iter_sieve_units()] == ["thoth.SieveUnit"]
        assert [u.get_unit_name() for u in instance.iter_step_units()] == ["thoth.StepUnit"]
        assert [u.get_unit_name() for u in instance.iter_stride_units()] == ["thoth.StrideUnit"]
        assert [u.get_unit_name() for u in instance.iter_wrap_units()] == ["thoth.WrapUnit"]
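
The assertions above pin down the shape of the basic.yaml fixture: a prescription named thoth declaring one unit of each type, with unit names apparently reported as '<prescription name>.<unit name>'. A reconstruction of what such a fixture could look like follows; it is inferred from these assertions and the release string in Example #5, not copied from the repository:

apiVersion: thoth-station.ninja/v1
kind: prescription
spec:
  name: thoth
  release: 2020.03.18
  units:
    boots:
      - name: BootUnit
        type: boot
        should_include:
          adviser_pipeline: true
        run:
          log:
            message: Some text printed   # illustrative run section
            type: INFO
    # pseudonyms, sieves, steps, strides and wraps declare PseudonymUnit,
    # SieveUnit, StepUnit, StrideUnit and WrapUnit analogously; their
    # type-specific run sections are omitted here.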
Example #8
    def test_from_dict_validate_error(self) -> None:
        """Test raising an error if schema validation fails."""
        with pytest.raises(PrescriptionSchemaError):
            Prescription.from_dict({"foo": "bar"})
Example #9
    def test_should_include(self, builder_context: PipelineBuilderContext) -> None:
        """Test including this pipeline unit."""
        prescription_path = str(self.data_dir / "prescriptions" / "basic.yaml")
        builder_context.prescription = Prescription.load(prescription_path)
        assert list(self.UNIT_TESTED.should_include(builder_context)) == [{}]
Example #10
def dependency_monkey(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    decision_type: str,
    predictor: str,
    report_output: str,
    requirements: str,
    requirements_format: str,
    stack_output: str,
    predictor_config: Optional[str] = None,
    context: Optional[str] = None,
    dry_run: bool = False,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    prescription: Optional[Tuple[str, ...]] = None,
    dev: bool = False,
):
    """Generate software stacks based on all valid resolutions that conform version ranges."""
    parameters = locals()
    parameters.pop("click_ctx")

    if pipeline and prescription:
        sys.exit("Options --pipeline/--prescription are disjoint")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                with open(library_usage, "r") as f:
                    library_usage = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load library usage file %r",
                              library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

        # Show library usage in the final report.
        parameters["library_usage"] = library_usage

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    parameters["runtime_environment"] = runtime_environment.to_dict()

    decision_type = DecisionType.by_name(decision_type)
    requirements_format = PythonRecommendationOutput.by_name(requirements_format)
    project = _instantiate_project(requirements, runtime_environment=runtime_environment)
    parameters["requirements"] = project.pipfile.to_dict()
    parameters["project"] = project.to_dict()

    pipeline_config = None if pipeline is None else PipelineBuilder.load(pipeline)
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    prescription_instance = None
    if prescription:
        if len(prescription) == 1:
            # Click does not support multiple parameters when supplied via env vars. Perform split on delimiter.
            prescription_instance = Prescription.load(
                *prescription[0].split(","))
        else:
            prescription_instance = Prescription.load(*prescription)

    # Default the seed to the current time; the seed is reported so runs can be reproduced.
    seed = seed if seed is not None else int(time.time())
    predictor_class = _get_dependency_monkey_predictor(predictor,
                                                       decision_type)
    predictor_kwargs = _get_predictor_kwargs(predictor_config)
    predictor_instance = predictor_class(**predictor_kwargs,
                                         keep_history=plot is not None)
    _LOGGER.info(
        "Starting resolver using predictor %r with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_dependency_monkey_instance(
        predictor=predictor_instance,
        project=project,
        library_usage=library_usage,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        decision_type=decision_type,
        pipeline_config=pipeline_config,
        prescription=prescription_instance,
        cli_parameters=parameters,
    )

    del prescription  # No longer needed, garbage collect it.

    context_content = {}
    if context:
        try:
            with open(context) as f:
                context_content = json.load(f)
        except (FileNotFoundError, IOError):
            # IOError is raised if the context string is inline JSON too large
            # to be treated as a file name.
            context_content = json.loads(context)
    parameters["context"] = context_content

    dependency_monkey_runner = DependencyMonkey(
        resolver=resolver,
        stack_output=stack_output,
        context=context_content,
        dry_run=dry_run,
        decision_type=decision_type,
    )

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=report_output,
            pretty=not no_pretty,
        ))

    exit_code = subprocess_run(
        dependency_monkey_runner,
        print_func,
        result_dict={"parameters": parameters},
        plot=plot,
        with_devel=dev,
        user_stack_scoring=False,
        # Keep verbose output (stating pipeline units run) in dependency-monkey.
        verbose=True,
    )

    click_ctx.exit(int(exit_code != 0))
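
One detail worth noting in the --prescription handling above: when the option value arrives through an environment variable, click delivers it as a single entry, so a lone comma-delimited string is split into separate paths before they are passed to Prescription.load. A standalone illustration with hypothetical paths:

# A tuple as click would deliver it when the option came from an env var.
prescription = ("prescriptions/base.yaml,prescriptions/extra.yaml",)

if len(prescription) == 1:
    # A single entry may carry several comma-delimited paths.
    paths = prescription[0].split(",")
else:
    paths = list(prescription)

print(paths)  # ['prescriptions/base.yaml', 'prescriptions/extra.yaml']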
Example #11
def advise(
    click_ctx: click.Context,
    *,
    beam_width: int,
    count: int,
    limit: int,
    output: str,
    recommendation_type: str,
    requirements_format: str,
    requirements: str,
    predictor: str,
    predictor_config: Optional[str] = None,
    library_usage: Optional[str] = None,
    limit_latest_versions: Optional[int] = None,
    no_pretty: bool = False,
    plot: Optional[str] = None,
    requirements_locked: Optional[str] = None,
    runtime_environment: Optional[str] = None,
    seed: Optional[int] = None,
    pipeline: Optional[str] = None,
    prescription: Optional[Tuple[str, ...]] = None,
    constraints: Optional[str] = None,
    user_stack_scoring: bool = True,
    dev: bool = False,
    labels: Optional[str] = None,
):
    """Advise package and package versions in the given stack or on solely package only."""
    parameters = locals()
    parameters.pop("click_ctx")

    if pipeline and prescription:
        sys.exit("Options --pipeline/--prescription are disjoint")

    if library_usage:
        if os.path.isfile(library_usage):
            try:
                with open(library_usage, "r") as f:
                    library_usage = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load library usage file %r",
                              library_usage)
                raise
        else:
            library_usage = json.loads(library_usage)

        # Show library usage in the final report.
        parameters["library_usage"] = library_usage

    labels_dict = {}
    if labels:
        if os.path.isfile(labels):
            try:
                with open(labels, "r") as f:
                    labels_dict = json.load(f)
            except Exception:
                _LOGGER.error("Failed to load labels file %r", labels)
                raise
        else:
            labels_dict = json.loads(labels)

        # Show labels in the final report.
        parameters["labels"] = labels_dict

    runtime_environment = RuntimeEnvironment.load(runtime_environment)
    recommendation_type = RecommendationType.by_name(recommendation_type)
    _LOGGER.info("Using recommendation type %s",
                 recommendation_type.name.lower())

    requirements_format = PythonRecommendationOutput.by_name(
        requirements_format)
    project = _instantiate_project(requirements,
                                   requirements_locked,
                                   runtime_environment=runtime_environment,
                                   constraints=constraints)

    pipeline_config = None
    if pipeline:
        pipeline_config = PipelineBuilder.load(pipeline)

    parameters["project"] = project.to_dict()
    if pipeline_config is not None:
        parameters["pipeline"] = pipeline_config.to_dict()

    prescription_instance = None
    if prescription:
        if len(prescription) == 1:
            # Click does not support multiple parameters when supplied via env vars. Perform split on delimiter.
            prescription_instance = Prescription.load(
                *prescription[0].split(","))
        else:
            prescription_instance = Prescription.load(*prescription)

    predictor_class, predictor_kwargs = _get_adviser_predictor(
        predictor, recommendation_type)
    predictor_kwargs = _get_predictor_kwargs(
        predictor_config) or predictor_kwargs
    predictor_instance = predictor_class(**predictor_kwargs,
                                         keep_history=plot is not None)

    # Default the seed to the current time; the seed is reported so runs can be reproduced.
    seed = seed if seed is not None else int(time.time())
    _LOGGER.info(
        "Starting resolver using %r predictor with random seed set to %r, predictor parameters: %r",
        predictor_class.__name__,
        seed,
        predictor_kwargs,
    )
    random.seed(seed)
    termial_random.seed(seed)

    resolver = Resolver.get_adviser_instance(
        predictor=predictor_instance,
        project=project,
        labels=labels_dict,
        library_usage=library_usage,
        recommendation_type=recommendation_type,
        limit=limit,
        count=count,
        beam_width=beam_width,
        limit_latest_versions=limit_latest_versions,
        pipeline_config=pipeline_config,
        prescription=prescription_instance,
        cli_parameters=parameters,
    )

    del prescription  # No longer needed, garbage collect it.

    print_func = _PrintFunc(
        partial(
            print_command_result,
            click_ctx=click_ctx,
            analyzer=analyzer_name,
            analyzer_version=analyzer_version,
            output=output,
            pretty=not no_pretty,
        ))

    exit_code = subprocess_run(
        resolver,
        print_func,
        plot=plot,
        result_dict={"parameters": parameters},
        with_devel=dev,
        user_stack_scoring=user_stack_scoring,
        verbose=click_ctx.parent.params.get("verbose", False),
    )

    # Push metrics.
    if _THOTH_METRICS_PUSHGATEWAY_URL:
        _METRIC_INFO.labels(_THOTH_DEPLOYMENT_NAME, analyzer_version).inc()
        _METRIC_DATABASE_SCHEMA_SCRIPT.labels(
            analyzer_name, resolver.graph.get_script_alembic_version_head(),
            _THOTH_DEPLOYMENT_NAME).inc()

        try:
            _LOGGER.debug("Submitting metrics to Prometheus pushgateway %s",
                          _THOTH_METRICS_PUSHGATEWAY_URL)
            push_to_gateway(_THOTH_METRICS_PUSHGATEWAY_URL,
                            job="adviser",
                            registry=prometheus_registry)
        except Exception:
            _LOGGER.exception("An error occurred when pushing metrics")

    click_ctx.exit(int(exit_code != 0))
Example #12
    def test_from_dict_validate_error(self) -> None:
        """Test raising an error if schema validation fails."""
        with pytest.raises(PrescriptionSchemaError):
            Prescription.from_dict(
                {"foo": "bar"},
                prescription_name="thoth",
                prescription_release="2021.06.15",
            )
Example #13
def validate_prescription(prescriptions: str) -> None:
    """Validate the given prescription."""
    _LOGGER.info("Validating prescriptions in %r", prescriptions)
    Prescription.validate(prescriptions)
    _LOGGER.info("Prescriptions %r validated successfully", prescriptions)