def test_create_adviser_dataframe_with_results(self) -> None:
        """Test create of adviser dataframe from adviser documents."""
        adviser_files = Adviser.aggregate_adviser_results(
            repo_path=self._ADVISER_FOLDER_PATH, is_local=True)

        adviser_version = "0.21.1"
        justifications_collected = []
        justifications_collected = Adviser.create_adviser_dataframe(
            adviser_version=adviser_version,
            adviser_files=adviser_files,
            justifications_collected=justifications_collected,
        )

        adviser_dataframe = pd.DataFrame(justifications_collected)

        assert adviser_dataframe.shape[0] == 3

    def test_create_adviser_dataframe_heatmap(self) -> None:
        """Test creation of the adviser dataframe for the heatmap plot from adviser documents."""
        adviser_files = Adviser.aggregate_adviser_results(
            repo_path=self._ADVISER_FOLDER_PATH, is_local=True)

        adviser_version = "0.21.1"
        justifications_collected = []
        justifications_collected = Adviser.create_adviser_dataframe(
            adviser_version=adviser_version,
            adviser_files=adviser_files,
            justifications_collected=justifications_collected,
        )

        adviser_dataframe = pd.DataFrame(justifications_collected)

        adviser_heatmap_df = Adviser.create_adviser_results_dataframe_heatmap(
            adviser_type_dataframe=adviser_dataframe,
            number_days=1,
        )

        last_date = adviser_heatmap_df.columns[-1]
        csv = adviser_heatmap_df[[last_date]].to_csv(header=False)

        assert csv
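
These test methods use pd, Adviser, and self._ADVISER_FOLDER_PATH without showing the enclosing module. A minimal scaffold under which they would run might look like the sketch below; the import path for Adviser and the fixture folder name are assumptions inferred from the snippet, not confirmed by it.

# Minimal sketch of the test module scaffold. The import path and the
# fixture folder below are guesses, not confirmed by the snippet.
import pandas as pd

from thoth.report_processing.components.adviser import Adviser


class TestAdviser:
    """Tests for the adviser dataframe helpers shown in these examples."""

    # Assumption: a folder of adviser JSON documents kept as test fixtures.
    _ADVISER_FOLDER_PATH = "tests/adviser"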
Example #3
async def on_thamos_workflow_finished(*, action, base_repo_url, check_run_id,
                                      installation, payload, **kwargs):
    """Advise workflow has finished, now we need to send a check-run to the PR."""
    _LOGGER.info("on_thamos_workflow_finished: %s", kwargs)

    github_api: RawGitHubAPI = RUNTIME_CONTEXT.app_installation_client
    _LOGGER.info("on_thamos_workflow_finished: github_api=%s", github_api)

    # base_repo_url is expected to be the API URL, i.e. https://api.github.com/repos/<owner>/<name>.
    repo = base_repo_url.split("/", 4)[-1]  # e.g.: thoth-station/Qeb-Hwt
    check_runs_url = f"https://api.github.com/repos/{repo}/check-runs/{check_run_id}"
    _LOGGER.info("on_thamos_workflow_finished: check_runs_url=%s",
                 check_runs_url)

    advise_url: str
    conclusion: str
    justification: str
    report: str
    text: str
    report_message: str

    async with aiohttp.ClientSession() as session:

        _LOGGER.info("on_thamos_workflow_finished: payload=%s", payload)

        if "exception" in payload:
            exception = payload["exception"]
        else:
            exception = None

        analysis_id = payload["analysis_id"]
        _LOGGER.info("on_thamos_workflow_finished: analysis_id=%s",
                     analysis_id)

        advise_url = urljoin(ADVISE_API_URL, analysis_id)
        _LOGGER.info("on_thamos_workflow_finished: advise_url=%s", advise_url)

        if exception:
            _LOGGER.info("on_thamos_workflow_finished: exception=%s",
                         exception)

            if "error_type" in payload:
                error_type: str = payload["error_type"]

                if error_type and error_type == "MissingThothYamlFile":
                    conclusion = "action_required"
                else:
                    conclusion = "failure"

            else:
                conclusion = "failure"

            justification = exception

            if exception == "Internal server error occurred, please contact administrator with provided details.":
                justification += (
                    "\nThoth Team is working to solve the issue as soon as possible. Thanks for your patience!"
                )

            report = "Report not produced."
            text = report
            report_message = ""

        if analysis_id:

            # TODO: Find an alternative solution to this workaround.
            attempts = 1
            max_attempts = 6
            while attempts < max_attempts:
                try:
                    async with session.get(advise_url) as response:
                        _LOGGER.info(
                            "on_thamos_workflow_finished: response=%s",
                            response)
                        _LOGGER.info(
                            "on_thamos_workflow_finished: attempts=%s",
                            attempts)
                    if response.status == 200:
                        break
                    attempts += 1
                except Exception:
                    # Count failed requests as well, otherwise a request that
                    # raises would retry forever.
                    attempts += 1

            async with session.get(advise_url) as response:

                if response.status != 200:
                    conclusion = "failure"
                    justification = "Could not retrieve analysis results."
                    report = ""
                    text = "Report cannot be provided, Please open an issue on Qeb-Hwt."
                    report_message = ""

                else:
                    adviser_payload: dict = await response.json()

                    adviser_result: dict = adviser_payload["result"]

                    if adviser_result["error"]:

                        error_msg: str = adviser_result["error_msg"]
                        conclusion = "failure"
                        justification = f"Analysis has encountered errors: {error_msg}."

                        if error_msg == "No direct dependencies found":
                            conclusion = "neutral"

                        if adviser_result["report"]:
                            report = adviser_result["report"]
                            text = "See the report below for more details."
                            report_message = "See the document below for more details."

                        else:
                            conclusion = "failure"
                            justification = f"Analysis has encountered errors: {error_msg}."

                            if adviser_result["report"]:
                                report = adviser_result["report"]
                                text = "See the report below for more details."
                                report_message = "See the document below for more details."

                            else:
                                text = "Analysis report is missing."
                                report_message = "See the document below for more details."

                    else:
                        conclusion = "success"

                        adviser_report: dict = adviser_result["report"]

                        justification = Adviser.create_pretty_report_from_json(
                            report=adviser_report, is_justification=True)

                        # Complete report
                        report = Adviser.create_pretty_report_from_json(
                            report=adviser_report)
                        _LOGGER.info(
                            "on_thamos_workflow_finished: len(report)=%s",
                            len(report))

                        # TODO: Split report results to include only relevant information
                        if len(report) > MAX_CHARACTERS_LENGTH:
                            # Truncate so the check-run output stays within limits.
                            report = report[:MAX_CHARACTERS_LENGTH]
                            _LOGGER.warning(
                                "on_thamos_workflow_finished: reduced len(report)=%s",
                                len(report))

                        text = f"Analysis report:\n{report}"
                        report_message = "See the document below for more details."
        else:
            analysis_id = "No-analysis-run"

        _LOGGER.info(
            "on_thamos_workflow_finished: sending check run: check_runs_url=%s",
            check_runs_url)
        _LOGGER.info(
            "on_thamos_workflow_finished: sending check run: conclusion=%s",
            conclusion)
        _LOGGER.info(
            "on_thamos_workflow_finished: sending check run: advise_url=%s",
            advise_url)
        _LOGGER.info(
            "on_thamos_workflow_finished: sending check run: analysis_id=%s",
            analysis_id)
        _LOGGER.info("on_thamos_workflow_finished: sending check run: text=%s",
                     text)
        _LOGGER.info("on_thamos_workflow_finished: sending check run: text=%s",
                     report_message)

    try:
        _LOGGER.info(
            "on_thamos_workflow_finished: installation_id=%s, check_runs_url=%s",
            installation, check_runs_url)

        await github_api.patch(
            check_runs_url,
            preview_api_version="antiope",
            data={
                "name": CHECK_RUN_NAME,
                "status": "completed",
                "conclusion": conclusion,
                "completed_at": f"{datetime.utcnow().isoformat()}Z",
                "details_url": advise_url,
                "external_id": analysis_id,
                "output": {
                    "title":
                    "Thoth's Advise",
                    "text":
                    text,
                    "summary":
                    (f"Thoth's adviser finished with conclusion: '{conclusion}'\n\n"
                     f"Justification:\n{justification}\n\n"
                     f"{report_message}"),
                },
            },
        )
    except gidgethub.BadRequest as exc:
        _LOGGER.error(exc)

    _LOGGER.info(
        "on_thamos_workflow_finished: finished with `thamos advise`, updated %s",
        check_run_id)
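
The polling workaround in the handler above retries the advise endpoint in a tight loop with no delay between attempts. One alternative, sketched below under the assumption that plain aiohttp and asyncio are available, is a small helper with exponential backoff; poll_advise_result is a hypothetical name, not part of the original handler.

import asyncio

import aiohttp


async def poll_advise_result(advise_url: str, max_attempts: int = 6) -> bool:
    """Poll the advise endpoint, backing off between attempts.

    Hypothetical helper; a sketch of an alternative to the fixed retry
    loop above, not the handler's actual implementation.
    """
    async with aiohttp.ClientSession() as session:
        for attempt in range(max_attempts):
            try:
                async with session.get(advise_url) as response:
                    if response.status == 200:
                        return True
            except aiohttp.ClientError:
                pass  # Treat a failed request like a not-ready response.

            # Wait 1s, 2s, 4s, ... before the next poll.
            await asyncio.sleep(2 ** attempt)
    return False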
Example #4
def explore_adviser_files(
    current_initial_date: datetime.date,
    current_end_date: datetime.date,
    total_justifications: List[Dict[str, Any]],
    store_on_ceph: bool = False,
    store_on_public_bucket: bool = False,
) -> Dict[str, pd.DataFrame]:
    """Explore adviser files to gather info for contributors."""
    daily_processed_dataframes: Dict[str, pd.DataFrame] = {}

    adviser_files = Adviser.aggregate_adviser_results(
        start_date=current_initial_date, end_date=current_end_date)

    if not adviser_files:
        _LOGGER.info("No adviser files identifed!")
        return daily_processed_dataframes

    dataframes = Adviser.create_adviser_dataframes(adviser_files=adviser_files)

    daily_justifications = retrieve_processed_justifications_dataframe(
        date_=current_initial_date, dataframes=dataframes)
    daily_processed_dataframes["adviser_justifications"] = pd.DataFrame(
        daily_justifications)

    if not daily_processed_dataframes[
            "adviser_justifications"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser justifications:"
            f'\n{daily_processed_dataframes["adviser_justifications"].to_csv(header=False, sep="`", index=False)}'
        )

    daily_statistics = retrieve_processed_statistics_dataframe(
        date_=current_initial_date, dataframes=dataframes)
    daily_processed_dataframes["adviser_statistics"] = pd.DataFrame(
        daily_statistics)

    if not daily_processed_dataframes[
            "adviser_statistics"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser statistics success rate:"
            f'\n{daily_processed_dataframes["adviser_statistics"].to_csv(header=False, sep="`", index=False)}'
        )

    daily_inputs_info = retrieve_processed_inputs_info_dataframe(
        date_=current_initial_date, dataframes=dataframes)
    daily_processed_dataframes["adviser_integration_info"] = pd.DataFrame(
        daily_inputs_info["integration_info"])
    daily_processed_dataframes["adviser_recommendation_info"] = pd.DataFrame(
        daily_inputs_info["recommendation_info"])
    daily_processed_dataframes["adviser_solver_info"] = pd.DataFrame(
        daily_inputs_info["solver_info"])
    daily_processed_dataframes["adviser_base_image_info"] = pd.DataFrame(
        daily_inputs_info["base_image_info"])
    daily_processed_dataframes["adviser_hardware_info"] = pd.DataFrame(
        daily_inputs_info["hardware_info"])

    if not daily_processed_dataframes[
            "adviser_integration_info"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser integration info stats:"
            f'\n{daily_processed_dataframes["adviser_integration_info"].to_csv(header=False, sep="`", index=False)}'
        )

    if not daily_processed_dataframes[
            "adviser_recommendation_info"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser recomendation info stats:"
            f'\n{daily_processed_dataframes["adviser_recommendation_info"].to_csv(header=False, sep="`", index=False)}'
        )

    if not daily_processed_dataframes[
            "adviser_solver_info"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser solver info stats:"
            f'\n{daily_processed_dataframes["adviser_solver_info"].to_csv(header=False, sep="`", index=False)}'
        )

    if not daily_processed_dataframes[
            "adviser_base_image_info"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser base image info stats:"
            f'\n{daily_processed_dataframes["adviser_base_image_info"].to_csv(header=False, sep="`", index=False)}'
        )

    if not daily_processed_dataframes[
            "adviser_hardware_info"].empty and not store_on_ceph:
        _LOGGER.info(
            "Adviser hardware info stats:"
            f'\n{daily_processed_dataframes["adviser_hardware_info"].to_csv(header=False, sep="`", index=False)}'
        )

    if store_on_ceph:
        for result_class, processed_df in daily_processed_dataframes.items():
            save_results_to_ceph(
                processed_df=processed_df,
                result_class=result_class,
                date_filter=current_initial_date,
                store_to_public_ceph=store_on_public_bucket,
            )

    total_justifications += daily_justifications

    return daily_processed_dataframes
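
As a usage sketch, explore_adviser_files can be driven one day at a time over a reporting window, with total_justifications accumulating across calls because the function extends the list in place. The date window below is an illustrative assumption, not a value from the original.

import datetime
from typing import Any, Dict, List

total_justifications: List[Dict[str, Any]] = []

# Illustrative window; any start/end dates would do.
current = datetime.date(2021, 1, 1)
end = datetime.date(2021, 1, 8)

while current < end:
    daily_dataframes = explore_adviser_files(
        current_initial_date=current,
        current_end_date=current + datetime.timedelta(days=1),
        total_justifications=total_justifications,
        store_on_ceph=False,
    )
    current += datetime.timedelta(days=1)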
def test_get_adviser_files(self) -> None:
        """Test retrieving adviser results from local path."""
        adviser_files = Adviser.aggregate_adviser_results(
            repo_path=self._ADVISER_FOLDER_PATH, is_local=True)
        assert adviser_files