Example #1
    def get(self, request):
        # The first token of the metric string names the issue field
        # (or area of testing) whose significant terms are requested.
        metric = request.GET["metric"]
        field = metric.split()[0]

        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        settings = get_training_settings(request.user)

        issues = get_issues(
            fields=[
                field,
                settings.get("mark_up_source"),
                "Description_tr",
                "Assignee",
                "Reporter",
            ],
            filters=filters,
        )

        df = pd.DataFrame.from_records(issues)

        # Resolution and Priority are plain issue fields; any other metric
        # refers to an area of testing and has to be marked up first.
        if field not in ("Resolution", "Priority"):
            if settings["mark_up_source"] and settings["mark_up_entities"]:
                for area in settings["mark_up_entities"]:
                    if area["area_of_testing"] == field:
                        df = mark_up_series(
                            df,
                            settings["mark_up_source"],
                            field,
                            area["entities"],
                        )

        significant_terms = calculate_significance_weights(df, metric)
        context = {"significant_terms": significant_terms}

        return Response(context)
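
In these views the "metric" query parameter packs an issue field and a category value into one space-separated string, which is why metric.split()[0] keeps reappearing: the first token selects the field (or area of testing) to analyse, the rest is the category. For area-of-testing metrics the string is just the area name. A tiny illustration of the convention; the concrete values below are made up, not taken from the project:

# Hypothetical metric strings following the "<Field> <Value>" convention
# built by get_term_metrics() further down; the values are illustrative only.
metric = "Priority Major"
field, _, value = metric.partition(" ")
print(field)  # "Priority" -> which column to fetch and analyse
print(value)  # "Major"    -> the category whose significant terms are wanted
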
Example #2
def get_term_metrics(issues: pd.DataFrame, aot: dict) -> list:
    """Generates metrics for significant terms calculation.

    Parameters:
    ----------
    issues:
        Bug reports.
    aot:
        Areas of testing.

    Returns:
    ----------
        Metrics represented as Metric Value pairs.
    """

    metrics = []

    # For regular issue fields, every category that covers a large enough
    # share of the reports (see check_required_percentage) becomes a
    # "<Field> <Value>" metric.
    for metric in SIGNIFICANT_TERMS_METRICS:
        series = issues[metric]
        for category in series.dropna().unique().tolist():
            if category and check_required_percentage(series, category):
                metrics.append(" ".join([metric, category]))

    if aot:
        # Areas of testing are marked up on the fly; an area is kept as a
        # metric only if enough issues are flagged for it.
        aot_metrics = []
        for area in aot["mark_up_entities"]:
            series = mark_up_series(
                issues, aot["source_field"], "aot", area["entities"]
            ).aot
            if check_required_percentage(series, 1):
                aot_metrics.append(area["area_of_testing"])

        metrics.extend(aot_metrics)

    return metrics
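
get_term_metrics() above only keeps a metric when its category is frequent enough in the data. check_required_percentage() and SIGNIFICANT_TERMS_METRICS are not shown in these snippets, so the standalone sketch below substitutes a simple share-of-rows rule and a guessed field list just to make the loop runnable; the project's real threshold and field list may differ.

# Standalone sketch of the frequency-threshold idea used by get_term_metrics().
import pandas as pd

SIGNIFICANT_TERMS_METRICS = ["Priority", "Resolution"]  # assumed field list

def check_required_percentage(series: pd.Series, value) -> bool:
    # Assumption: a category qualifies if it covers at least 5% of the rows.
    return (series == value).mean() >= 0.05

issues = pd.DataFrame(
    {
        "Priority": ["Major", "Major", "Minor", "Major"],
        "Resolution": ["Fixed", "Fixed", None, "Duplicate"],
    }
)

metrics = []
for metric in SIGNIFICANT_TERMS_METRICS:
    series = issues[metric]
    for category in series.dropna().unique().tolist():
        if category and check_required_percentage(series, category):
            metrics.append(" ".join([metric, category]))

print(metrics)
# ['Priority Major', 'Priority Minor', 'Resolution Fixed', 'Resolution Duplicate']
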
Example #3
    def post(self, request):
        user = request.user

        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        fields = get_issues_fields(request.user)
        issues = get_issues_dataframe(filters=filters, fields=fields)

        if issues.empty:
            raise BugsNotFoundWarning

        source_field = get_source_field(user)
        if source_field not in issues.columns:
            raise InvalidSourceField

        resolutions = [
            resolution["value"] for resolution in get_bug_resolutions(user)
        ]

        areas_of_testing = []

        # One mark-up column is added per configured area of testing; issues
        # that match none of them are labelled "Other" below.
        mark_up_entities = get_mark_up_entities(user)
        if source_field:
            areas_of_testing = [
                area["area_of_testing"] for area in mark_up_entities
            ] + ["Other"]
            for area in mark_up_entities:
                issues = mark_up_series(
                    issues,
                    source_field,
                    area["area_of_testing"],
                    area["entities"],
                )
            issues = mark_up_other_data(issues, areas_of_testing)

        train(
            user,
            issues,
            areas_of_testing,
            resolutions,
        )

        clear_cache(
            ["qa_metrics:predictions_page", "qa_metrics:predictions_table"],
            request.user.id,
        )

        context = {
            "result": "success",
        }
        return Response(context, status=200)
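
Examples #3 and #4 lean on mark_up_series() and mark_up_other_data(), whose implementations are not part of these snippets. Judging only from the call sites (and from check_required_percentage(series, 1) in Example #2), mark_up_series seems to add a column named after the area of testing that flags issues whose source field mentions one of the area's entities, and mark_up_other_data then buckets unmatched issues under "Other". The sketch below is that guess written out in pandas, not the project's actual code:

# Assumption-laden sketch of the mark-up helpers, inferred from their call
# sites above.
import pandas as pd

def mark_up_series(df, source_field, area, entities):
    # Flag issues whose source field mentions any of the area's entities
    # (naive substring match for the purpose of this sketch).
    pattern = "|".join(entities)
    df = df.copy()
    df[area] = df[source_field].str.contains(pattern, case=False, na=False).astype(int)
    return df

def mark_up_other_data(df, areas_of_testing):
    # Issues matched by no area fall into the catch-all "Other" bucket.
    area_columns = [a for a in areas_of_testing if a != "Other" and a in df.columns]
    df = df.copy()
    df["Other"] = (df[area_columns].sum(axis=1) == 0).astype(int)
    return df

issues = pd.DataFrame({"Components": ["login ui", "payment api", "docs"]})
issues = mark_up_series(issues, "Components", "UI", ["ui", "frontend"])
issues = mark_up_other_data(issues, ["UI", "Other"])
print(issues)  # UI flags row 0; rows 1 and 2 fall into "Other"
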
Example #4
    def post(self, request):
        instance = request.user

        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        fields = get_issues_fields(request.user)
        df = pd.DataFrame(get_issues(filters=filters, fields=fields))

        # New predictions will be appended after training.
        delete_old_predictions()

        settings = get_training_settings(request.user)

        if settings["mark_up_source"] not in df.columns:
            raise InvalidMarkUpSource

        resolutions = [
            resolution["value"] for resolution in settings["bug_resolution"]
        ]

        areas_of_testing = []

        if settings["mark_up_source"]:
            areas_of_testing = [
                area["area_of_testing"]
                for area in settings["mark_up_entities"]
            ] + ["Other"]
            for area in settings["mark_up_entities"]:
                df = mark_up_series(
                    df,
                    settings["mark_up_source"],
                    area["area_of_testing"],
                    area["entities"],
                )
            df = mark_up_other_data(df, areas_of_testing)

        delete_training_data(get_archive_path(instance))

        train(
            instance,
            df,
            areas_of_testing,
            resolutions,
        )

        context = {
            "result": "success",
        }

        # Recompute predictions in a separate process so the response is not
        # blocked while they are appended.
        process = Process(target=append_predictions, args=(request.user,))
        process.start()

        return Response(context, status=200)
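
Example #4 pushes append_predictions() into a separate process so the HTTP response is returned without waiting for the predictions to be rebuilt. The pattern itself is plain multiprocessing; in the sketch below append_predictions is a stand-in, since the real function is not shown here:

# Minimal background-job sketch using the standard library, mirroring the
# Process(target=append_predictions, args=(request.user,)) call above.
from multiprocessing import Process

def append_predictions(user):
    # Placeholder for the project's long-running prediction step.
    print(f"appending predictions for {user}")

if __name__ == "__main__":
    process = Process(target=append_predictions, args=("some-user",))
    process.start()  # returns immediately; work continues in the child process
    # ... a view would return its Response here ...
    process.join()   # only needed in this demo so the script waits for the child
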
Example #5
    def post(self, request):
        metric = request.GET["metric"]
        field = metric.split()[0]
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        source_field = get_source_field(request.user)

        issues = get_issues_dataframe(
            fields=[
                field,
                source_field,
                "Description_tr",
                "Assignee",
                "Reporter",
            ],
            filters=filters,
        )

        if issues.empty:
            return Response({})

        mark_up_entities = get_mark_up_entities(request.user)
        if field not in ("Resolution", "Priority"):
            if source_field and mark_up_entities:
                for area in mark_up_entities:
                    if area["area_of_testing"] == field:
                        issues = mark_up_series(
                            issues,
                            source_field,
                            field,
                            area["entities"],
                        )

        significant_terms = calculate_significance_weights(issues, metric)
        context = {"significant_terms": significant_terms}

        return Response(context)
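
All five views start by pulling the user's saved filters out of Redis, though two key layouts appear: Examples #1 and #4 read analysis_and_training:<user id> and take its "filters" entry, while Examples #3 and #5 read user:<user id>:analysis_and_training:filters directly. Assuming redis-py and json.loads/json.dumps (the loads import is not visible in the snippets, so JSON is a guess), the round trip looks roughly like this:

# Rough sketch of the filter cache round trip; assumes redis-py and json,
# and follows the key layout of Examples #3 and #5.
from json import dumps, loads
import redis

redis_conn = redis.Redis()

def save_filters(user_id, filters):
    redis_conn.set(f"user:{user_id}:analysis_and_training:filters", dumps(filters))

def load_filters(user_id):
    cache = redis_conn.get(f"user:{user_id}:analysis_and_training:filters")
    return loads(cache) if cache else None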