Example 1
    def post(self, request):
        user = request.user
        metric = request.data.get("metric")
        value = request.data.get("value")
        probabilities = loads(
            redis_conn.get(
                f"user:{user.id}:description_assessment:probabilities"))
        highlighted_terms = []

        if metric == "resolution":
            # Resolution probabilities arrive nested per resolution name;
            # flatten them so probabilities[metric][value] works below.
            for resolution in probabilities[metric].copy():
                probabilities[metric].update(probabilities[metric][resolution])

        # Only proceed when the predicted probability for the selected value
        # clears the 5% threshold.
        if probabilities[metric][value] > 0.05:
            description = loads(
                redis_conn.get(
                    f"user:{user.id}:description_assessment:description"))

            index = value
            if metric != "areas_of_testing":
                index = f"{metric.capitalize()}_{value}"

            top_terms = get_top_terms(user)[index].dropna().tolist()

            tfidf = StemmedTfidfVectorizer(stop_words=STOP_WORDS)
            tfidf.fit_transform([description])
            for term in tfidf.get_feature_names():
                if term in top_terms:
                    highlighted_terms.append(term)

        context = {"terms": highlighted_terms}

        return Response(context)
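
Every snippet on this page leans on a module-level Redis connection (redis_conn) and the standard-library JSON helpers (loads/dumps), neither of which is shown. A minimal sketch of that assumed setup, with placeholder connection parameters:

    # Assumed setup for the examples on this page; host, port and db are
    # placeholders, not the source project's actual configuration.
    from json import loads, dumps

    import redis

    redis_conn = redis.Redis(host="localhost", port=6379, db=0)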
Example 2
    def post(self, request):
        highlighted_terms = []
        user = request.user
        metric = (
            request.data.get("metric")
            if request.data.get("metric") != "Areas of testing"
            else "areas_of_testing"
        )
        value = request.data.get("value")
        probabilities = loads(redis_conn.get(f"probabilities:{user.id}"))

        if probabilities[metric][value] > 0.05:
            archive_path = get_archive_path(user)
            description = loads(redis_conn.get(f"description:{user.id}"))

            index = metric
            if metric != "areas_of_testing":
                index = f"{metric}_{value}"
            top_terms = (
                read_from_archive(archive_path, TOP_TERMS_FILENAME)[index]
                .dropna()
                .tolist()
            )
            tfidf = StemmedTfidfVectorizer(stop_words=STOP_WORDS)
            tfidf.fit_transform([description])
            for term in tfidf.get_feature_names():
                if term in top_terms:
                    highlighted_terms.append(term)

        context = {"terms": highlighted_terms}

        return Response(context)
Example 3
    def post(self, request):
        user = request.user
        offset = int(request.data.get("offset", DEFAULT_OFFSET))
        limit = int(request.data.get("limit", DEFAULT_LIMIT))

        cache = redis_conn.get(f"user:{user.id}:qa_metrics:filters")
        filters = loads(cache) if cache else [UNRESOLVED_BUGS_FILTER]

        check_training_files(user)

        cached_predictions = redis_conn.get(
            f"user:{user.id}:qa_metrics:predictions_table"
        )

        if cached_predictions:
            predictions = DataFrame.from_records(loads(cached_predictions))
        else:
            predictions_table_fields = get_predictions_table_fields(user)

            issues = calculate_issues_predictions(
                user, predictions_table_fields, filters
            )

            if issues.empty:
                return Response({})

            predictions_table_fields.remove("Description_tr")
            predictions_table_fields.remove("Key")

            predictions = get_predictions_table(
                issues=issues,
                fields_settings=predictions_table_fields,
                offset=None,
                limit=None,
            )

            # Cache the full table for 30 minutes; pagination happens below,
            # per request.
            redis_conn.set(
                name=f"user:{user.id}:qa_metrics:predictions_table",
                value=dumps(list(predictions.T.to_dict().values())),
                ex=60 * 30,
            )

        predictions = list(
            paginate_bugs(df=predictions, offset=offset, limit=limit)
            .T.to_dict()
            .values()
        )

        return Response(predictions)
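
paginate_bugs is not defined on this page; from the call sites (a DataFrame in, integer offset and limit) it presumably slices rows by position. A minimal sketch under that assumption:

    from pandas import DataFrame

    def paginate_bugs(df: DataFrame, offset: int, limit: int) -> DataFrame:
        # Hypothetical implementation: return one page of rows by position.
        return df.iloc[offset:offset + limit]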
Example 4
    def post(self, request):
        period = request.GET["period"]
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        issues = get_issues_dataframe(
            fields=["Key", "Created", "Resolved"], filters=filters
        )

        if issues.empty:
            return Response({})

        coordinates = get_defect_submission(issues, period)
        context = {
            **coordinates,
            **get_max_amount(coordinates),
            "period": period,
        }

        redis_conn.set(
            f"user:{request.user.id}:analysis_and_training:defect_submission",
            dumps(context),
        )

        return Response(context)
Example 5
    def get(self, request):
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        user = request.user

        issues = get_issues_dataframe(
            fields=[
                get_source_field(user),
                "Priority",
                "Resolution",
                "Description_tr",
                "Assignee",
                "Reporter",
            ],
            filters=filters,
        )

        if issues.empty:
            return Response({})

        settings = {
            "source_field": get_source_field(user),
            "bug_resolution": get_bug_resolutions(user),
            "mark_up_entities": get_mark_up_entities(user),
        }

        significant_terms = get_significant_terms(issues, settings)
        context = {"significant_terms": significant_terms}

        return Response(context)
Example 6
    def get(self, request):
        metric = request.GET["metric"]
        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        settings = get_training_settings(request.user)

        issues = get_issues(
            fields=[
                metric.split()[0],
                settings.get("mark_up_source"),
                "Description_tr",
                "Assignee",
                "Reporter",
            ],
            filters=filters,
        )

        df = pd.DataFrame.from_records(issues)

        if metric.split()[0] not in ("Resolution", "Priority"):
            if settings["mark_up_source"] and settings["mark_up_entities"]:
                for area in settings["mark_up_entities"]:
                    if area["area_of_testing"] == metric.split()[0]:
                        df = mark_up_series(
                            df,
                            settings["mark_up_source"],
                            metric.split()[0],
                            area["entities"],
                        )

        significant_terms = calculate_significance_weights(df, metric)
        context = {"significant_terms": significant_terms}

        return Response(context)
Example 7
def test_updated_settings_backup(sql_conn, test_user, default_settings):
    user_id = sql_conn.execute(
        "SELECT id FROM authentication_user WHERE email=(?)",
        (test_user["email"],),
    ).fetchone()[0]
    cached_settings = loads(redis_conn.get(f"settings:backup:{user_id}"))

    assert cached_settings == default_settings
Example 8
    def post(self, request):
        user = request.user

        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        fields = get_issues_fields(request.user)
        issues = get_issues_dataframe(filters=filters, fields=fields)

        if issues.empty:
            raise BugsNotFoundWarning

        source_field = get_source_field(user)
        if source_field not in issues.columns:
            raise InvalidSourceField

        # An empty resolutions list from the settings simply yields [] here.
        resolutions = [
            resolution["value"] for resolution in get_bug_resolutions(user)
        ]

        areas_of_testing = []

        mark_up_entities = get_mark_up_entities(user)
        if source_field:
            areas_of_testing = [
                area["area_of_testing"] for area in mark_up_entities
            ] + ["Other"]
            for area in mark_up_entities:
                issues = mark_up_series(
                    issues,
                    source_field,
                    area["area_of_testing"],
                    area["entities"],
                )
            issues = mark_up_other_data(issues, areas_of_testing)

        train(
            user,
            issues,
            areas_of_testing,
            resolutions,
        )

        clear_cache(
            ["qa_metrics:predictions_page", "qa_metrics:predictions_table"],
            request.user.id,
        )

        context = {
            "result": "success",
        }
        return Response(context, status=200)
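
clear_cache is likewise not shown; the call sites pass a list of key suffixes plus a user id, so it plausibly deletes the matching per-user entries. A sketch under that assumption:

    def clear_cache(key_suffixes, user_id):
        # Hypothetical helper: drop the per-user cache entries so they are
        # rebuilt on the next request.
        for suffix in key_suffixes:
            redis_conn.delete(f"user:{user_id}:{suffix}")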
Example 9
    def post(self, request):
        new_filters = request.data.get("filters", [])
        filters = get_qa_metrics_settings(request.user)
        fields = [field["name"] for field in filters]
        filters = update_drop_down_fields(
            filters,
            get_issues_dataframe(
                fields=fields, filters=[UNRESOLVED_BUGS_FILTER]
            ),
        )

        if new_filters:
            for new_filter in new_filters:
                for filter_ in filters:
                    if new_filter["name"] == filter_["name"]:
                        filter_.update(
                            {
                                "current_value": new_filter["current_value"],
                                "filtration_type": new_filter[
                                    "filtration_type"
                                ],
                                "exact_match": new_filter["exact_match"],
                            }
                        )
        filters += [UNRESOLVED_BUGS_FILTER]

        cached_filters = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:filters"
        )
        cached_filters = loads(cached_filters) if cached_filters else []

        context = {
            "records_count": {
                "total": get_issue_count(filters=[UNRESOLVED_BUGS_FILTER]),
                "filtered": get_issue_count(filters),
            },
            "filters": filters,
        }

        if not cached_filters or not check_filters_equality(
            filters, cached_filters
        ):
            clear_cache(
                [
                    "qa_metrics:predictions_table",
                    "qa_metrics:predictions_page",
                ],
                request.user.id,
            )
            # Cache each context entry ("records_count", "filters") under
            # its own per-user key.
            for element in context:
                redis_conn.set(
                    f"user:{request.user.id}:qa_metrics:{element}",
                    dumps(context.get(element)),
                )

        return Response(context)
Example 10
    def post(self, request):
        instance = request.user

        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        fields = get_issues_fields(request.user)
        df = pd.DataFrame(get_issues(filters=filters, fields=fields))

        # New predictions will be appended after training.
        delete_old_predictions()

        settings = get_training_settings(request.user)

        if settings["mark_up_source"] not in df.columns:
            raise InvalidMarkUpSource

        resolutions = [
            resolution["value"] for resolution in settings["bug_resolution"]
        ]

        areas_of_testing = []

        if settings["mark_up_source"]:
            areas_of_testing = [
                area["area_of_testing"]
                for area in settings["mark_up_entities"]
            ] + ["Other"]
            for area in settings["mark_up_entities"]:
                df = mark_up_series(
                    df,
                    settings["mark_up_source"],
                    area["area_of_testing"],
                    area["entities"],
                )
            df = mark_up_other_data(df, areas_of_testing)

        delete_training_data(get_archive_path(instance))

        train(
            instance,
            df,
            areas_of_testing,
            resolutions,
        )

        context = {
            "result": "success",
        }

        # Recompute predictions in a background process so the response
        # returns immediately.
        process = Process(target=append_predictions, args=(request.user,))
        process.start()

        return Response(context, status=200)
Example 11
    def get(self, request):
        cached_settings = redis_conn.get(
            f"settings:qa_metrics:{request.user.id}"
        )
        if cached_settings:
            return Response(loads(cached_settings))

        qa_metrics_settings = get_qa_metrics_settings(request.user)
        redis_conn.set(
            f"settings:qa_metrics:{request.user.id}",
            dumps(qa_metrics_settings),
        )

        return Response(qa_metrics_settings)
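
These settings are cached without a TTL, so the write path has to invalidate the key when settings change; that code is not on this page. A hedged sketch of the companion step (the function name is an assumption):

    def invalidate_qa_metrics_settings_cache(user_id):
        # Hypothetical companion to the GET handler above: call this whenever
        # qa_metrics settings are written, so a stale copy is never served.
        redis_conn.delete(f"settings:qa_metrics:{user_id}")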
Example 12
    def get(self, request):
        period = request.GET["period"]
        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        bugs = pd.DataFrame(
            get_issues(fields=["Key", "Created"], filters=filters)
        )

        if bugs.empty:
            return Response({})

        coordinates = calculate_defect_submission(bugs, period)

        context = {"submission_chart": coordinates}

        return Response(context)
Example 13
    def get(self, request):
        cached_settings = redis_conn.get(
            f"settings:predictions_table:{request.user.id}"
        )
        if cached_settings:
            return Response(loads(cached_settings))

        predictions_table_settings = get_predictions_table_settings(
            request.user
        )
        redis_conn.set(
            f"settings:predictions_table:{request.user.id}",
            dumps(predictions_table_settings),
        )

        return Response(predictions_table_settings)
Example 14
    def get(self, request):
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None

        fields = get_issues_fields(request.user.id)
        issues = get_issues_dataframe(fields=fields, filters=filters)

        if issues.empty:
            return Response({})

        freq_terms = calculate_frequently_terms(issues)

        context = {"frequently_terms": freq_terms}

        return Response(context)
Example 15
    def get(self, request):
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        if cache:
            filters = loads(cache)
        else:
            fields = get_issues_fields(request.user)
            filters = get_filters(
                request.user, issues=get_issues_dataframe(fields=fields)
            )

            redis_conn.set(
                name=f"user:{request.user.id}:analysis_and_training:filters",
                value=dumps(filters),
                ex=60 * 30,
            )

        return Response(filters)
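
Examples 3, 15 and 20 all repeat the same dance: check Redis, rebuild on a miss, write back with a 30-minute TTL. A generic helper along these lines (not part of the source project) would capture the pattern:

    def cached_json(key, build, ttl=60 * 30):
        # Generic read-through cache over redis_conn; build is any
        # zero-argument callable producing a JSON-serialisable value.
        cached = redis_conn.get(key)
        if cached:
            return loads(cached)
        value = build()
        redis_conn.set(name=key, value=dumps(value), ex=ttl)
        return value

With it, the body of this handler would reduce to a single cached_json call wrapping get_filters.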
Example 16
    def get(self, request):
        total_count = get_issue_count(filters=[UNRESOLVED_BUGS_FILTER])

        if not total_count:
            return Response({})

        cache = redis_conn.get(f"user:{request.user.id}:qa_metrics:filters")

        filters = [UNRESOLVED_BUGS_FILTER]
        if cache:
            filters = loads(cache)

        context = {
            "records_count": {
                "total": total_count,
                "filtered": get_issue_count(filters),
            },
        }
        return Response(context)
Example 17
    def get(self, request):
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None

        fields = get_issues_fields(request.user.id)
        issues = get_issues_dataframe(fields=fields, filters=filters)

        if issues.empty:
            return Response({})

        statistics = calculate_statistics(
            issues,
            ["Comments", "Attachments", "Time to Resolve"],
        )
        context = {"statistics": statistics}

        return Response(context)
Example 18
    def get(self, request):
        user = request.user

        check_training_files(user)

        cached_filters = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:filters"
        )
        if cached_filters:
            filters = loads(cached_filters)
        else:
            filters = get_qa_metrics_settings(user)
            fields = [field["name"] for field in filters]

            issues = get_issues_dataframe(
                fields=fields, filters=[UNRESOLVED_BUGS_FILTER]
            )
            # Note: the rebuilt filters are returned but not written back to
            # Redis here.
            filters = update_drop_down_fields(filters, issues)

        return Response(filters)
Example 19
    def post(self, request):
        metric = request.GET["metric"]
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        source_field = get_source_field(request.user)

        issues = get_issues_dataframe(
            fields=[
                metric.split()[0],
                source_field,
                "Description_tr",
                "Assignee",
                "Reporter",
            ],
            filters=filters,
        )

        if issues.empty:
            return Response({})

        mark_up_entities = get_mark_up_entities(request.user)
        if metric.split()[0] not in ("Resolution", "Priority"):
            if source_field and mark_up_entities:
                for area in mark_up_entities:
                    if area["area_of_testing"] == metric.split()[0]:
                        issues = mark_up_series(
                            issues,
                            source_field,
                            metric.split()[0],
                            area["entities"],
                        )

        significant_terms = calculate_significance_weights(issues, metric)
        context = {"significant_terms": significant_terms}

        return Response(context)
Example 20
    def get(self, request):
        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        if cache:
            return Response(loads(cache))
        fields = get_issues_fields(request.user)
        issues = get_issues(fields=fields)
        if not issues:
            # TODO: FE shows progress bar when data is empty
            return Response({})

        issues = pd.DataFrame.from_records(issues)
        freq_terms = calculate_frequently_terms(issues)
        statistics = calculate_statistics(
            df=issues,
            series=["Comments", "Attachments", "Time to Resolve"],
        )
        defect_submission = calculate_defect_submission(
            df=issues, period="Month"
        )
        significant_terms = get_significant_terms(issues)
        filters = get_filters(request.user, issues=issues)

        context = {
            "records_count": {
                "total": len(issues),
                "filtered": len(issues)
            },
            "frequently_terms": freq_terms,
            "statistics": statistics,
            "submission_chart": defect_submission,
            "significant_terms": significant_terms,
            "filters": filters,
        }
        redis_conn.set(
            name=f"analysis_and_training:{request.user.id}",
            value=dumps(context),
            ex=60 * 30,
        )

        return Response(context)
Example 21
    def get(self, request):
        total_count = get_issue_count()
        if not total_count:
            return Response({})

        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )

        filters = None
        if cache:
            filters = loads(cache)

        filtered_count = get_issue_count(filters)

        context = {
            "records_count": {
                "total": total_count,
                "filtered": filtered_count,
            },
        }

        return Response(context)
Example 22
    def get(self, request):
        user = request.user
        offset = DEFAULT_OFFSET
        limit = DEFAULT_LIMIT

        cached_predictions = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:predictions_page"
        )
        cached_filters = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:filters"
        )
        filters = (
            loads(cached_filters)
            if cached_filters
            else [UNRESOLVED_BUGS_FILTER]
        )

        if cached_predictions:
            predictions = loads(cached_predictions)
        else:
            check_training_files(user)

            training_parameters = get_training_parameters(request.user)
            predictions_table_fields = get_predictions_table_fields(user)

            issues = calculate_issues_predictions(
                user, predictions_table_fields, filters
            )

            if issues.empty:
                return Response({})

            predictions_table_fields.remove("Description_tr")
            predictions_table_fields.remove("Key")

            predictions_table = get_predictions_table(
                issues=issues,
                fields_settings=predictions_table_fields,
                offset=None,
                limit=None,
            )

            # Keep only the first page for the response payload; the full
            # table is cached separately below.
            paginated_table = paginate_bugs(predictions_table, offset, limit)

            areas_of_testing_percentage = calculate_aot_percentage(
                predictions_table["Area of Testing"]
            )
            priority_percentage = calculate_priority_percentage(
                predictions_table["Priority"], training_parameters["Priority"]
            )
            ttr_percentage = calculate_ttr_percentage(
                predictions_table["Time to Resolve"],
                training_parameters["Time to Resolve"],
            )

            resolution_percentage = calculate_resolution_percentage(
                predictions_table, training_parameters["Resolution"]
            )

            predictions = {
                "predictions_table": list(
                    paginated_table.T.to_dict().values()
                ),
                "prediction_table_rows_count": len(predictions_table),
                "areas_of_testing_chart": areas_of_testing_percentage,
                "priority_chart": priority_percentage,
                "ttr_chart": ttr_percentage,
                "resolution_chart": resolution_percentage,
            }

            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:predictions_page",
                value=dumps(predictions),
                ex=60 * 30,
            )
            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:filters",
                value=dumps(filters),
                ex=60 * 30,
            )
            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:predictions_table",
                value=dumps(list(predictions_table.T.to_dict().values())),
                ex=60 * 30,
            )

        return Response(predictions)
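
The calculate_*_percentage helpers are not reproduced on this page; each takes a Series of predicted labels (some also take training parameters), so at their core they plausibly reduce to a normalised value count. A hypothetical sketch of that idea for the area-of-testing chart:

    def calculate_aot_percentage(predictions):
        # Hypothetical: share of each predicted area of testing, in percent.
        counts = predictions.value_counts(normalize=True) * 100
        return counts.round(1).to_dict()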