Example #1
    def post(self, request):
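        """Build the defect submission chart for the period passed in the
        query string, applying the user's cached filters, and cache it.
        """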
        period = request.GET["period"]
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        issues = get_issues_dataframe(
            fields=["Key", "Created", "Resolved"], filters=filters
        )

        if issues.empty:
            return Response({})

        coordinates = get_defect_submission(issues, period)
        context = {
            **coordinates,
            **get_max_amount(coordinates),
            "period": period,
        }

        redis_conn.set(
            f"user:{request.user.id}:analysis_and_training:defect_submission",
            dumps(context),
        )

        return Response(context)
Example #2
    def post(self, request):
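        """Apply the submitted QA metrics filters and return issue counts.

        When the effective filters change, prediction caches are cleared
        and the new filters and counts are re-cached per user.
        """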
        new_filters = request.data.get("filters", [])
        filters = get_qa_metrics_settings(request.user)
        fields = [field["name"] for field in filters]
        filters = update_drop_down_fields(
            filters,
            get_issues_dataframe(
                fields=fields, filters=[UNRESOLVED_BUGS_FILTER]
            ),
        )

        if new_filters:
            for new_filter in new_filters:
                for filter_ in filters:
                    if new_filter["name"] == filter_["name"]:
                        filter_.update(
                            {
                                "current_value": new_filter["current_value"],
                                "filtration_type": new_filter[
                                    "filtration_type"
                                ],
                                "exact_match": new_filter["exact_match"],
                            }
                        )
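        # Keep the unresolved-bugs constraint in the effective filters.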
        filters += [UNRESOLVED_BUGS_FILTER]

        cached_filters = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:filters"
        )
        cached_filters = loads(cached_filters) if cached_filters else []

        context = {
            "records_count": {
                "total": get_issue_count(filters=[UNRESOLVED_BUGS_FILTER]),
                "filtered": get_issue_count(filters),
            },
            "filters": filters,
        }

        if not cached_filters or not check_filters_equality(
            filters, cached_filters
        ):
            clear_cache(
                [
                    "qa_metrics:predictions_table",
                    "qa_metrics:predictions_page",
                ],
                request.user.id,
            )
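            # Cache each context section under its own per-user key.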
            for element in context:
                redis_conn.set(
                    f"user:{request.user.id}:qa_metrics:{element}",
                    dumps(context.get(element)),
                )

        return Response(context)
Example #3
    def post(self, request):
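        """Predict issue metrics from a cleaned bug description.

        Probabilities are converted to integer percentages, cached per
        user, and returned to the client.
        """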
        description = clean_text(request.data.get("description"))

        if not description.strip():
            raise CannotAnalyzeDescriptionWarning

        training_parameters = get_training_parameters(request.user)

        models = load_models(request.user)

        probabilities = {}
        for parameter in training_parameters:
            if parameter in ["Time to Resolve", "Priority"]:
                probabilities[parameter] = get_probabilities(
                    description,
                    training_parameters[parameter],
                    models[parameter],
                )
            elif parameter == "Resolution":
                probabilities["resolution"] = calculate_resolution_predictions(
                    description,
                    training_parameters[parameter],
                    models[parameter],
                )
            elif parameter == "areas_of_testing":
                probabilities[
                    parameter
                ] = calculate_area_of_testing_predictions(
                    description,
                    training_parameters[parameter],
                    models[parameter],
                )

        for probability in probabilities:
            if probability == "resolution":
                for resolution in probabilities[probability]:
                    resolution_obj = probabilities[probability][resolution]
                    for metric in resolution_obj:
                        resolution_obj[metric] = convert_to_integer(
                            resolution_obj[metric]
                        )
            else:
                for metric in probabilities[probability]:
                    probabilities[probability][metric] = convert_to_integer(
                        probabilities[probability][metric]
                    )

        redis_conn.set(
            f"user:{request.user.id}:description_assessment:description",
            dumps(description),
        )
        redis_conn.set(
            f"user:{request.user.id}:description_assessment:probabilities",
            dumps(probabilities),
        )

        context = {"probabilities": probabilities}

        return Response(context)
Example #4
    def post(self, request):
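        """Predict issue metrics for a description using archived models."""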
        def _convert_to_integer(value):
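            # Round a probability in [0, 1] to the nearest whole percent.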
            return int(floor((value * 100) + 0.5))

        description = clean_text(request.data.get("description"))

        if not description.strip():
            raise DescriptionCantAnalyzedWarning

        archive_path = get_archive_path(request.user)
        training_parameters = read_from_archive(
            archive_path, TRAINING_PARAMETERS_FILENAME
        )
        probabilities = {}
        probabilities["resolution"] = calculate_resolution_predictions(
            description, training_parameters["Resolution"], archive_path
        )
        probabilities[
            "areas_of_testing"
        ] = calculate_area_of_testing_predictions(
            description, training_parameters["areas_of_testing"], archive_path
        )

        for metric in ["Time to Resolve", "Priority"]:
            probabilities[metric] = get_probabilities(
                description,
                training_parameters[metric],
                read_from_archive(archive_path, metric + ".sav"),
            )

        for probability in probabilities:
            if probability == "resolution":
                for resolution in probabilities[probability]:
                    resolution_obj = probabilities[probability][resolution]
                    for metric in resolution_obj:
                        resolution_obj[metric] = _convert_to_integer(
                            resolution_obj[metric]
                        )
            else:
                for metric in probabilities[probability]:
                    probabilities[probability][metric] = _convert_to_integer(
                        probabilities[probability][metric]
                    )

        redis_conn.set(f"description:{request.user.id}", dumps(description))
        redis_conn.set(
            f"probabilities:{request.user.id}", dumps(probabilities)
        )

        context = {"probabilities": probabilities}

        return Response(context)
Example #5
    def get(self, request):
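        """Return the user's QA metrics settings, cached in Redis."""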
        cached_settings = redis_conn.get(
            f"settings:qa_metrics:{request.user.id}"
        )
        if cached_settings:
            return Response(loads(cached_settings))

        qa_metrics_settings = get_qa_metrics_settings(request.user)
        redis_conn.set(
            f"settings:qa_metrics:{request.user.id}",
            dumps(qa_metrics_settings),
        )

        return Response(qa_metrics_settings)
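Examples #5 and #7 follow the same cache-aside shape: read from Redis, compute on a miss, store, and return. A minimal sketch of that pattern as a standalone helper, assuming the redis_conn, loads, and dumps names used throughout the examples; cache_aside itself is a hypothetical name:

    def cache_aside(key, compute, ttl=None):
        # Serve the cached value when one exists.
        cached = redis_conn.get(key)
        if cached:
            return loads(cached)
        # Otherwise compute, cache (optionally with a TTL), and return.
        value = compute()
        redis_conn.set(name=key, value=dumps(value), ex=ttl)
        return value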
Example #6
    def post(self, request):
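        """Return one page of the QA metrics predictions table.

        The full table is recalculated and cached for 30 minutes when no
        cached copy exists.
        """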
        user = request.user
        offset = int(request.data.get("offset", DEFAULT_OFFSET))
        limit = int(request.data.get("limit", DEFAULT_LIMIT))

        cache = redis_conn.get(f"user:{user.id}:qa_metrics:filters")
        filters = loads(cache) if cache else [UNRESOLVED_BUGS_FILTER]

        check_training_files(user)

        cached_predictions = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:predictions_table"
        )

        if cached_predictions:
            predictions = DataFrame.from_records(loads(cached_predictions))
        else:
            predictions_table_fields = get_predictions_table_fields(user)

            issues = calculate_issues_predictions(
                user, predictions_table_fields, filters
            )

            if issues.empty:
                return Response({})

            predictions_table_fields.remove("Description_tr")
            predictions_table_fields.remove("Key")

            predictions = get_predictions_table(
                issues=issues,
                fields_settings=predictions_table_fields,
                offset=None,
                limit=None,
            )

            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:predictions_table",
                value=dumps(list(predictions.T.to_dict().values())),
                ex=60 * 30,
            )

        predictions = list(
            paginate_bugs(df=predictions, offset=offset, limit=limit)
            .T.to_dict()
            .values()
        )

        return Response(predictions)
Example #7
    def get(self, request):
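        """Return the user's predictions table settings, cached in Redis."""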
        cached_settings = redis_conn.get(
            f"settings:predictions_table:{request.user.id}"
        )
        if cached_settings:
            return Response(loads(cached_settings))

        predictions_table_settings = get_predictions_table_settings(
            request.user
        )
        redis_conn.set(
            f"settings:predictions_table:{request.user.id}",
            dumps(predictions_table_settings),
        )

        return Response(predictions_table_settings)
Example #8
    def post(self, request):
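        """Apply the submitted filters and cache the resulting context.

        The cached defect submission chart is invalidated afterwards.
        """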
        fields = get_issues_fields(request.user)
        issues = get_issues_dataframe(fields=fields)

        filters = get_filters(
            request.user,
            issues=issues,
        )

        if request.data.get("action") == "apply":
            new_filters = request.data.get("filters")
            if new_filters:
                for new_filter in new_filters:
                    for filter_ in filters:
                        if new_filter["name"] == filter_["name"]:
                            filter_.update(
                                {
                                    "current_value": new_filter[
                                        "current_value"
                                    ],
                                    "filtration_type": new_filter[
                                        "filtration_type"
                                    ],
                                    "exact_match": new_filter["exact_match"],
                                }
                            )
                issues = get_issues(filters=filters, fields=fields)

        issues_count = len(issues)
        context = {
            "records_count": {
                "total": get_issue_count(),
                "filtered": issues_count,
            },
            "filters": filters,
        }
        for element in context:
            redis_conn.set(
                f"user:{request.user.id}:analysis_and_training:{element}",
                dumps(context.get(element)),
            )

        remove_cache_record(
            "analysis_and_training:defect_submission", request.user.id
        )

        return Response(context)
Example #9
    def post(self, request):
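        """Save the predictions table settings and refresh their cache."""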
        data = request.data.copy()

        settings_serializer = UserPredictionsTableSerializer(
            data=data, many=True
        )
        settings_serializer.is_valid(raise_exception=True)

        UserPredictionsTableSerializer.delete_old_fields(data[0]["settings"])
        settings_serializer.save()

        redis_conn.set(
            f"settings:predictions_table:{request.user.id}",
            dumps(get_predictions_table_settings(request.user)),
        )

        return Response({"result": "success"})
Example #10
    def post(self, request):
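        """Save the QA metrics filter settings and refresh their cache."""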
        data = request.data.copy()
        read_settings(data, request.user)

        settings_serializer = UserQAMetricsFilterSerializer(
            data=data, many=True
        )
        settings_serializer.is_valid(raise_exception=True)

        UserQAMetricsFilterSerializer.delete_old_filters(data[0]["settings"])
        settings_serializer.save()

        redis_conn.set(
            f"settings:qa_metrics:{request.user.id}",
            dumps(get_qa_metrics_settings(request.user)),
        )

        return Response({"result": "success"})
Example #11
    def get(self, request):
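        """Return the Analysis & Training filters, cached for 30 minutes."""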
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        if cache:
            filters = loads(cache)
        else:
            fields = get_issues_fields(request.user)
            filters = get_filters(
                request.user, issues=get_issues_dataframe(fields=fields)
            )

            redis_conn.set(
                name=f"user:{request.user.id}:analysis_and_training:filters",
                value=dumps(filters),
                ex=60 * 30,
            )

        return Response(filters)
Example #12
    def post(self, request):
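        """Save the QA metrics filters and invalidate the stale cache."""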
        check_issues_exist()

        request_data = request.data.copy()
        read_settings(request_data, request.user)

        settings_serializer = UserQAMetricsFilterSerializer(
            data=request_data, many=True
        )
        settings_serializer.is_valid(raise_exception=True)

        UserQAMetricsFilterSerializer.delete_old_filters(
            request_data[0]["settings"]
        )
        settings_serializer.save()

        remove_cache_record("qa_metrics:filters", request.user.id)

        redis_conn.set(
            f"user:{request.user.id}:settings:qa_metrics",
            dumps(get_qa_metrics_settings(request.user)),
        )

        return Response({"result": "success"})
Example #13
    def get(self, request):
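        """Build and cache (30 min) the Analysis & Training page context."""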

        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        if cache:
            return Response(loads(cache))
        fields = get_issues_fields(request.user)
        issues = get_issues(fields=fields)
        if not issues:
            # TODO: FE shows progress bar when data is empty
            return Response({})

        issues = pd.DataFrame.from_records(issues)
        freq_terms = calculate_frequently_terms(issues)
        statistics = calculate_statistics(
            df=issues, series=["Comments", "Attachments", "Time to Resolve"]
        )
        defect_submission = calculate_defect_submission(
            df=issues, period="Month"
        )
        significant_terms = get_significant_terms(issues)
        filters = get_filters(request.user, issues=issues)

        context = {
            "records_count": {
                "total": len(issues),
                "filtered": len(issues)
            },
            "frequently_terms": freq_terms,
            "statistics": statistics,
            "submission_chart": defect_submission,
            "significant_terms": significant_terms,
            "filters": filters,
        }
        redis_conn.set(
            name=f"analysis_and_training:{request.user.id}",
            value=dumps(context),
            ex=60 * 30,
        )

        return Response(context)
Example #14
    def get(self, request):
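        """Build the QA metrics predictions page with its summary charts.

        A cached page is served when present; otherwise predictions are
        recalculated and the page, filters, and full table are cached.
        """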
        user = request.user
        offset = DEFAULT_OFFSET
        limit = DEFAULT_LIMIT

        cached_predictions = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:predictions_page"
        )
        cached_filters = redis_conn.get(
            f"user:{request.user.id}:qa_metrics:filters"
        )
        filters = (
            loads(cached_filters)
            if cached_filters
            else [UNRESOLVED_BUGS_FILTER]
        )

        if cached_predictions:
            predictions = loads(cached_predictions)
        else:
            check_training_files(user)

            training_parameters = get_training_parameters(request.user)
            predictions_table_fields = get_predictions_table_fields(user)

            issues = calculate_issues_predictions(
                user, predictions_table_fields, filters
            )

            if issues.empty:
                return Response({})

            predictions_table_fields.remove("Description_tr")
            predictions_table_fields.remove("Key")

            predictions_table = get_predictions_table(
                issues=issues,
                fields_settings=predictions_table_fields,
                offset=None,
                limit=None,
            )

            prediction_table = paginate_bugs(predictions_table, offset, limit)

            areas_of_testing_percentage = calculate_aot_percentage(
                predictions_table["Area of Testing"]
            )
            priority_percentage = calculate_priority_percentage(
                predictions_table["Priority"], training_parameters["Priority"]
            )
            ttr_percentage = calculate_ttr_percentage(
                predictions_table["Time to Resolve"],
                training_parameters["Time to Resolve"],
            )

            resolution_percentage = calculate_resolution_percentage(
                predictions_table, training_parameters["Resolution"]
            )

            predictions = {
                "predictions_table": list(
                    prediction_table.T.to_dict().values()
                ),
                "prediction_table_rows_count": len(predictions_table),
                "areas_of_testing_chart": areas_of_testing_percentage,
                "priority_chart": priority_percentage,
                "ttr_chart": ttr_percentage,
                "resolution_chart": resolution_percentage,
            }

            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:predictions_page",
                value=dumps(predictions),
                ex=60 * 30,
            )
            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:filters",
                value=dumps(filters),
                ex=60 * 30,
            )
            redis_conn.set(
                name=f"user:{request.user.id}:qa_metrics:predictions_table",
                value=dumps(list(predictions_table.T.to_dict().values())),
                ex=60 * 30,
            )

        return Response(predictions)
Example #15
    def post(self, request):
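        """Apply filters and rebuild the Analysis & Training context."""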
        fields = get_issues_fields(request.user)

        filters = get_filters(
            request.user,
            issues=pd.DataFrame.from_records(get_issues(fields=fields)),
        )

        if request.data.get("action") == "apply":
            new_filters = request.data.get("filters")
            if new_filters:
                for new_filter in new_filters:
                    for filter_ in filters:
                        if new_filter["name"] == filter_["name"]:
                            filter_.update(
                                {
                                    "current_value": new_filter[
                                        "current_value"
                                    ],
                                    "filtration_type": new_filter[
                                        "filtration_type"
                                    ],
                                    "exact_match": new_filter["exact_match"],
                                }
                            )
                issues = get_issues(filters=filters, fields=fields)
            else:
                issues = get_issues(fields=fields)
        else:
            issues = get_issues(fields=fields)

        if not issues:
            context = {
                "records_count": {
                    "total": get_issue_count(),
                    "filtered": 0,
                },
                "frequently_terms": [],
                "statistics": {},
                "submission_chart": {},
                "significant_terms": {},
                "filters": filters,
            }
            redis_conn.set(
                f"analysis_and_training:{request.user.id}", dumps(context)
            )
            return Response({})

        issues = pd.DataFrame.from_records(issues)
        freq_terms = calculate_frequently_terms(issues)
        statistics = calculate_statistics(
            df=issues, series=["Comments", "Attachments", "Time to Resolve"]
        )
        submission_chart = calculate_defect_submission(
            df=issues, period="Month"
        )
        significant_terms = get_significant_terms(
            issues, get_training_settings(request.user)
        )

        context = {
            "records_count": {
                "total": get_issue_count(),
                "filtered": len(issues),
            },
            "frequently_terms": freq_terms,
            "statistics": statistics,
            "submission_chart": submission_chart,
            "significant_terms": significant_terms,
            "filters": filters,
        }
        redis_conn.set(
            f"analysis_and_training:{request.user.id}", dumps(context)
        )

        return Response(context)