# Example #1
    def post(self, request):
        """Train the user's models on the currently filtered issues.

        Reads the cached per-user filters (if any), loads the matching
        issues, marks them up by area of testing, trains the models and
        invalidates prediction caches that the retraining makes stale.

        Raises:
            BugsNotFoundWarning: no issues match the current filters.
            InvalidSourceField: the configured source field is missing
                from the loaded issues.
        """
        user = request.user

        # Filters are cached per-user by the filtering endpoint; fall
        # back to an unfiltered dataset when no cache entry exists.
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cache) if cache else None
        fields = get_issues_fields(request.user)
        issues = get_issues_dataframe(filters=filters, fields=fields)

        if issues.empty:
            raise BugsNotFoundWarning

        source_field = get_source_field(user)
        if source_field not in issues.columns:
            raise InvalidSourceField

        # A comprehension over an empty sequence already yields [], so no
        # explicit length guard is needed; this also avoids calling
        # get_bug_resolutions twice.
        resolutions = [
            resolution["value"] for resolution in get_bug_resolutions(user)
        ]

        areas_of_testing = []

        mark_up_entities = get_mark_up_entities(user)
        if source_field:
            areas_of_testing = [
                area["area_of_testing"] for area in mark_up_entities
            ] + ["Other"]
            for area in mark_up_entities:
                # Reuse the already-fetched source_field instead of
                # re-querying it on every iteration.
                issues = mark_up_series(
                    issues,
                    source_field,
                    area["area_of_testing"],
                    area["entities"],
                )
            # Everything not covered by a configured area goes to "Other".
            issues = mark_up_other_data(issues, areas_of_testing)

        train(
            user,
            issues,
            areas_of_testing,
            resolutions,
        )

        # Cached predictions are stale after retraining.
        clear_cache(
            ["qa_metrics:predictions_page", "qa_metrics:predictions_table"],
            request.user.id,
        )

        context = {
            "result": "success",
        }
        return Response(context, status=200)
# Example #2
    def post(self, request):
        """Train the user's models and regenerate predictions.

        Loads issues according to the cached filters, marks them up per
        the user's training settings, deletes the previous training
        artifacts/predictions, trains, and kicks off prediction
        generation in a background process.

        Raises:
            InvalidMarkUpSource: the configured mark-up source column is
                absent from the loaded issues.
        """
        instance = request.user

        cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
        filters = loads(cache)["filters"] if cache else None
        fields = get_issues_fields(request.user)
        df = pd.DataFrame(get_issues(filters=filters, fields=fields))

        # New predictions will be appended after training.
        delete_old_predictions()

        settings = get_training_settings(request.user)
        # Hoist repeated dict lookups used throughout the method.
        mark_up_source = settings["mark_up_source"]

        if mark_up_source not in df.columns:
            raise InvalidMarkUpSource

        # A comprehension over an empty "bug_resolution" already yields
        # [], so the explicit length guard was redundant.
        resolutions = [
            resolution["value"] for resolution in settings["bug_resolution"]
        ]

        areas_of_testing = []

        if mark_up_source:
            mark_up_entities = settings["mark_up_entities"]
            areas_of_testing = [
                area["area_of_testing"] for area in mark_up_entities
            ] + ["Other"]
            for area in mark_up_entities:
                df = mark_up_series(
                    df,
                    mark_up_source,
                    area["area_of_testing"],
                    area["entities"],
                )
            # Everything not covered by a configured area goes to "Other".
            df = mark_up_other_data(df, areas_of_testing)

        # Remove stale model artifacts before retraining.
        delete_training_data(get_archive_path(instance))

        train(
            instance,
            df,
            areas_of_testing,
            resolutions,
        )

        context = {
            "result": "success",
        }

        # Regenerate predictions without blocking the response.
        process = Process(target=append_predictions, args=(request.user, ))
        process.start()

        return Response(context, status=200)
# Example #3
    def post(self, request):
        """Apply (or just report) the user's filters and cache the result.

        On "apply", merges the submitted filter values into the current
        filter definitions, reloads the matching issues, caches each
        context section under a per-user key, and drops the now-stale
        defect-submission cache entry.
        """
        fields = get_issues_fields(request.user)
        issues = get_issues_dataframe(fields=fields)

        filters = get_filters(
            request.user,
            issues=issues,
        )

        if request.data.get("action") == "apply":
            new_filters = request.data.get("filters")
            if new_filters:
                # Index existing filters by name once instead of
                # rescanning the whole list for every submitted filter
                # (filter names are assumed unique per user).
                filters_by_name = {
                    filter_["name"]: filter_ for filter_ in filters
                }
                for new_filter in new_filters:
                    filter_ = filters_by_name.get(new_filter["name"])
                    if filter_ is not None:
                        filter_.update(
                            {
                                "current_value": new_filter[
                                    "current_value"
                                ],
                                "filtration_type": new_filter[
                                    "filtration_type"
                                ],
                                "exact_match": new_filter["exact_match"],
                            }
                        )
                issues = get_issues(filters=filters, fields=fields)

        issues_count = len(issues)
        context = {
            "records_count": {
                "total": get_issue_count(),
                "filtered": issues_count,
            },
            "filters": filters,
        }
        # Cache each context section under its own per-user key so
        # other endpoints can read them independently.
        for element in context:
            redis_conn.set(
                f"user:{request.user.id}:analysis_and_training:{element}",
                dumps(context.get(element)),
            )

        remove_cache_record(
            "analysis_and_training:defect_submission", request.user.id
        )

        return Response(context)
# Example #4
    def get(self, request):
        """Return the most frequent terms found in the user's filtered issues.

        Responds with an empty payload when no issues match the cached
        filters.
        """
        cached_filters = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(cached_filters) if cached_filters else None

        issues = get_issues_dataframe(
            fields=get_issues_fields(request.user.id), filters=filters
        )

        # Nothing to analyse — let the client render an empty state.
        if issues.empty:
            return Response({})

        context = {"frequently_terms": calculate_frequently_terms(issues)}
        return Response(context)
# Example #5
    def get(self, request):
        """Return the user's filter definitions, cached for 30 minutes.

        On a cache miss the filters are computed from the full issue
        dataset and stored under the per-user key before responding.
        """
        cache = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        if cache:
            return Response(loads(cache))

        fields = get_issues_fields(request.user)
        filters = get_filters(
            request.user, issues=get_issues_dataframe(fields=fields)
        )

        # ex=60 * 30 → entry expires after 30 minutes.
        redis_conn.set(
            name=f"user:{request.user.id}:analysis_and_training:filters",
            value=dumps(filters),
            ex=60 * 30,
        )

        return Response(filters)
# Example #6
    def get(self, request):
        """Return comment/attachment/resolution-time statistics.

        Statistics are computed over the issues matching the user's
        cached filters; an empty payload is returned when nothing
        matches.
        """
        raw = redis_conn.get(
            f"user:{request.user.id}:analysis_and_training:filters"
        )
        filters = loads(raw) if raw else None

        issues = get_issues_dataframe(
            fields=get_issues_fields(request.user.id), filters=filters
        )

        if issues.empty:
            return Response({})

        context = {
            "statistics": calculate_statistics(
                issues,
                ["Comments", "Attachments", "Time to Resolve"],
            )
        }
        return Response(context)
# Example #7
    def get(self, request):
        """Return the full analysis-and-training dashboard context.

        Serves the cached context when available; otherwise computes
        all dashboard sections from the complete issue set and caches
        the result for 30 minutes.
        """
        cached = redis_conn.get(f"analysis_and_training:{request.user.id}")
        if cached:
            return Response(loads(cached))

        fields = get_issues_fields(request.user)
        issues = get_issues(fields=fields)
        if not issues:
            # TODO: FE shows progress bar when data is empty
            return Response({})

        issues = pd.DataFrame.from_records(issues)
        total = len(issues)
        context = {
            # No filters applied here, so filtered == total.
            "records_count": {
                "total": total,
                "filtered": total,
            },
            "frequently_terms": calculate_frequently_terms(issues),
            "statistics": calculate_statistics(
                df=issues,
                series=["Comments", "Attachments", "Time to Resolve"],
            ),
            "submission_chart": calculate_defect_submission(
                df=issues, period="Month"
            ),
            "significant_terms": get_significant_terms(issues),
            "filters": get_filters(request.user, issues=issues),
        }
        # ex=60 * 30 → cache expires after 30 minutes.
        redis_conn.set(
            name=f"analysis_and_training:{request.user.id}",
            value=dumps(context),
            ex=60 * 30,
        )

        return Response(context)
# Example #8
    def post(self, request):
        """Apply the submitted filters and return the dashboard context.

        Merges the submitted filter state into the current filter
        definitions, reloads the matching issues, computes the dashboard
        sections and caches the context. When nothing matches, an empty
        context is cached but an empty payload is returned.
        """
        fields = get_issues_fields(request.user)

        filters = get_filters(
            request.user,
            issues=pd.DataFrame.from_records(get_issues(fields=fields)),
        )

        new_filters = None
        if request.data.get("action") == "apply":
            new_filters = request.data.get("filters")

        if new_filters:
            # Index existing filters by name once instead of rescanning
            # the whole list per submitted filter (names are assumed
            # unique per user).
            filters_by_name = {
                filter_["name"]: filter_ for filter_ in filters
            }
            for new_filter in new_filters:
                filter_ = filters_by_name.get(new_filter["name"])
                if filter_ is not None:
                    filter_.update({
                        "current_value":
                        new_filter["current_value"],
                        "filtration_type":
                        new_filter["filtration_type"],
                        "exact_match":
                        new_filter["exact_match"],
                    })
            issues = get_issues(filters=filters, fields=fields)
        else:
            # Both the "no action" and "apply with no filters" paths
            # fetch the unfiltered set — collapsed duplicated branches.
            issues = get_issues(fields=fields)

        if not issues:
            context = {
                "records_count": {
                    "total": get_issue_count(),
                    "filtered": 0
                },
                "frequently_terms": [],
                "statistics": {},
                "submission_chart": {},
                "significant_terms": {},
                "filters": filters,
            }
            redis_conn.set(f"analysis_and_training:{request.user.id}",
                           dumps(context))
            # FE expects an empty payload when nothing matched.
            return Response({})

        issues = pd.DataFrame.from_records(issues)
        context = {
            "records_count": {
                "total": get_issue_count(),
                "filtered": len(issues),
            },
            "frequently_terms": calculate_frequently_terms(issues),
            "statistics": calculate_statistics(
                df=issues,
                series=["Comments", "Attachments", "Time to Resolve"],
            ),
            "submission_chart": calculate_defect_submission(
                df=issues, period="Month"
            ),
            "significant_terms": get_significant_terms(
                issues, get_training_settings(request.user)
            ),
            "filters": filters,
        }
        redis_conn.set(f"analysis_and_training:{request.user.id}",
                       dumps(context))

        return Response(context)