Example #1
0
def get_significance_counts(submission, file, error_level):
    """ Gathers information for the significances section of the active DABS dashboard.

            Args:
                submission: submission to get the significance counts for
                file: The type of file to get the significance counts for
                error_level: whether to get warning or error counts for the significance counts (possible: warning,
                             error, mixed)

            Returns:
                A response containing significance data of the provided submission for the active DABS dashboard.

            Raises:
                ResponseException if submission provided is a FABS submission.
        """
    if submission.d2_submission:
        raise ResponseException('Submission must be a DABS submission.', status=StatusCode.CLIENT_ERROR)

    # Basic data that can be gathered from just the submission and passed filters
    response = {
        'total_instances': 0,
        'rules': []
    }

    sess = GlobalDB.db().session

    # Initial query: join each error's metadata to its job, its agency rule settings, and the rule definition
    significance_query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences,
                                    ErrorMetadata.rule_failed, RuleSetting.priority, RuleSql.category,
                                    RuleSetting.impact_id).\
        join(Job, Job.job_id == ErrorMetadata.job_id). \
        join(RuleSetting, RuleSetting.rule_label == ErrorMetadata.original_rule_label). \
        join(RuleSql, RuleSql.rule_label == ErrorMetadata.original_rule_label). \
        filter(Job.submission_id == submission.submission_id)

    agency_code = submission.frec_code or submission.cgac_code
    significance_query = agency_settings_filter(sess, significance_query, agency_code, file)
    significance_query = rule_severity_filter(significance_query, error_level, ErrorMetadata)
    significance_query = file_filter(significance_query, RuleSetting, [file])

    # Ordering by significance to help process the results
    significance_query = significance_query.order_by(RuleSetting.priority)

    for result in significance_query.all():
        response['rules'].append({
            'rule_label': result.original_rule_label,
            'category': result.category,
            'significance': result.priority,
            'impact': RULE_IMPACT_DICT_ID[result.impact_id],
            'instances': result.occurrences
        })
        response['total_instances'] += result.occurrences

    # Calculate what percentage of the total each rule contributes. Guard against a zero total
    # (e.g. matching rules that all have zero occurrences) to avoid a ZeroDivisionError.
    total_instances = response['total_instances']
    for rule_dict in response['rules']:
        rule_dict['percentage'] = round((rule_dict['instances'] / total_instances) * 100, 1) if total_instances \
            else 0.0

    return JsonResponse.create(StatusCode.OK, response)
Example #2
0
def get_impact_counts(submission, file, error_level):
    """ Gathers information for the impact count section of the active DABS dashboard.

            Args:
                submission: submission to get the impact counts for
                file: The type of file to get the impact counts for
                error_level: whether to get warning or error counts for the impact counts (possible: warning, error,
                    mixed)

            Returns:
                A response containing impact count information of the provided submission for the active DABS dashboard.

            Raises:
                ResponseException if submission provided is a FABS submission.
        """
    if submission.d2_submission:
        raise ResponseException('Submission must be a DABS submission.', status=StatusCode.CLIENT_ERROR)

    # One bucket per impact level, each tracking how many rules fall into it plus the rule details
    response = {level: {'total': 0, 'rules': []} for level in ('low', 'medium', 'high')}

    sess = GlobalDB.db().session

    # Base query tying each error's metadata to its job and agency-level rule settings
    impact_query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences, ErrorMetadata.rule_failed,
                              RuleSetting.impact_id).\
        join(Job, Job.job_id == ErrorMetadata.job_id). \
        join(RuleSetting, RuleSetting.rule_label == ErrorMetadata.original_rule_label). \
        filter(Job.submission_id == submission.submission_id)

    # Narrow by agency settings, requested severity, and requested file
    agency_code = submission.frec_code or submission.cgac_code
    impact_query = agency_settings_filter(sess, impact_query, agency_code, file)
    impact_query = rule_severity_filter(impact_query, error_level, ErrorMetadata)
    impact_query = file_filter(impact_query, RuleSetting, [file])

    # Drop every matching rule into its impact bucket
    for row in impact_query.all():
        bucket = response[RULE_IMPACT_DICT_ID[row.impact_id]]
        bucket['total'] += 1
        bucket['rules'].append({
            'rule_label': row.original_rule_label,
            'instances': row.occurrences,
            'rule_description': row.rule_failed
        })

    return JsonResponse.create(StatusCode.OK, response)
Example #3
0
def test_agency_settings_filter(database):
    """ Agencies without their own settings fall back to the defaults; agencies with settings get their own rows. """
    sess = database.session

    # Both rows share every field except agency_code and rule_label
    shared = {'file_id': FILE_TYPE_DICT_LETTER_ID['A'], 'target_file_id': None, 'priority': 1, 'impact_id': 1}
    default_setting = RuleSetting(agency_code=None, rule_label='A3', **shared)
    agency_setting = RuleSetting(agency_code='001', rule_label='A4', **shared)
    sess.add_all([default_setting, agency_setting])

    base_query = sess.query(RuleSetting)
    # '000' has no settings of its own, so it should see the defaults
    assert dashboard_helper.agency_settings_filter(
        sess, base_query, '000', 'A').first() == default_setting
    # '001' has its own settings and should see them instead
    assert dashboard_helper.agency_settings_filter(
        sess, base_query, '001', 'A').first() == agency_setting
def active_submission_table(submission,
                            file,
                            error_level,
                            page=1,
                            limit=5,
                            sort='significance',
                            order='desc'):
    """ Gather a list of warnings/errors based on the filters provided to display in the active dashboard table.

        Args:
            submission: submission to get the table data for
            file: The type of file to get the table data for
            error_level: whether to get warnings, errors, or both for the table (possible: warning, error, mixed)
            page: page number to use in getting the list
            limit: the number of entries per page
            sort: the column to order on
            order: order ascending or descending

        Returns:
            A response containing a list of results for the active submission dashboard table and the metadata for
            the table.

        Raises:
            ResponseException if submission provided is a FABS submission.
    """
    if submission.is_fabs:
        raise ResponseException('Submission must be a DABS submission.',
                                status=StatusCode.CLIENT_ERROR)

    # Basic information that is provided by the user and defaults for the rest
    response = {
        'page_metadata': {
            'total': 0,
            'page': page,
            'limit': limit,
            'submission_id': submission.submission_id,
            'files': []
        },
        'results': []
    }

    # File type: single files are reported as-is; cross-file types (e.g. 'cross-AB') are split into their two letters
    if file in ['A', 'B', 'C']:
        response['page_metadata']['files'] = [file]
    else:
        letters = file.split('-')[1]
        response['page_metadata']['files'] = [letters[:1], letters[1:]]

    sess = GlobalDB.db().session

    # Initial query: match errors to their rule definitions and settings on label, file, and target file
    table_query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences, ErrorMetadata.rule_failed,
                             RuleSql.category, RuleSetting.priority, RuleImpact.name.label('impact_name')).\
        join(Job, Job.job_id == ErrorMetadata.job_id).\
        join(RuleSql, and_(RuleSql.rule_label == ErrorMetadata.original_rule_label,
                           RuleSql.file_id == ErrorMetadata.file_type_id,
                           is_not_distinct_from(RuleSql.target_file_id, ErrorMetadata.target_file_type_id))).\
        join(RuleSetting, and_(RuleSql.rule_label == RuleSetting.rule_label, RuleSql.file_id == RuleSetting.file_id,
                               is_not_distinct_from(RuleSql.target_file_id, RuleSetting.target_file_id))).\
        join(RuleImpact, RuleImpact.rule_impact_id == RuleSetting.impact_id).\
        filter(Job.submission_id == submission.submission_id)

    agency_code = submission.frec_code or submission.cgac_code
    table_query = agency_settings_filter(sess, table_query, agency_code, file)
    table_query = rule_severity_filter(table_query, error_level, ErrorMetadata)
    table_query = file_filter(table_query, RuleSql, [file])

    # Total number of entries in the table (before pagination)
    response['page_metadata']['total'] = table_query.count()

    # Determine what to order by, default to "significance"
    options = {
        'significance': {
            'model': RuleSetting,
            'col': 'priority'
        },
        'rule_label': {
            'model': ErrorMetadata,
            'col': 'original_rule_label'
        },
        'instances': {
            'model': ErrorMetadata,
            'col': 'occurrences'
        },
        'category': {
            'model': RuleSql,
            'col': 'category'
        },
        'impact': {
            'model': RuleSetting,
            'col': 'impact_id'
        },
        'description': {
            'model': ErrorMetadata,
            'col': 'rule_failed'
        }
    }

    sort_order = [getattr(options[sort]['model'], options[sort]['col'])]

    # Add secondary sorts for columns that can tie, breaking ties by significance
    if sort in ['instances', 'category', 'impact']:
        sort_order.append(RuleSetting.priority)

    # Set the sort order. NOTE: the loop variable must not be named 'order' — that would shadow the
    # 'order' parameter this branch just tested.
    if order == 'desc':
        sort_order = [sort_col.desc() for sort_col in sort_order]

    table_query = table_query.order_by(*sort_order)

    # The page we're on
    offset = limit * (page - 1)
    table_query = table_query.slice(offset, offset + limit)

    for result in table_query.all():
        response['results'].append({
            'significance': result.priority,
            'rule_label': result.original_rule_label,
            'instance_count': result.occurrences,
            'category': result.category,
            'impact': result.impact_name,
            'rule_description': result.rule_failed
        })

    return JsonResponse.create(StatusCode.OK, response)