Example #1
0
def test_file_filter_cert_error_metadata(database):
    """file_filter on CertifiedErrorMetadata: no filter, single-file filters, and a cross-file filter."""
    sess = database.session

    def make_cem(label, file_letter, target_letter=None):
        # Build a fatal-severity CertifiedErrorMetadata; cross-file rows carry a target file type.
        target_id = FILE_TYPE_DICT_LETTER_ID[target_letter] if target_letter else None
        return CertifiedErrorMetadata(original_rule_label=label,
                                      file_type_id=FILE_TYPE_DICT_LETTER_ID[file_letter],
                                      severity_id=RULE_SEVERITY_DICT['fatal'],
                                      target_file_type_id=target_id)

    meta_a = make_cem('A1', 'A')
    meta_b = make_cem('B2', 'B')
    meta_c = make_cem('C3', 'C')
    meta_cross_ab = make_cem('A4', 'A', 'B')
    meta_cross_ba = make_cem('B5', 'B', 'A')
    meta_cross_bc = make_cem('B6', 'B', 'C')
    metadata = [meta_a, meta_b, meta_c, meta_cross_ab, meta_cross_ba, meta_cross_bc]
    sess.add_all(metadata)
    sess.commit()

    base_query = sess.query(CertifiedErrorMetadata)

    # An empty file list applies no filtering at all
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, [])
    assert set(query.all()) == set(metadata)

    # Filtering by individual files
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, ['A', 'C'])
    assert set(query.all()) == {meta_a, meta_c}

    # A cross-file filter matches both directions of the pair
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, ['cross-AB'])
    assert set(query.all()) == {meta_cross_ab, meta_cross_ba}
Example #2
0
def test_file_filter_wrong_file_model(database):
    """file_filter raises a ResponseException when given an unsupported model."""
    base_query = database.session.query(CertifiedErrorMetadata)

    # Submission is not one of the models file_filter supports
    expected_error = 'Invalid file model. Use one of the following instead: CertifiedErrorMetadata, ErrorMetadata, ' \
                     'RuleSetting, RuleSql.'
    with pytest.raises(ResponseException) as resp_except:
        filters_helper.file_filter(base_query, Submission, [])
    assert str(resp_except.value) == expected_error
Example #3
0
def list_rule_settings(agency_code, file):
    """ Returns a list of prioritized rules for an agency.

        Args:
            agency_code: string of the agency's CGAC/FREC code
            file: the rule's file type

        Returns:
            JsonResponse with the agency's rules for the file type split into 'errors' and 'warnings' lists,
            each ordered by priority (significance)

        Raises:
            ResponseException if invalid agency code or file type
    """
    sess = GlobalDB.db().session

    # Reject unknown file types up front
    if file not in FILE_TYPES:
        raise ResponseException('Invalid file type: {}'.format(file),
                                StatusCode.CLIENT_ERROR)
    # The agency code must exist as either a CGAC or a FREC code
    if (sess.query(CGAC).filter(CGAC.cgac_code == agency_code).count() == 0) and \
            (sess.query(FREC).filter(FREC.frec_code == agency_code).count() == 0):
        raise ResponseException('Invalid agency_code: {}'.format(agency_code),
                                StatusCode.CLIENT_ERROR)

    # Get the base query with the file filter. Join RuleSetting to RuleSql on label + file;
    # is_not_distinct_from lets NULL target files (single-file rules) still match.
    rule_settings_query = sess.query(RuleSetting.priority, RuleSql.rule_label, RuleImpact.name,
                                     RuleSql.rule_error_message, RuleSql.rule_severity_id).\
        join(RuleSql, and_(RuleSql.rule_label == RuleSetting.rule_label, RuleSql.file_id == RuleSetting.file_id,
                           is_not_distinct_from(RuleSql.target_file_id, RuleSetting.target_file_id))).\
        join(RuleImpact, RuleImpact.rule_impact_id == RuleSetting.impact_id)
    rule_settings_query = file_filter(rule_settings_query, RuleSetting, [file])

    # Filter settings by agency. If they haven't set theirs, use the defaults (rows whose agency_code is NULL).
    if agency_has_settings(sess, agency_code, file):
        agency_filter = (RuleSetting.agency_code == agency_code)
    else:
        agency_filter = RuleSetting.agency_code.is_(None)
    rule_settings_query = rule_settings_query.filter(agency_filter)

    # Order by priority/significance
    rule_settings_query = rule_settings_query.order_by(RuleSetting.priority)

    # Split the ordered rules into warnings and errors by severity
    errors = []
    warnings = []
    for rule in rule_settings_query.all():
        rule_dict = {
            'label': rule.rule_label,
            'description': rule.rule_error_message,
            'significance': rule.priority,
            'impact': rule.name
        }
        if rule.rule_severity_id == RULE_SEVERITY_DICT['warning']:
            warnings.append(rule_dict)
        else:
            errors.append(rule_dict)
    return JsonResponse.create(StatusCode.OK, {
        'warnings': warnings,
        'errors': errors
    })
Example #4
0
def get_significance_counts(submission, file, error_level):
    """ Gathers information for the significances section of the active DABS dashboard.

        Args:
            submission: submission to get the significance counts for
            file: The type of file to get the significance counts for
            error_level: whether to get warning or error counts for the significance counts (possible: warning,
                         error, mixed)

        Returns:
            A response containing significance data of the provided submission for the active DABS dashboard.

        Raises:
            ResponseException if submission provided is a FABS submission.
    """
    if submission.d2_submission:
        raise ResponseException('Submission must be a DABS submission.', status=StatusCode.CLIENT_ERROR)

    # Basic data that can be gathered from just the submission and passed filters
    response = {
        'total_instances': 0,
        'rules': []
    }

    sess = GlobalDB.db().session

    # Initial query: error metadata for this submission joined to the rule's settings and SQL definition
    significance_query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences,
                                    ErrorMetadata.rule_failed, RuleSetting.priority, RuleSql.category,
                                    RuleSetting.impact_id).\
        join(Job, Job.job_id == ErrorMetadata.job_id). \
        join(RuleSetting, RuleSetting.rule_label == ErrorMetadata.original_rule_label). \
        join(RuleSql, RuleSql.rule_label == ErrorMetadata.original_rule_label). \
        filter(Job.submission_id == submission.submission_id)

    # Narrow to the agency's settings, the requested severity, and the requested file
    agency_code = submission.frec_code or submission.cgac_code
    significance_query = agency_settings_filter(sess, significance_query, agency_code, file)
    significance_query = rule_severity_filter(significance_query, error_level, ErrorMetadata)
    significance_query = file_filter(significance_query, RuleSetting, [file])

    # Ordering by significance to help process the results
    significance_query = significance_query.order_by(RuleSetting.priority)

    for result in significance_query.all():
        response['rules'].append({
            'rule_label': result.original_rule_label,
            'category': result.category,
            'significance': result.priority,
            'impact': RULE_IMPACT_DICT_ID[result.impact_id],
            'instances': result.occurrences
        })
        response['total_instances'] += result.occurrences

    # Calculate the percentages. Guard against a total of 0 (e.g. every matched rule has 0 occurrences),
    # which previously raised ZeroDivisionError.
    total_instances = response['total_instances']
    for rule_dict in response['rules']:
        if total_instances:
            rule_dict['percentage'] = round((rule_dict['instances'] / total_instances) * 100, 1)
        else:
            rule_dict['percentage'] = 0.0

    return JsonResponse.create(StatusCode.OK, response)
Example #5
0
def get_impact_counts(submission, file, error_level):
    """ Gathers information for the impact count section of the active DABS dashboard.

        Args:
            submission: submission to get the impact counts for
            file: The type of file to get the impact counts for
            error_level: whether to get warning or error counts for the impact counts (possible: warning, error,
                mixed)

        Returns:
            A response containing impact count information of the provided submission for the active DABS dashboard.

        Raises:
            ResponseException if submission provided is a FABS submission.
    """
    if submission.d2_submission:
        raise ResponseException('Submission must be a DABS submission.', status=StatusCode.CLIENT_ERROR)

    # One bucket per impact level; each bucket tracks a rule count and the rule details
    response = {level: {'total': 0, 'rules': []} for level in ('low', 'medium', 'high')}

    sess = GlobalDB.db().session

    # Base query: error metadata for this submission joined to each rule's saved settings
    query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences, ErrorMetadata.rule_failed,
                       RuleSetting.impact_id).\
        join(Job, Job.job_id == ErrorMetadata.job_id).\
        join(RuleSetting, RuleSetting.rule_label == ErrorMetadata.original_rule_label).\
        filter(Job.submission_id == submission.submission_id)

    # Narrow to the agency's settings, the requested severity, and the requested file
    agency_code = submission.frec_code or submission.cgac_code
    query = agency_settings_filter(sess, query, agency_code, file)
    query = rule_severity_filter(query, error_level, ErrorMetadata)
    query = file_filter(query, RuleSetting, [file])

    for row in query.all():
        bucket = response[RULE_IMPACT_DICT_ID[row.impact_id]]
        bucket['total'] += 1
        bucket['rules'].append({
            'rule_label': row.original_rule_label,
            'instances': row.occurrences,
            'rule_description': row.rule_failed
        })

    return JsonResponse.create(StatusCode.OK, response)
def list_rule_labels(files, error_level='warning', fabs=False):
    """ Returns a list of rule labels based on the files and error type provided

        Args:
            files: A list of files for which to return rule labels. If blank, return all matching other arguments
            error_level: A string indicating whether to return errors, warnings, or both. Defaults to warning
            fabs: A boolean indicating whether to return FABS or DABS rules. Defaults to False

        Returns:
            JsonResponse of the rule labels the arguments indicate. JsonResponse error if invalid file types are
            provided or any file types are provided for FABS
    """
    # FABS rules have no per-file breakdown, so a file list is invalid there
    if fabs and len(files) > 0:
        return JsonResponse.error(ValueError('Files list must be empty for FABS rules'), StatusCode.CLIENT_ERROR)

    # Reject any file types we don't recognize
    invalid_files = [file_type for file_type in files if file_type not in FILE_TYPES]
    if invalid_files:
        return JsonResponse.error(
            ValueError('The following are not valid file types: {}'.format(', '.join(invalid_files))),
            StatusCode.CLIENT_ERROR)

    sess = GlobalDB.db().session

    label_query = sess.query(RuleSql.rule_label)

    # Unless a "mixed" level was requested, restrict to the matching severity
    if error_level == 'error':
        label_query = label_query.filter_by(rule_severity_id=RULE_SEVERITY_DICT['fatal'])
    elif error_level == 'warning':
        label_query = label_query.filter_by(rule_severity_id=RULE_SEVERITY_DICT['warning'])

    if files:
        # Specific files requested: filter down to just those
        label_query = file_filter(label_query, RuleSql, files)
    elif not fabs:
        # DABS request with no file list: everything except FABS rules
        label_query = label_query.filter(RuleSql.file_id != FILE_TYPE_DICT_LETTER_ID['FABS'])
    else:
        # FABS request: only FABS rules
        label_query = label_query.filter_by(file_id=FILE_TYPE_DICT_LETTER_ID['FABS'])

    return JsonResponse.create(StatusCode.OK, {'labels': [row.rule_label for row in label_query.all()]})
def agency_has_settings(sess, agency_code, file):
    """ Helper function to determine if the agency has saved any settings of this file type

        Args:
            sess: the database connection
            agency_code: the agency code to work with
            file: the rule's file type

        Returns:
            True if the agency has saved their settings for this file type
    """
    # Any stored RuleSetting row for this agency/file combination means settings exist
    settings_query = sess.query(RuleSetting).filter(RuleSetting.agency_code == agency_code)
    settings_query = file_filter(settings_query, RuleSetting, [file])
    return settings_query.count() > 0
Example #8
0
def apply_historic_dabs_details_filters(query, filters):
    """ Apply the detailed filters provided to the query provided

        Args:
            query: the baseline sqlalchemy query to work from
            filters: dictionary representing the detailed filters provided to the historic dashboard endpoints

        Returns:
            the original query with the appropriate filters
    """
    # Empty lists mean "no filtering" for both files and rules
    file_list = filters['files']
    if file_list:
        query = file_filter(query, CertifiedErrorMetadata, file_list)

    rule_list = filters['rules']
    if rule_list:
        query = query.filter(CertifiedErrorMetadata.original_rule_label.in_(rule_list))

    return query
def active_submission_table(submission,
                            file,
                            error_level,
                            page=1,
                            limit=5,
                            sort='significance',
                            order='desc'):
    """ Gather a list of warnings/errors based on the filters provided to display in the active dashboard table.

        Args:
            submission: submission to get the table data for
            file: The type of file to get the table data for
            error_level: whether to get warnings, errors, or both for the table (possible: warning, error, mixed)
            page: page number to use in getting the list
            limit: the number of entries per page
            sort: the column to order on
            order: order ascending or descending

        Returns:
            A response containing a list of results for the active submission dashboard table and the metadata for
            the table.

        Raises:
            ResponseException if submission provided is a FABS submission.
    """
    if submission.is_fabs:
        raise ResponseException('Submission must be a DABS submission.',
                                status=StatusCode.CLIENT_ERROR)

    # Basic information that is provided by the user and defaults for the rest
    response = {
        'page_metadata': {
            'total': 0,
            'page': page,
            'limit': limit,
            'submission_id': submission.submission_id,
            'files': []
        },
        'results': []
    }

    # File type: single file letter, or a 'cross-XY' string split into its two letters
    if file in ['A', 'B', 'C']:
        response['page_metadata']['files'] = [file]
    else:
        letters = file.split('-')[1]
        response['page_metadata']['files'] = [letters[:1], letters[1:]]

    sess = GlobalDB.db().session

    # Initial query: error metadata joined to the rule definition and its settings.
    # is_not_distinct_from lets NULL target files (single-file rules) still match.
    table_query = sess.query(ErrorMetadata.original_rule_label, ErrorMetadata.occurrences, ErrorMetadata.rule_failed,
                             RuleSql.category, RuleSetting.priority, RuleImpact.name.label('impact_name')).\
        join(Job, Job.job_id == ErrorMetadata.job_id).\
        join(RuleSql, and_(RuleSql.rule_label == ErrorMetadata.original_rule_label,
                           RuleSql.file_id == ErrorMetadata.file_type_id,
                           is_not_distinct_from(RuleSql.target_file_id, ErrorMetadata.target_file_type_id))).\
        join(RuleSetting, and_(RuleSql.rule_label == RuleSetting.rule_label, RuleSql.file_id == RuleSetting.file_id,
                               is_not_distinct_from(RuleSql.target_file_id, RuleSetting.target_file_id))).\
        join(RuleImpact, RuleImpact.rule_impact_id == RuleSetting.impact_id).\
        filter(Job.submission_id == submission.submission_id)

    # Narrow to the agency's settings, the requested severity, and the requested file
    agency_code = submission.frec_code or submission.cgac_code
    table_query = agency_settings_filter(sess, table_query, agency_code, file)
    table_query = rule_severity_filter(table_query, error_level, ErrorMetadata)
    table_query = file_filter(table_query, RuleSql, [file])

    # Total number of entries in the table
    response['page_metadata']['total'] = table_query.count()

    # Determine what to order by, default to "significance"
    options = {
        'significance': {
            'model': RuleSetting,
            'col': 'priority'
        },
        'rule_label': {
            'model': ErrorMetadata,
            'col': 'original_rule_label'
        },
        'instances': {
            'model': ErrorMetadata,
            'col': 'occurrences'
        },
        'category': {
            'model': RuleSql,
            'col': 'category'
        },
        'impact': {
            'model': RuleSetting,
            'col': 'impact_id'
        },
        'description': {
            'model': ErrorMetadata,
            'col': 'rule_failed'
        }
    }

    sort_order = [getattr(options[sort]['model'], options[sort]['col'])]

    # add secondary sorts
    if sort in ['instances', 'category', 'impact']:
        sort_order.append(RuleSetting.priority)

    # Set the sort order (renamed loop variable: the original shadowed the `order` parameter)
    if order == 'desc':
        sort_order = [sort_col.desc() for sort_col in sort_order]

    table_query = table_query.order_by(*sort_order)

    # The page we're on
    offset = limit * (page - 1)
    table_query = table_query.slice(offset, offset + limit)

    for result in table_query.all():
        response['results'].append({
            'significance': result.priority,
            'rule_label': result.original_rule_label,
            'instance_count': result.occurrences,
            'category': result.category,
            'impact': result.impact_name,
            'rule_description': result.rule_failed
        })

    return JsonResponse.create(StatusCode.OK, response)
def active_submission_overview(submission, file, error_level):
    """ Gathers information for the overview section of the active DABS dashboard.

        Args:
            submission: submission to get the overview for
            file: The type of file to get the overview data for
            error_level: whether to get warning or error counts for the overview (possible: warning, error, mixed)

        Returns:
            A response containing overview information of the provided submission for the active DABS dashboard.

        Raises:
            ResponseException if submission provided is a FABS submission.
    """
    if submission.is_fabs:
        raise ResponseException('Submission must be a DABS submission.',
                                status=StatusCode.CLIENT_ERROR)

    # Basic data that can be gathered from just the submission and passed filters
    response = {
        'submission_id': submission.submission_id,
        'duration': 'Quarterly' if submission.is_quarter_format else 'Monthly',
        'reporting_period': get_time_period(submission),
        'certification_deadline': 'N/A',
        'days_remaining': 'N/A',
        'number_of_rules': 0,
        'total_instances': 0
    }

    # File type: single letter, or a 'cross-XY' string whose pair follows the dash
    if file in ['A', 'B', 'C']:
        response['file'] = 'File ' + file
    else:
        response['file'] = 'Cross: ' + file.split('-')[1]

    sess = GlobalDB.db().session

    # Agency-specific data: FREC takes precedence over CGAC when both codes exist
    if submission.frec_code:
        agency = sess.query(FREC).filter_by(
            frec_code=submission.frec_code).one()
    else:
        agency = sess.query(CGAC).filter_by(
            cgac_code=submission.cgac_code).one()

    response['agency_name'] = agency.agency_name
    response['icon_name'] = agency.icon_name

    # Deadline information, updates the default values of N/A only if it's not a test and the deadline exists
    # NOTE(review): '%-d' (no zero-padding) is a glibc strftime extension — not portable to Windows; confirm
    # the deployment platform supports it.
    if not submission.test_submission:
        deadline = get_certification_deadline(submission)
        if deadline:
            today = datetime.now().date()
            if today > deadline:
                response['certification_deadline'] = 'Past Due'
            elif today == deadline:
                response['certification_deadline'] = deadline.strftime(
                    '%B %-d, %Y')
                response['days_remaining'] = 'Due Today'
            else:
                response['certification_deadline'] = deadline.strftime(
                    '%B %-d, %Y')
                response['days_remaining'] = (deadline - today).days

    # Getting rule counts: sum of occurrences and count of rules per job for this submission
    rule_query = sess.query(func.sum(ErrorMetadata.occurrences).label('total_instances'),
                            func.count(1).label('number_of_rules')).\
        join(Job, Job.job_id == ErrorMetadata.job_id).filter(Job.submission_id == submission.submission_id).\
        group_by(ErrorMetadata.job_id)

    # Narrow to the requested severity and file before aggregating
    rule_query = rule_severity_filter(rule_query, error_level, ErrorMetadata)
    rule_query = file_filter(rule_query, ErrorMetadata, [file])

    # Only one job should match the file filter, so first() takes its aggregate row (None when no errors)
    rule_values = rule_query.first()

    if rule_values:
        response['number_of_rules'] = rule_values.number_of_rules
        response['total_instances'] = rule_values.total_instances

    return JsonResponse.create(StatusCode.OK, response)
def historic_dabs_warning_graphs(filters):
    """ Generate a list of submission graphs appropriate on the filters provided

        Args:
            filters: dictionary representing the filters provided to the historic dashboard endpoints

        Return:
            JsonResponse of the submission summaries appropriate on the filters provided
    """
    sess = GlobalDB.db().session

    validate_historic_dashboard_filters(filters, graphs=True)

    # Published/updated DABS submissions with their agency resolved via either CGAC or FREC
    subs_query = sess.query(
        Submission.submission_id,
        Submission.reporting_fiscal_period.label('period'),
        Submission.reporting_fiscal_year.label('fy'),
        Submission.is_quarter_format.label('is_quarter'),
        case([
            (FREC.frec_code.isnot(None), FREC.frec_code),
            (CGAC.cgac_code.isnot(None), CGAC.cgac_code)
        ]).label('agency_code'),
        case([
            (FREC.agency_name.isnot(None), FREC.agency_name),
            (CGAC.agency_name.isnot(None), CGAC.agency_name)
        ]).label('agency_name')
    ).outerjoin(CGAC, CGAC.cgac_code == Submission.cgac_code).\
        outerjoin(FREC, FREC.frec_code == Submission.frec_code).\
        filter(Submission.publish_status_id.in_([PUBLISH_STATUS_DICT['published'], PUBLISH_STATUS_DICT['updated']])).\
        filter(Submission.is_fabs.is_(False)).order_by(Submission.submission_id)

    subs_query = apply_historic_dabs_filters(sess, subs_query, filters)

    # get the submission metadata, keyed by submission id and preserving submission_id order
    sub_metadata = OrderedDict()
    for query_result in subs_query.all():
        sub_id = query_result.submission_id
        sub_metadata[sub_id] = {
            'submission_id': sub_id,
            'fy': query_result.fy,
            'period': query_result.period,
            'is_quarter': query_result.is_quarter,
            'agency': {
                'name': query_result.agency_name,
                'code': query_result.agency_code,
            },
            'total_warnings': 0,
            'filtered_warnings': 0,
            'warnings': []
        }
    sub_ids = list(sub_metadata.keys())

    # build baseline results dict: one deep copy of the metadata per requested (or every) file type,
    # so each file type accumulates its own warning counts independently
    results_data = OrderedDict()
    resulting_files = filters['files'] or FILE_TYPES
    for resulting_file in sorted(resulting_files):
        results_data[resulting_file] = copy.deepcopy(sub_metadata)

    if sub_ids:
        # get metadata for subs/files
        error_metadata_query = sess.query(
            Job.submission_id,
            PublishedErrorMetadata.file_type_id,
            PublishedErrorMetadata.target_file_type_id,
            PublishedErrorMetadata.original_rule_label.label('label'),
            PublishedErrorMetadata.occurrences.label('instances')
        ).join(PublishedErrorMetadata, PublishedErrorMetadata.job_id == Job.job_id).\
            filter(Job.submission_id.in_(sub_ids))

        # Get the total number of warnings for each submission (per file/target-file pair)
        total_warnings_query = sess.query(
            func.coalesce(func.sum(PublishedErrorMetadata.occurrences), 0).label('total_instances'),
            PublishedErrorMetadata.file_type_id,
            PublishedErrorMetadata.target_file_type_id,
            Job.submission_id
        ).join(Job, Job.job_id == PublishedErrorMetadata.job_id).filter(Job.submission_id.in_(sub_ids)).\
            group_by(Job.submission_id, PublishedErrorMetadata.file_type_id, PublishedErrorMetadata.target_file_type_id)

        # The detail query honors both file and rule filters; the totals query only the file filter,
        # so totals cover every rule within the selected files
        error_metadata_query = apply_historic_dabs_details_filters(
            error_metadata_query, filters)
        total_warnings_query = file_filter(total_warnings_query,
                                           PublishedErrorMetadata,
                                           filters['files'])

        # ordering warnings so they all come out in the same order
        error_metadata_query = error_metadata_query.order_by(
            PublishedErrorMetadata.original_rule_label)

        # Add warnings objects to results dict
        for query_result in error_metadata_query.all():
            file_type = generate_file_type(query_result.file_type_id,
                                           query_result.target_file_type_id)
            submission_id = query_result.submission_id

            # update based on warning data
            results_data[file_type][submission_id][
                'filtered_warnings'] += query_result.instances
            warning = {
                'label': query_result.label,
                'instances': query_result.instances,
                'percent_total': 0
            }
            results_data[file_type][submission_id]['warnings'].append(warning)

        # Add total warnings to results dict
        for query_result in total_warnings_query.all():
            file_type = generate_file_type(query_result.file_type_id,
                                           query_result.target_file_type_id)
            submission_id = query_result.submission_id
            results_data[file_type][submission_id][
                'total_warnings'] += query_result.total_instances

        # Calculate the percentages
        # NOTE(review): assumes total_warnings > 0 whenever a submission has warnings — the totals query is a
        # superset of the filtered one, but all-zero occurrences would divide by zero; confirm data guarantees
        for _, file_dict in results_data.items():
            for _, sub_dict in file_dict.items():
                for warning in sub_dict['warnings']:
                    warning['percent_total'] = round(
                        (warning['instances'] / sub_dict['total_warnings']) *
                        100)

    # Convert submissions dicts to lists (keyed per file type, ordered by submission id)
    results = OrderedDict()
    for file_type, file_dict in results_data.items():
        results[file_type] = [
            sub_dict for sub_id, sub_dict in file_dict.items()
        ]

    return JsonResponse.create(StatusCode.OK, results)
Example #12
0
def test_file_filter_rulesql(database):
    """file_filter on RuleSql: no filter, single-file filters, and a cross-file filter."""
    sess = database.session

    def make_rule(label, file_letter, target_letter=None):
        # Build a fatal RuleSql row; cross-file rules set the flag and carry a target file.
        target_id = FILE_TYPE_DICT_LETTER_ID[target_letter] if target_letter else None
        return RuleSql(rule_sql='',
                       rule_label=label,
                       rule_error_message='',
                       query_name='',
                       file_id=FILE_TYPE_DICT_LETTER_ID[file_letter],
                       rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                       rule_cross_file_flag=target_letter is not None,
                       target_file_id=target_id)

    rule_a = make_rule('A1', 'A')
    rule_b = make_rule('B2', 'B')
    rule_c = make_rule('C3', 'C')
    rule_cross_ab = make_rule('A4', 'A', 'B')
    rule_cross_ba = make_rule('B5', 'B', 'A')
    rule_cross_bc = make_rule('B6', 'B', 'C')
    rules = [rule_a, rule_b, rule_c, rule_cross_ab, rule_cross_ba, rule_cross_bc]
    sess.add_all(rules)
    sess.commit()

    base_query = sess.query(RuleSql)

    # An empty file list applies no filtering at all
    query = filters_helper.file_filter(base_query, RuleSql, [])
    assert set(query.all()) == set(rules)

    # Filtering by individual files
    query = filters_helper.file_filter(base_query, RuleSql, ['A', 'C'])
    assert set(query.all()) == {rule_a, rule_c}

    # A cross-file filter matches both directions of the pair
    query = filters_helper.file_filter(base_query, RuleSql, ['cross-AB'])
    assert set(query.all()) == {rule_cross_ab, rule_cross_ba}
Example #13
0
def save_rule_settings(agency_code, file, errors, warnings):
    """ Given two lists of rules, their settings, agency code, and file, save them in the database.

        Args:
            agency_code: string of the agency's CGAC/FREC code
            file: the rule's file type
            errors: list of error objects and their settings
            warnings: list of warning objects and their settings

        Raises:
            ResponseException if invalid agency code or rule dict
    """
    sess = GlobalDB.db().session

    # The agency code must exist as either a CGAC or a FREC code
    if (sess.query(CGAC).filter(CGAC.cgac_code == agency_code).count() == 0) and \
            (sess.query(FREC).filter(FREC.frec_code == agency_code).count() == 0):
        raise ResponseException('Invalid agency_code: {}'.format(agency_code),
                                StatusCode.CLIENT_ERROR)

    # Checked once before any writes: decides whether we insert new rows or update existing ones below
    has_settings = agency_has_settings(sess=sess,
                                       agency_code=agency_code,
                                       file=file)

    # Process both severities with the same logic: errors map to 'fatal', warnings to 'warning'
    for rule_type, rules in {'fatal': errors, 'warning': warnings}.items():
        # Get the rule ids from the labels
        rule_label_query = file_filter(
            sess.query(RuleSql.rule_label, RuleSql.file_id,
                       RuleSql.target_file_id), RuleSql, [file])
        rule_label_query = rule_label_query.filter(
            RuleSql.rule_severity_id == RULE_SEVERITY_DICT[rule_type])
        # Map each known rule label to the file/target-file ids needed to key its setting row
        rule_label_mapping = {
            rule.rule_label: {
                'file_id': rule.file_id,
                'target_file_id': rule.target_file_id
            }
            for rule in rule_label_query.all()
        }

        # Compare them with the list provided; the caller must supply exactly the expected set of labels
        # (sorted(dict) sorts its keys)
        rule_labels = [rule['label'] for rule in rules if 'label' in rule]
        if sorted(rule_labels) != sorted(rule_label_mapping):
            logger.info('{} {}'.format(sorted(rule_labels),
                                       sorted(rule_label_mapping)))
            raise ResponseException(
                'Rules list provided doesn\'t match the rules expected: {}'.
                format(', '.join(rule_labels)), StatusCode.CLIENT_ERROR)

        # resetting priorities by the order of the incoming lists
        priority = 1
        for rule_dict in rules:
            validate_rule_dict(rule_dict)
            rule_label = rule_dict['label']
            impact_id = RULE_IMPACT_DICT[rule_dict['impact']]
            file_id = rule_label_mapping[rule_label]['file_id']
            target_file_id = rule_label_mapping[rule_label]['target_file_id']

            if not has_settings:
                # First save for this agency/file: insert a fresh setting row
                sess.add(
                    RuleSetting(agency_code=agency_code,
                                rule_label=rule_label,
                                file_id=file_id,
                                target_file_id=target_file_id,
                                priority=priority,
                                impact_id=impact_id))
            else:
                # Settings already exist: update priority/impact in place, keyed by agency + rule + files
                sess.query(RuleSetting).filter(RuleSetting.agency_code == agency_code,
                                               RuleSetting.rule_label == rule_label,
                                               RuleSetting.file_id == file_id,
                                               RuleSetting.target_file_id == target_file_id).\
                    update({'priority': priority, 'impact_id': impact_id}, synchronize_session=False)
            priority += 1
    # Single commit so the whole save is applied together
    sess.commit()
    return JsonResponse.create(
        StatusCode.OK,
        {'message': 'Agency {} rules saved.'.format(agency_code)})