def load_cgac(file_name):
    """Load CGAC (high-level agency names) lookup table."""
    with create_app().app_context():
        sess = GlobalDB.db().session
        models = {cgac.cgac_code: cgac for cgac in sess.query(CGAC)}

        # read CGAC values from csv
        data = pd.read_csv(file_name, dtype=str)
        # clean data
        data = clean_data(
            data,
            CGAC,
            {"cgac_agency_code": "cgac_code", "agency_name": "agency_name",
             "agency_abbreviation": "agency_abbreviation"},
            {"cgac_code": {"pad_to_length": 3}}
        )
        # de-dupe
        data.drop_duplicates(subset=['cgac_code'], inplace=True)

        delete_missing_cgacs(models, data)
        update_cgacs(models, data)
        sess.add_all(models.values())
        sess.commit()

        logger.info('%s CGAC records inserted', len(models))
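load_cgac leans on delete_missing_cgacs and update_cgacs, which none of these examples define. A hedged sketch of that delete-then-upsert pattern, assuming the CGAC model and GlobalDB session from the example above (the broker's real helpers may differ):

def delete_missing_cgacs_sketch(models, new_data):
    # drop any model whose cgac_code no longer appears in the incoming csv
    sess = GlobalDB.db().session
    for code in set(models.keys()) - set(new_data['cgac_code']):
        sess.delete(models.pop(code))

def update_cgacs_sketch(models, new_data):
    # create or update one CGAC model per incoming row
    for _, row in new_data.iterrows():
        agency = models.setdefault(row['cgac_code'], CGAC())
        for field in ('cgac_code', 'agency_name', 'agency_abbreviation'):
            setattr(agency, field, row[field])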
def load_object_class(filename):
    """Load object class lookup table."""
    model = ObjectClass

    with create_app().app_context():
        sess = GlobalDB.db().session
        # for object class, delete and replace values
        sess.query(model).delete()

        data = pd.read_csv(filename, dtype=str)
        data = clean_data(
            data,
            model,
            {"max_oc_code": "object_class_code",
             "max_object_class_name": "object_class_name"},
            {"object_class_code": {"pad_to_length": 3}}
        )
        # de-dupe
        data.drop_duplicates(subset=['object_class_code'], inplace=True)
        # insert to db
        table_name = model.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    logger.info('{} records inserted to {}'.format(num, table_name))
Example #4
def clean_tas(csv_path):
    """Read a CSV into a dataframe, then use a configured `clean_data` and
    return the results"""
    data = pd.read_csv(csv_path, dtype=str)
    data = clean_data(
        data,
        TASLookup,
        {"a": "availability_type_code",
         "acct_num": "account_num",
         "aid": "agency_identifier",
         "ata": "allocation_transfer_agency",
         "bpoa": "beginning_period_of_availa",
         "epoa": "ending_period_of_availabil",
         "main": "main_account_code",
         "sub": "sub_account_code",
         "financial_indicator_type2": "financial_indicator2",
         "dt_tm_estab": "internal_start_date",
         "dt_end": "internal_end_date",
         "fr_entity_description": "fr_entity_description",
         "fr_entity_type": "fr_entity_type"
         },
        {"allocation_transfer_agency": {"pad_to_length": 3, "keep_null": True},
         "agency_identifier": {"pad_to_length": 3},
         # Account for " " cells
         "availability_type_code": {"pad_to_length": 0, "keep_null": True},
         "beginning_period_of_availa": {"pad_to_length": 0, "keep_null": True},
         "ending_period_of_availabil": {"pad_to_length": 0, "keep_null": True},
         "main_account_code": {"pad_to_length": 4},
         "sub_account_code": {"pad_to_length": 3},
         }
    )
    data["account_num"] = pd.to_numeric(data['account_num'])
    return data.where(pd.notnull(data), None)
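Every example in this collection funnels through clean_data(df, model, field_map, field_options). A minimal sketch of a helper with that shape, inferred only from these call sites (column renaming, pad_to_length, keep_null, strip_commas); the broker's real implementation may differ, and a variant of it also takes a required-column list and returns a dropped-row count, as in the load_program_activity_data example further down:

import pandas as pd

def clean_data_sketch(df, model, field_map, field_options):
    # model is accepted for signature parity; the real helper presumably
    # uses it for validation, which this sketch skips
    # keep only the mapped source columns and rename them to model columns
    df = df[list(field_map.keys())].rename(columns=field_map)
    for col, opts in field_options.items():
        pad = opts.get('pad_to_length')
        keep_null = opts.get('keep_null', False)
        if pad is not None:
            def pad_value(value, width=pad, keep=keep_null):
                # blank/null cells become None (keep_null) or an empty string
                if pd.isnull(value) or not str(value).strip():
                    return None if keep else ''
                return str(value).strip().zfill(width)
            df[col] = df[col].apply(pad_value)
        if opts.get('strip_commas'):
            # e.g. "1,234,567" -> "1234567" so pd.to_numeric can parse it
            df[col] = df[col].str.replace(',', '')
    return df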
def load_frec(file_name):
    """Load FREC (high-level agency names) lookup table."""
    with create_app().app_context():
        sess = GlobalDB.db().session
        models = {frec.frec_code: frec for frec in sess.query(FREC)}

        # read FREC values from csv
        data = pd.read_csv(file_name, dtype=str)

        # clean data
        data = clean_data(
            data,
            FREC,
            {"frec": "frec_code", "cgac_agency_code": "cgac_code", "frec_entity_description": "agency_name",
             "agency_abbreviation": "agency_abbreviation"},
            {"frec": {"keep_null": False}, "cgac_code": {"pad_to_length": 3}, "frec_code": {"pad_to_length": 4}}
        )
        # de-dupe
        data.drop_duplicates(subset=['frec_code'], inplace=True)
        # create foreign key dicts
        cgac_dict = {str(cgac.cgac_code): cgac.cgac_id for
                     cgac in sess.query(CGAC).filter(CGAC.cgac_code.in_(data["cgac_code"])).all()}

        # insert to db
        delete_missing_frecs(models, data)
        update_frecs(models, data, cgac_dict)
        sess.add_all(models.values())
        sess.commit()

        logger.info('%s FREC records inserted', len(models))
Example #7
def clean_sam_data(data):
    return clean_data(data, DUNS, {
        "awardee_or_recipient_uniqu": "awardee_or_recipient_uniqu",
        "activation_date": "activation_date",
        "deactivation_date": "deactivation_date",
        "registration_date": "registration_date",
        "expiration_date": "expiration_date",
        "last_sam_mod_date": "last_sam_mod_date",
        "sam_extract_code": "sam_extract_code",
        "legal_business_name": "legal_business_name"
    }, {})
def clean_sam_data(data):
    return clean_data(
        data,
        DUNS,
        {"awardee_or_recipient_uniqu": "awardee_or_recipient_uniqu",
         "activation_date": "activation_date",
         "deactivation_date": "deactivation_date",
         "expiration_date": "expiration_date",
         "last_sam_mod_date": "last_sam_mod_date",
         "sam_extract_code": "sam_extract_code",
         "legal_business_name": "legal_business_name"},
        {'awardee_or_recipient_uniqu': {'pad_to_length': 9, 'keep_null': True}}
    )
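The pad_to_length 9 option here reflects that DUNS numbers are nine digits; with zfill-style padding as in the sketch above, shorter values read from the CSV are left-padded with zeros:

print('12345'.zfill(9))  # 000012345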
Example #9
def clean_sam_data(data, table=DUNS):
    return clean_data(
        data, table, {
            "awardee_or_recipient_uniqu": "awardee_or_recipient_uniqu",
            "activation_date": "activation_date",
            "deactivation_date": "deactivation_date",
            "registration_date": "registration_date",
            "expiration_date": "expiration_date",
            "last_sam_mod_date": "last_sam_mod_date",
            "sam_extract_code": "sam_extract_code",
            "legal_business_name": "legal_business_name",
            "ultimate_parent_legal_enti": "ultimate_parent_legal_enti",
            "ultimate_parent_unique_ide": "ultimate_parent_unique_ide"
        }, {})
def load_program_activity(filename):
    """Load program activity lookup table."""
    model = ProgramActivity

    with create_app().app_context():
        sess = GlobalDB.db().session

        # for program activity, delete and replace values
        sess.query(model).delete()

        data = pd.read_csv(filename, dtype=str)
        data = clean_data(
            data,
            model,
            {"year": "budget_year",
             "agency_id": "agency_id",
             "alloc_id": "allocation_transfer_id",
             "account": "account_number",
             "pa_code": "program_activity_code",
             "pa_name": "program_activity_name"},
            {"program_activity_code": {"pad_to_length": 4},
             "agency_id": {"pad_to_length": 3},
             "allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
             "account_number": {"pad_to_length": 4}}
        )
        # Lowercase Program Activity Name
        data['program_activity_name'] = data['program_activity_name'].apply(lambda x: x.lower())
        # because we're only loading a subset of program activity info,
        # there will be duplicate records in the dataframe. this is ok,
        # but they need to be de-duped before the db load.
        data.drop_duplicates(inplace=True)
        # insert to db
        table_name = model.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    logger.info('{} records inserted to {}'.format(num, table_name))
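insert_dataframe(data, table_name, connection) appears in most of these loaders and returns the number of rows written. A rough equivalent built on pandas (an assumption; the broker's version may batch rows or use a COPY-style insert):

import pandas as pd

def insert_dataframe_sketch(df, table_name, connection):
    # append the frame to an existing table and report how many rows went in
    df.to_sql(table_name, connection, if_exists='append', index=False)
    return len(df.index)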
def update_state_congr_table_census(census_file, sess):
    logger.info("Adding congressional districtions from census to the state_congressional table")

    data = pd.read_csv(census_file, dtype=str)
    model = StateCongressional

    data = clean_data(
        data,
        model,
        {"state_code": "state_code",
         "congressional_district_no": "congressional_district_no",
         "census_year": "census_year"},
        {'congressional_district_no': {"pad_to_length": 2}}
    )

    table_name = model.__table__.name
    insert_dataframe(data, table_name, sess.connection())
    sess.commit()
Example #13
def clean_office(csv_path):
    """Read a CSV into a dataframe, then use a configured `clean_data` and
    return the results"""
    data = pd.read_csv(csv_path, dtype=str)
    data = clean_data(
        data,
        FPDSContractingOffice,
        {"department_id": "department_id",
         "department_name": "department_name",
         "agency_code": "agency_code",
         "agency_name": "agency_name",
         "contracting_office_code": "contracting_office_code",
         "contracting_office_name": "contracting_office_name",
         "start_date": "start_date",
         "end_date": "end_date",
         "address_city": "address_city",
         "address_state": "address_state",
         "zip_code": "zip_code",
         "country_code": "country_code"},
        {"department_id": {"pad_to_length": 4}}
    )
    return data.where(pd.notnull(data), None)
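clean_tas and clean_office both finish with data.where(pd.notnull(data), None), which swaps pandas NaN markers for Python None so the database layer writes NULL rather than the string 'nan'. A small demonstration:

import numpy as np
import pandas as pd

df = pd.DataFrame({'zip_code': ['20001', np.nan]}, dtype=object)
df = df.where(pd.notnull(df), None)
print(df['zip_code'].tolist())  # ['20001', None]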
def load_sub_tier_agencies(file_name):
    """Load Sub Tier Agency (sub_tier-level agency names) lookup table."""
    with create_app().app_context():
        sess = GlobalDB.db().session
        models = {sub_tier_agency.sub_tier_agency_code: sub_tier_agency for
                  sub_tier_agency in sess.query(SubTierAgency)}

        # read Sub Tier Agency values from csv
        data = pd.read_csv(file_name, dtype=str)

        condition = data["FPDS DEPARTMENT ID"] == data["SUBTIER CODE"]
        data.loc[condition, "PRIORITY"] = 1
        data.loc[~condition, "PRIORITY"] = 2

        # clean data
        data = clean_data(
            data,
            SubTierAgency,
            {"cgac_agency_code": "cgac_code", "subtier_code": "sub_tier_agency_code", "priority": "priority",
             "frec": "frec_code", "subtier_name": "sub_tier_agency_name", "is_frec": "is_frec"},
            {"cgac_code": {"pad_to_length": 3}, "frec_code": {"pad_to_length": 4},
             "sub_tier_agency_code": {"pad_to_length": 4}}
        )
        # de-dupe
        data.drop_duplicates(subset=['sub_tier_agency_code'], inplace=True)
        # create foreign key dicts
        cgac_dict = {str(cgac.cgac_code): cgac.cgac_id for
                     cgac in sess.query(CGAC).filter(CGAC.cgac_code.in_(data["cgac_code"])).all()}
        frec_dict = {str(frec.frec_code): frec.frec_id for
                     frec in sess.query(FREC).filter(FREC.frec_code.in_(data["frec_code"])).all()}

        delete_missing_sub_tier_agencies(models, data)
        update_sub_tier_agencies(models, data, cgac_dict, frec_dict)
        sess.add_all(models.values())
        sess.commit()

        logger.info('%s Sub Tier Agency records inserted', len(models))
Example #15
def load_object_class(base_path):
    """ This function loads Object classes into the database

        Args:
            base_path: directory that contains the domain values files.
    """
    if CONFIG_BROKER["use_aws"]:
        s3connection = boto.s3.connect_to_region(CONFIG_BROKER['aws_region'])
        s3bucket = s3connection.lookup(CONFIG_BROKER['sf_133_bucket'])
        filename = s3bucket.get_key("object_class.csv").generate_url(
            expires_in=600)
    else:
        filename = os.path.join(base_path, "object_class.csv")

    # Load object class lookup table
    logger.info('Loading Object Class File: object_class.csv')
    with create_app().app_context():
        sess = GlobalDB.db().session
        sess.query(ObjectClass).delete()

        data = pd.read_csv(filename, dtype=str)
        data = clean_data(
            data,
            ObjectClass,
            {"max_oc_code": "object_class_code",
             "max_object_class_name": "object_class_name"},
            {"object_class_code": {"pad_to_length": 3}}
        )
        # de-dupe
        data.drop_duplicates(subset=['object_class_code'], inplace=True)
        # insert to db
        table_name = ObjectClass.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    logger.info('{} records inserted to {}'.format(num, table_name))
def load_country_codes(base_path):
    """ Load Country Codes into the database.

        Args:
            base_path: directory that contains the domain values files.
    """

    if CONFIG_BROKER["use_aws"]:
        s3connection = boto.s3.connect_to_region(CONFIG_BROKER['aws_region'])
        s3bucket = s3connection.lookup(CONFIG_BROKER['sf_133_bucket'])
        filename = s3bucket.get_key("country_codes.csv").generate_url(expires_in=600)
    else:
        filename = os.path.join(base_path, "country_codes.csv")

    logger.info('Loading country codes file: country_codes.csv')

    with create_app().app_context():
        sess = GlobalDB.db().session
        # for country codes, delete and replace values
        sess.query(CountryCode).delete()

        data = pd.read_csv(filename, dtype=str)
        data = clean_data(
            data,
            CountryCode,
            {"country_code": "country_code", "country_name": "country_name"},
            {}
        )
        # de-dupe
        data.drop_duplicates(subset=['country_code'], inplace=True)
        # insert to db
        table_name = CountryCode.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    logger.info('{} records inserted to {}'.format(num, table_name))
Example #17
def format_fabs_data(data):
    # drop all records without any data to be loaded
    # (note: replace with inplace=True returns None, so don't reassign it)
    data.replace('', np.nan, inplace=True)
    data.dropna(subset=["awarding office code", "awarding office name", "funding office name",
                        "funding office code", "funding agency name", "funding agency code",
                        "funding sub tier agency code", "funding sub tier agency name",
                        "legal entity foreign city", "legal entity foreign province",
                        "legal entity foreign postal code",
                        "legal entity foreign location description"],
                inplace=True)

    # ensure there are rows to be cleaned and formatted
    if len(data.index) == 0:
        return None

    cdata = clean_data(
        data,
        PublishedAwardFinancialAssistance,
        {"agency_code": "awarding_sub_tier_agency_c",
         "federal_award_mod": "award_modification_amendme",
         "federal_award_id": "fain",
         "uri": "uri",
         "awarding office code": "awarding_office_code",
         "awarding office name": "awarding_office_name",
         "funding office name": "funding_office_name",
         "funding office code": "funding_office_code",
         "funding agency name": "funding_agency_name",
         "funding agency code": "funding_agency_code",
         "funding sub tier agency code": "funding_sub_tier_agency_co",
         "funding sub tier agency name": "funding_sub_tier_agency_na",
         "legal entity foreign city": "legal_entity_foreign_city",
         "legal entity foreign province": "legal_entity_foreign_provi",
         "legal entity foreign postal code": "legal_entity_foreign_posta",
         "legal entity foreign location description": "legal_entity_foreign_descr"},
        {}
    )

    # make a pass through the dataframe, changing any empty values to None, to ensure that those are represented as
    # NULL in the db.
    cdata = cdata.replace(np.nan, '', regex=True)
    cdata = cdata.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)

    # generate the afa_generated_unique field
    cdata['afa_generated_unique'] = cdata.apply(lambda x: generate_unique_string(x), axis=1)

    # drop the columns used only to build afa_generated_unique, since we aren't updating them
    for col in ["awarding_sub_tier_agency_c", "award_modification_amendme", "fain", "uri"]:
        del cdata[col]

    return cdata
def load_cfda_program(filename):
    """Load country code lookup table."""
    model = CFDAProgram

    with create_app().app_context():
        sess = GlobalDB.db().session
        # for CFDA programs, delete and replace values
        sess.query(model).delete()

        data = pd.read_csv(filename, dtype=str, encoding='latin1')

        data = clean_data(
            data, model, {
                "program_title": "program_title",
                "program_number": "program_number",
                "popular_name_(020)": "popular_name",
                "federal_agency_(030)": "federal_agency",
                "authorization_(040)": "authorization",
                "objectives_(050)": "objectives",
                "types_of_assistance_(060)": "types_of_assistance",
                "uses_and_use_restrictions_(070)": "uses_and_use_restrictions",
                "applicant_eligibility_(081)": "applicant_eligibility",
                "beneficiary_eligibility_(082)": "beneficiary_eligibility",
                "credentials/documentation_(083)": "credentials_documentation",
                "preapplication_coordination_(091)":
                "preapplication_coordination",
                "application_procedures_(092)": "application_procedures",
                "award_procedure_(093)": "award_procedure",
                "deadlines_(094)": "deadlines",
                "range_of_approval/disapproval_time_(095)":
                "range_of_approval_disapproval_time",
                "appeals_(096)": "appeals",
                "renewals_(097)": "renewals",
                "formula_and_matching_requirements_(101)":
                "formula_and_matching_requirements",
                "length_and_time_phasing_of_assistance_(102)":
                "length_and_time_phasing_of_assistance",
                "reports_(111)": "reports",
                "audits_(112)": "audits",
                "records_(113)": "records",
                "account_identification_(121)": "account_identification",
                "obligations_(122)": "obligations",
                "range_and_average_of_financial_assistance_(123)":
                "range_and_average_of_financial_assistance",
                "program_accomplishments_(130)": "program_accomplishments",
                "regulations__guidelines__and_literature_(140)":
                "regulations_guidelines_and_literature",
                "regional_or_local_office_(151)": "regional_or_local_office",
                "headquarters_office_(152)": "headquarters_office",
                "website_address_(153)": "website_address",
                "related_programs_(160)": "related_programs",
                "examples_of_funded_projects_(170)":
                "examples_of_funded_projects",
                "criteria_for_selecting_proposals_(180)":
                "criteria_for_selecting_proposals",
                "url": "url",
                "recovery": "recovery",
                "omb_agency_code": "omb_agency_code",
                "omb_bureau_code": "omb_bureau_code",
                "published_date": "published_date",
                "archived_date": "archived_date"
            }, {})
        data["published_date"] = format_date(data["published_date"])
        data["archived_date"] = format_date(data["archived_date"])

        # insert to db
        table_name = model.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    logger.info('{} records inserted to {}'.format(num, table_name))
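Note that format_date is used two different ways in this collection: here it takes a whole column, while the FABS examples below call a row-wise helper of the same name. A plausible column-wise version using pandas (an assumption about the real helper):

import pandas as pd

def format_date_sketch(column):
    # coerce unparseable dates to NaT instead of raising
    return pd.to_datetime(column, errors='coerce')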
def format_fabs_data(data):
    logger.info("formatting data")

    # drop rows with null FAIN and URI
    data = data[~((data['federal_award_id'].isnull()) & (data['uri'].isnull()))].copy()

    if len(data.index) == 0:
        return None

    proper_casing_cols = ['recipient_name', 'recipient_city_name', 'recipient_county_name', 'receip_addr1',
                          'receip_addr2', 'receip_addr3']
    for col in proper_casing_cols:
        data[col] = data.apply(lambda x: format_proper_casing(x, col), axis=1)

    cols_with_colons = ['action_type', 'assistance_type', 'agency_code', 'recipient_type', 'correction_late_ind']
    for col in cols_with_colons:
        data[col] = data.apply(lambda x: remove_data_after_colon(x, col), axis=1)

    # data['recipient_city_code'] = data.apply(lambda x: format_integer_code(x, 'recipient_city_code', 5), axis=1)
    data['recipient_county_code'] = data.apply(lambda x: format_integer_code(x, 'recipient_county_code', 3), axis=1)
    data['legal_entity_zip5'] = data.apply(lambda x: format_zip_five(x), axis=1)
    data['legal_entity_zip_last4'] = data.apply(lambda x: format_zip_four(x), axis=1)
    data['total_funding_amount'] = data.apply(lambda x: format_total_funding(x), axis=1)
    data['starting_date'] = data.apply(lambda x: format_date(x, 'starting_date'), axis=1)
    data['ending_date'] = data.apply(lambda x: format_date(x, 'ending_date'), axis=1)
    data['record_type'] = data.apply(lambda x: format_record_type(x), axis=1)
    data['place_of_perform_county_na'] = data.apply(lambda x: format_cc_code(x, True), axis=1)
    data['place_of_perform_city'] = data.apply(lambda x: format_cc_code(x, False), axis=1)
    data['principal_place_zip'] = data.apply(lambda x: format_full_zip(x), axis=1)
    data['principal_place_cd'] = data.apply(lambda x: format_cd(x, 'principal_place_cd'), axis=1)
    data['recipient_cd'] = data.apply(lambda x: format_cd(x, 'recipient_cd'), axis=1)
    data['is_historical'] = np.full(len(data.index), True, dtype=bool)

    # adding columns missing from historical data
    null_list = [
        'awarding_sub_tier_agency_n', 'awarding_agency_code', 'awarding_agency_name', 'awarding_office_code',
        'funding_agency_name', 'funding_agency_code', 'funding_office_code', 'funding_sub_tier_agency_co',
        'funding_sub_tier_agency_na', 'legal_entity_foreign_city', 'legal_entity_foreign_posta',
        'legal_entity_foreign_provi', 'place_of_performance_forei'
    ]
    for item in null_list:
        data[item] = None

    cdata = clean_data(
        data,
        PublishedAwardFinancialAssistance,
        {
            'obligation_action_date': 'action_date',
            'action_type': 'action_type',
            'assistance_type': 'assistance_type',
            'project_description': 'award_description',
            'recipient_name': 'awardee_or_recipient_legal',
            'duns_no': 'awardee_or_recipient_uniqu',
            'awarding_agency_code': 'awarding_agency_code',
            'awarding_agency_name': 'awarding_agency_name',
            'awarding_office_code': 'awarding_office_code',
            'agency_code': 'awarding_sub_tier_agency_c',
            'awarding_sub_tier_agency_n': 'awarding_sub_tier_agency_n',
            'federal_award_mod': 'award_modification_amendme',
            'rec_flag': 'business_funds_indicator',
            'recipient_type': 'business_types',
            'cfda_program_num': 'cfda_number',
            'cfda_program_title': 'cfda_title',
            'correction_late_ind': 'correction_late_delete_ind',
            'face_loan_guran': 'face_value_loan_guarantee',
            'federal_award_id': 'fain',
            'fed_funding_amount': 'federal_action_obligation',
            'fyq_correction': 'fiscal_year_and_quarter_co',
            'funding_agency_name': 'funding_agency_name',
            'funding_agency_code': 'funding_agency_code',
            'funding_office_code': 'funding_office_code',
            'funding_sub_tier_agency_co': 'funding_sub_tier_agency_co',
            'funding_sub_tier_agency_na': 'funding_sub_tier_agency_na',
            'is_historical': 'is_historical',
            'receip_addr1': 'legal_entity_address_line1',
            'receip_addr2': 'legal_entity_address_line2',
            'receip_addr3': 'legal_entity_address_line3',
            # 'recipient_city_code': 'legal_entity_city_code',
            'recipient_city_name': 'legal_entity_city_name',
            'recipient_cd': 'legal_entity_congressional',
            'recipient_country_code': 'legal_entity_country_code',
            'recipient_county_code': 'legal_entity_county_code',
            'recipient_county_name': 'legal_entity_county_name',
            'legal_entity_foreign_city': 'legal_entity_foreign_city',
            'legal_entity_foreign_posta': 'legal_entity_foreign_posta',
            'legal_entity_foreign_provi': 'legal_entity_foreign_provi',
            'recipient_state_code': 'legal_entity_state_code',
            'legal_entity_zip5': 'legal_entity_zip5',
            'legal_entity_zip_last4': 'legal_entity_zip_last4',
            'last_modified_date': 'modified_at',
            'non_fed_funding_amount': 'non_federal_funding_amount',
            'orig_sub_guran': 'original_loan_subsidy_cost',
            'ending_date': 'period_of_performance_curr',
            'starting_date': 'period_of_performance_star',
            'principal_place_code': 'place_of_performance_code',
            'principal_place_cd': 'place_of_performance_congr',
            'principal_place_country_code': 'place_of_perform_country_c',
            'place_of_performance_forei': 'place_of_performance_forei',
            'principal_place_zip': 'place_of_performance_zip4a',
            'place_of_perform_city': 'place_of_performance_city',
            'place_of_perform_county_na': 'place_of_perform_county_na',
            'principal_place_state': 'place_of_perform_state_nam',
            'record_type': 'record_type',
            'sai_number': 'sai_number',
            'total_funding_amount': 'total_funding_amount',
            'uri': 'uri'
        }, {
            'place_of_performance_congr': {'pad_to_length': 2, 'keep_null': True},
            'legal_entity_congressional': {'pad_to_length': 2, 'keep_null': True},
            'awardee_or_recipient_uniqu': {'pad_to_length': 9, 'keep_null': True}
        }
    )

    # make a pass through the dataframe, changing any empty values to None, to ensure that those are represented as
    # NULL in the db.
    cdata = cdata.replace(np.nan, '', regex=True)
    cdata = cdata.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)

    # generate the afa_generated_unique field
    cdata['afa_generated_unique'] = cdata.apply(lambda x: generate_unique_string(x), axis=1)

    return cdata
Example #20
def clean_sf133_data(filename, sf133_data):
    data = pd.read_csv(filename, dtype=str)
    data = clean_data(
        data,
        sf133_data,
        {
            "ata": "allocation_transfer_agency",
            "aid": "agency_identifier",
            "availability_type_code": "availability_type_code",
            "bpoa": "beginning_period_of_availa",
            "epoa": "ending_period_of_availabil",
            "main_account": "main_account_code",
            "sub_account": "sub_account_code",
            "fiscal_year": "fiscal_year",
            "period": "period",
            "line_num": "line",
            "amount_summed": "amount"
        },
        {"allocation_transfer_agency": {"pad_to_length": 3},
         "agency_identifier": {"pad_to_length": 3},
         "main_account_code": {"pad_to_length": 4},
         "sub_account_code": {"pad_to_length": 3},
         # the next three entries handle the TAS fields that shouldn't be
         # padded but should still be empty spaces rather than NULLs. this
         # ensures that the downstream pivot & melt (which insert the missing
         # 0-value SF-133 lines) will work as expected (values used in the
         # pivot index cannot be NULL). the "pad_to_length: 0" works around
         # the fact that sometimes the incoming data for these columns is a
         # single space and sometimes it is blank/NULL.
         "beginning_period_of_availa": {"pad_to_length": 0},
         "ending_period_of_availabil": {"pad_to_length": 0},
         "availability_type_code": {"pad_to_length": 0},
         "amount": {"strip_commas": True}})

    # todo: find out how to handle dup rows (e.g., same tas/period/line number)
    # line numbers 2002 and 2102 are the only duped SF-133 report line numbers,
    # and they are not used by the validation rules, so for now
    # just remove them before loading our SF-133 table
    dupe_line_numbers = ['2002', '2102']
    data = data[~data.line.isin(dupe_line_numbers)]

    # add concatenated TAS field for internal use (i.e., joining to staging tables)
    data['tas'] = data.apply(lambda row: format_internal_tas(row), axis=1)
    data['amount'] = data['amount'].astype(float)

    data = fill_blank_sf133_lines(data)

    return data
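The pad_to_length 0 entries above depend on padding that strips the cell first, so a single-space cell and a blank one both normalize to an empty string; with zfill-style padding as sketched earlier, a width of 0 then leaves the stripped value unchanged:

print('   '.strip().zfill(0))  # '' -> blank and single-space cells match
print(' x '.strip().zfill(0))  # 'x' -> real values pass through unpadded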
Example #21
def clean_duns_csv_data(data):
    return clean_data(data, DUNS, column_mappings, {})
Example #22
def load_program_activity_data(base_path):
    """ Load program activity lookup table.

        Args:
            base_path: directory of domain config files
    """
    last_upload = get_date_of_current_pa_upload(base_path)
    if not (last_upload > get_stored_pa_last_upload()):
        return

    program_activity_file = get_program_activity_file(base_path)

    logger.info('Loading program activity: ' + PA_FILE_NAME)

    with create_app().app_context():
        sess = GlobalDB.db().session
        try:
            data = pd.read_csv(program_activity_file, dtype=str)
        except pd.io.common.EmptyDataError:
            log_blank_file()
            sys.exit(4)  # exit code chosen arbitrarily, to indicate distinct failure states

        headers = set([header.upper() for header in list(data)])

        if not VALID_HEADERS.issubset(headers):
            logger.error(
                "Missing required headers. Required headers include: %s" %
                str(VALID_HEADERS))
            sys.exit(4)

        try:
            dropped_count, data = clean_data(
                data,
                ProgramActivity,
                {"fyq": "fiscal_year_quarter",
                 "agency_code": "agency_id",
                 "allocation_id": "allocation_transfer_id",
                 "account_code": "account_number",
                 "pa_code": "program_activity_code",
                 "pa_title": "program_activity_name"},
                {"program_activity_code": {"pad_to_length": 4},
                 "agency_id": {"pad_to_length": 3},
                 "allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
                 "account_number": {"pad_to_length": 4}},
                ["agency_id", "program_activity_code", "account_number", "program_activity_name"],
                True
            )
        except FailureThresholdExceededException as e:
            if e.count == 0:
                log_blank_file()
                sys.exit(4)
            else:
                count_str = "Application tried to drop {} rows".format(e.count)
                logger.error(
                    "Loading of program activity file failed due to exceeded failure threshold. "
                    + count_str)
                sys.exit(5)

        sess.query(ProgramActivity).delete()

        # Lowercase Program Activity Name
        data['program_activity_name'] = data['program_activity_name'].apply(lambda x: lowercase_or_notify(x))

        # because we're only loading a subset of program activity info,
        # there will be duplicate records in the dataframe. this is ok,
        # but they need to be de-duped before the db load. we also log the count.
        base_count = data.shape[0]
        data.drop_duplicates(inplace=True)
        logger.info("Dropped {} duplicate rows.".format(base_count - data.shape[0]))

        # insert to db
        table_name = ProgramActivity.__table__.name
        num = insert_dataframe(data, table_name, sess.connection())
        sess.commit()

    set_stored_pa_last_upload(last_upload)
    logger.info('{} records inserted to {}'.format(num, table_name))

    if dropped_count > 0:
        sys.exit(3)
def format_fabs_data(data, sess, fips_state_list, state_code_list,
                     sub_tier_list, county_code_list):
    logger.info("formatting data")

    # drop rows with null FAIN and URI
    data = data[~((data['federal_award_id'].isnull()) & (data['uri'].isnull()))].copy()

    if len(data.index) == 0:
        return None

    proper_casing_cols = ['recipient_name', 'recipient_city_name', 'recipient_county_name', 'receip_addr1',
                          'receip_addr2', 'receip_addr3']
    for col in proper_casing_cols:
        data[col] = data.apply(lambda x: format_proper_casing(x, col), axis=1)

    cols_with_colons = ['action_type', 'assistance_type', 'agency_code', 'recipient_type', 'correction_late_ind']
    for col in cols_with_colons:
        data[col] = data.apply(lambda x: remove_data_after_colon(x, col), axis=1)

    # data['recipient_city_code'] = data.apply(lambda x: format_integer_code(x, 'recipient_city_code', 5), axis=1)
    data['recipient_county_code'] = data.apply(lambda x: format_integer_code(x, 'recipient_county_code', 3), axis=1)
    data['legal_entity_zip5'] = data.apply(lambda x: format_zip_five(x), axis=1)
    data['legal_entity_zip_last4'] = data.apply(lambda x: format_zip_four(x), axis=1)
    data['total_funding_amount'] = data.apply(lambda x: format_total_funding(x), axis=1)
    data['starting_date'] = data.apply(lambda x: format_date(x, 'starting_date'), axis=1)
    data['ending_date'] = data.apply(lambda x: format_date(x, 'ending_date'), axis=1)
    data['record_type'] = data.apply(lambda x: format_record_type(x), axis=1)
    data['principal_place_zip'] = data.apply(lambda x: format_full_zip(x), axis=1)
    data['principal_place_cd'] = data.apply(lambda x: format_cd(x, 'principal_place_cd'), axis=1)
    data['recipient_cd'] = data.apply(lambda x: format_cd(x, 'recipient_cd'), axis=1)
    data['is_historical'] = np.full(len(data.index), True, dtype=bool)
    logger.info("Starting derive_legal_entity_city_code")
    data['legal_entity_city_code'] = data.apply(lambda x: derive_legal_entity_city_code(x, sess), axis=1)
    logger.info("Starting derive_awarding_agency_code")
    data['awarding_agency_code'] = data.apply(lambda x: derive_awarding_agency_code(x, sub_tier_list), axis=1)
    logger.info("Starting derive_awarding_agency_name")
    data['awarding_agency_name'] = data.apply(lambda x: derive_awarding_agency_name(x, sub_tier_list), axis=1)
    logger.info("Starting derive_awarding_sub_tier_agency_n")
    data['awarding_sub_tier_agency_n'] = data.apply(lambda x: derive_awarding_sub_tier_agency_n(x, sub_tier_list), axis=1)
    logger.info("Starting derive_place_of_perform_county_na")
    data['place_of_perform_county_na'] = data.apply(
        lambda x: derive_place_of_perform_county_na(x, sess, fips_state_list, state_code_list, county_code_list),
        axis=1)
    logger.info("Starting derive_place_of_performance_city")
    data['place_of_perform_city'] = data.apply(
        lambda x: derive_place_of_performance_city(x, sess, fips_state_list, state_code_list), axis=1)
    logger.info("Starting derive_legal_entity_state_name")
    data['legal_entity_state_name'] = data.apply(
        lambda x: derive_legal_entity_state_name(x, sess, fips_state_list, state_code_list), axis=1)
    logger.info("Finished derivations")

    # adding columns missing from historical data
    null_list = [
        'awarding_office_code', 'awarding_office_name', 'funding_office_name',
        'funding_agency_name', 'funding_agency_code', 'funding_office_code',
        'funding_sub_tier_agency_co', 'funding_sub_tier_agency_na',
        'legal_entity_foreign_city', 'legal_entity_foreign_posta',
        'legal_entity_foreign_provi', 'place_of_performance_forei'
    ]
    for item in null_list:
        data[item] = None

    cdata = clean_data(
        data, PublishedAwardFinancialAssistance, {
            'obligation_action_date': 'action_date',
            'action_type': 'action_type',
            'assistance_type': 'assistance_type',
            'project_description': 'award_description',
            'recipient_name': 'awardee_or_recipient_legal',
            'duns_no': 'awardee_or_recipient_uniqu',
            'awarding_agency_code': 'awarding_agency_code',
            'awarding_agency_name': 'awarding_agency_name',
            'awarding_office_code': 'awarding_office_code',
            'awarding_office_name': 'awarding_office_name',
            'agency_code': 'awarding_sub_tier_agency_c',
            'awarding_sub_tier_agency_n': 'awarding_sub_tier_agency_n',
            'federal_award_mod': 'award_modification_amendme',
            'rec_flag': 'business_funds_indicator',
            'recipient_type': 'business_types',
            'cfda_program_num': 'cfda_number',
            'cfda_program_title': 'cfda_title',
            'correction_late_ind': 'correction_late_delete_ind',
            'face_loan_guran': 'face_value_loan_guarantee',
            'federal_award_id': 'fain',
            'fed_funding_amount': 'federal_action_obligation',
            'fyq_correction': 'fiscal_year_and_quarter_co',
            'funding_agency_name': 'funding_agency_name',
            'funding_agency_code': 'funding_agency_code',
            'funding_office_code': 'funding_office_code',
            'funding_office_name': 'funding_office_name',
            'funding_sub_tier_agency_co': 'funding_sub_tier_agency_co',
            'funding_sub_tier_agency_na': 'funding_sub_tier_agency_na',
            'is_historical': 'is_historical',
            'receip_addr1': 'legal_entity_address_line1',
            'receip_addr2': 'legal_entity_address_line2',
            'receip_addr3': 'legal_entity_address_line3',
            'legal_entity_city_code': 'legal_entity_city_code',
            'recipient_city_name': 'legal_entity_city_name',
            'recipient_cd': 'legal_entity_congressional',
            'recipient_country_code': 'legal_entity_country_code',
            'recipient_county_code': 'legal_entity_county_code',
            'recipient_county_name': 'legal_entity_county_name',
            'legal_entity_foreign_city': 'legal_entity_foreign_city',
            'legal_entity_foreign_posta': 'legal_entity_foreign_posta',
            'legal_entity_foreign_provi': 'legal_entity_foreign_provi',
            'recipient_state_code': 'legal_entity_state_code',
            'legal_entity_zip5': 'legal_entity_zip5',
            'legal_entity_state_name': 'legal_entity_state_name',
            'legal_entity_zip_last4': 'legal_entity_zip_last4',
            'last_modified_date': 'modified_at',
            'non_fed_funding_amount': 'non_federal_funding_amount',
            'orig_sub_guran': 'original_loan_subsidy_cost',
            'ending_date': 'period_of_performance_curr',
            'starting_date': 'period_of_performance_star',
            'principal_place_code': 'place_of_performance_code',
            'principal_place_cd': 'place_of_performance_congr',
            'principal_place_country_code': 'place_of_perform_country_c',
            'place_of_performance_forei': 'place_of_performance_forei',
            'principal_place_zip': 'place_of_performance_zip4a',
            'place_of_perform_city': 'place_of_performance_city',
            'place_of_perform_county_na': 'place_of_perform_county_na',
            'principal_place_state': 'place_of_perform_state_nam',
            'record_type': 'record_type',
            'sai_number': 'sai_number',
            'total_funding_amount': 'total_funding_amount',
            'uri': 'uri'
        }, {
            'place_of_performance_congr': {'pad_to_length': 2, 'keep_null': True},
            'legal_entity_congressional': {'pad_to_length': 2, 'keep_null': True}
        })

    # make a pass through the dataframe, changing any empty values to None, to ensure that those are represented as
    # NULL in the db.
    cdata = cdata.replace(np.nan, '', regex=True)
    cdata = cdata.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)

    # generate the afa_generated_unique field
    cdata['afa_generated_unique'] = cdata.apply(lambda x: generate_unique_string(x), axis=1)

    return cdata
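generate_unique_string is referenced throughout but never defined in these examples. The first format_fabs_data deletes awarding_sub_tier_agency_c, award_modification_amendme, fain, and uri right after computing it, which suggests the key is built from those fields. A hedged sketch (the field order and the None placeholder are assumptions):

def generate_unique_string_sketch(row):
    # join the identifying fields into one key, standing in for missing values
    parts = [row['awarding_sub_tier_agency_c'], row['fain'],
             row['award_modification_amendme'], row['uri']]
    return '_'.join('-none-' if part is None else str(part) for part in parts)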