def get_record_processor(catcodes, candidates, committees):
    """Build the filter chain for CRP PAC-to-candidate contribution records.

    Args:
        catcodes: lookup used by CatCodeFilter to resolve contributor categories.
        candidates: lookup used by Pac2CandRecipientFilter to resolve recipients.
        committees: lookup used by ContributorFilter to resolve contributors.

    Returns:
        The composite filter produced by chain_filters(); records pushed through
        it are normalized and finally narrowed to the fields in SPEC.
    """
    return chain_filters(
        CSVFieldVerifier(),

        # transaction filters
        FieldAdder('transaction_namespace', CRP_TRANSACTION_NAMESPACE),
        FieldMerger({'transaction_id': ('cycle', 'fec_rec_no')},
                    lambda cycle, fecid: 'pac2cand:%s:%s' % (cycle, fecid),
                    keep_fields=True),
        FieldMerger({'transaction_type': ('type', )},
                    lambda t: t.strip().lower()),

        # date stamp
        FieldModifier('date', parse_date_iso),

        # contributor and recipient fields
        ContributorFilter(committees),
        FieldRenamer({'contributor_ext_id': 'pac_id'}),
        FieldAdder('contributor_type', 'C'),
        Pac2CandRecipientFilter(candidates),
        FieldAdder('recipient_type', 'P'),

        # catcode
        CatCodeFilter('contributor', catcodes),

        # add static fields
        FieldAdder('is_amendment', False),
        # When cycle_cand != 'Y' the status is unknown (""); otherwise it is
        # simply whether curr_cand == 'Y'.  (The original trailing
        # "and cycle == 'Y'" was a tautology inside the else branch.)
        FieldMerger({'candidacy_status': ('curr_cand', 'cycle_cand')},
                    lambda curr, cycle: "" if cycle != 'Y' else curr == 'Y',
                    keep_fields=False),

        # filter through spec
        SpecFilter(SPEC))
def load_payment(csvpath, *args, **options):
    """Load FARA payment records from a denormalized CSV file.

    Builds a FARAPaymentLoader session, a per-record normalization chain, and
    an output chain (loader emitter + progress tick), then streams the CSV at
    ``csvpath`` through them via load_data().

    Args:
        csvpath: path to the input CSV; resolved with os.path.abspath.
        *args, **options: accepted for caller compatibility but unused here.
    """
    loader = FARAPaymentLoader(
        source='DOJ',
        description='load from denormalized CSVs',
        imported_by="loadfara.py (%s)" % os.getenv('LOGNAME', 'unknown'),
    )

    payment_record_processor = chain_filters(
        CSVFieldVerifier(),
        FieldRemover('id'),
        FieldRemover('import_reference'),
        FieldAdder('import_reference', loader.import_session),
        FieldCopier({'date_asterisk': 'date'}),
        FieldModifier('date', parse_fara_date),
        FieldModifier('date_asterisk', parse_fara_asterisk),
        FieldModifier('amount', parse_decimal),
        FieldModifier(('document_id', 'client_id', 'registrant_id',
                       'record_id', 'location_id', 'subcontractor_id'),
                      parse_int),
        UnicodeFilter(),
        StringLengthFilter(Payment))

    output_func = chain_filters(
        LoaderEmitter(loader),
        Every(REPORT_FREQUENCY, progress_tick),
    )

    # Context manager ensures the CSV handle is closed even if load_data
    # raises (the original leaked the open file object).
    with open(os.path.abspath(csvpath)) as csvfile:
        input_iterator = VerifiedCSVSource(csvfile,
                                           fieldnames=Payment.FIELDNAMES,
                                           skiprows=1)
        load_data(input_iterator, payment_record_processor, output_func)
def get_record_processor(import_session):
    """Return the normalization chain for contribution records.

    Strips stale id/import-reference fields, stamps the given import session,
    coerces amount/cycle/date/is_amendment to their proper types, and enforces
    unicode and column-length constraints for the Contribution model.
    """
    pipeline = [
        CSVFieldVerifier(),
        FieldRemover('id'),
        FieldRemover('import_reference'),
        FieldAdder('import_reference', import_session),
        FieldModifier('amount', lambda a: Decimal(str(a))),
        FieldModifier(['cycle'], parse_int),
        FieldModifier(['date'], parse_date),
        BooleanFilter('is_amendment'),
        UnicodeFilter(),
        StringLengthFilter(Contribution),
    ]
    return chain_filters(*pipeline)
def issue_handler(inpath, outpath, infields, outfields):
    """Transform a lobbying-issue CSV into the output field layout.

    Reads pipe-quoted records from ``inpath``, validates the column count
    against the ``lob_issue`` file type, renames the raw CRP column names to
    our canonical ones, and writes the result to ``outpath``.

    Args:
        inpath: source CSV path.
        outpath: destination CSV path (truncated/overwritten).
        infields: field names of the source file.
        outfields: field names to emit.
    """
    # Both handles are now closed deterministically; the original left the
    # input and output files open after the recipe finished.
    with open(inpath, 'r') as infile, open(outpath, 'w') as outfile:
        run_recipe(
            VerifiedCSVSource(infile, fieldnames=infields, quotechar='|'),
            FieldCountValidator(len(FILE_TYPES['lob_issue'])),
            CSVFieldVerifier(),
            FieldRenamer({
                'id': 'SI_ID',
                'transaction': 'UniqID',
                'general_issue_code': 'IssueID',
                'general_issue': 'Issue',
                'specific_issue': 'SpecIssue',
                'year': 'Year',
            }),
            #DebugEmitter(),
            CSVEmitter(outfile, fieldnames=outfields),
        )
def run(self):
    """Stream the issues CSV at ``self.inpath`` into the database.

    Normalizes each record (int year, renamed transaction id, empty string
    for missing specific_issue), applies the shared TRANSACTION_FILTER and
    unicode cleanup, logs progress every 10000 records, and commits through
    IssueLoader every 100 records.
    """
    # Close the input file when the recipe finishes or raises; the original
    # leaked the handle returned by open().
    with open(self.inpath) as infile:
        run_recipe(
            VerifiedCSVSource(infile),
            CSVFieldVerifier(),
            FieldModifier('year', lambda x: int(x) if x else None),
            FieldRenamer({'transaction_id': 'transaction'}),
            NoneFilter(),
            FieldModifier('specific_issue', lambda x: '' if x is None else x),
            TRANSACTION_FILTER,
            UnicodeFilter(),
            CountEmitter(every=10000, log=self.log),
            LoaderEmitter(IssueLoader(
                source=self.inpath,
                description='load from denormalized CSVs',
                imported_by="loadlobbying (%s)" % os.getenv('LOGNAME', 'unknown'),
                log=self.log,
            ), commit_every=100),
        )
def get_record_processor(catcodes, candidates, committees):
    """Build the filter chain for CRP PAC-to-PAC contribution records.

    Args:
        catcodes: category lookup (accepted for signature parity; not used by
            the pac2pac chain, which carries raw category codes through).
        candidates: candidate lookup for Pac2PacRecipientFilter.
        committees: committee lookup for CommitteeFilter and the recipient filter.

    Returns:
        The composite filter produced by chain_filters(); output records are
        narrowed to the fields in SPEC.
    """
    return chain_filters(
        CSVFieldVerifier(),
        ContribRecipFilter(),
        CommitteeFilter(committees),
        Pac2PacRecipientFilter(candidates, committees),

        # transaction filters
        FieldAdder('transaction_namespace', CRP_TRANSACTION_NAMESPACE),
        FieldMerger({'transaction_id': ('cycle', 'fec_rec_no')},
                    lambda cycle, fecid: 'pac2pac:%s:%s' % (cycle, fecid),
                    keep_fields=True),
        FieldMerger({'transaction_type': ('type',)},
                    lambda t: t.strip().lower()),

        # filing reference ID
        FieldRenamer({'filing_id': 'microfilm'}),

        # date stamp
        FieldModifier('date', parse_date_iso),

        # catcode
        FieldMerger({'contributor_category': ('real_code',)},
                    lambda s: s.upper() if s else "",
                    keep_fields=True),
        FieldMerger({'recipient_category': ('recip_prim_code',)},
                    lambda s: s.upper() if s else "",
                    keep_fields=True),

        FieldRenamer({'contributor_city': 'city',
                      'contributor_state': 'state',
                      'contributor_zipcode': 'zipcode',
                      'contributor_occupation': 'fec_occ_emp',
                      'recipient_party': 'party',}),
        FieldModifier('contributor_state',
                      lambda s: s.strip().upper() if s else ""),
        FieldAdder('contributor_type', 'C'),

        # add static fields
        FieldAdder('jurisdiction', 'F'),
        FieldMerger({'is_amendment': ('amend',)},
                    lambda s: s.strip().upper() != 'N'),
        # When cycle_cand != 'Y' the status is unknown (""); otherwise it is
        # simply whether curr_cand == 'Y'.  (The original trailing
        # "and cycle == 'Y'" was a tautology inside the else branch.)
        FieldMerger({'candidacy_status': ('curr_cand', 'cycle_cand')},
                    lambda curr, cycle: "" if cycle != 'Y' else curr == 'Y',
                    keep_fields=False),

        # filter through spec
        SpecFilter(SPEC))
def get_record_processor(year, import_ref):
    """Return the normalization chain for earmark records of one fiscal year.

    Stamps each record with ``year``/``import_ref``, truncates free-text
    fields to their column limits, parses the amount columns, and derives the
    composite description, presidential/undisclosed flags, locations, members,
    and recipients fields.
    """
    # Free-text fields grouped by maximum stored length.
    text_1024 = ['notes', 'house_members', 'senate_members']
    text_512 = [
        'description', 'house_parties', 'house_states', 'house_districts',
        'senate_parties', 'senate_states', 'raw_recipient',
    ]
    text_256 = ['bill_section', 'bill_subsection']
    amount_fields = [
        'budget_amount', 'senate_amount', 'house_amount',
        'omni_amount', 'final_amount',
    ]
    member_sources = (
        'house_members', 'house_parties', 'house_states', 'house_districts',
        'senate_members', 'senate_parties', 'senate_states',
    )

    pipeline = [
        CSVFieldVerifier(),
        FieldRemover('id'),
        FieldRemover('county'),
        FieldAdder('fiscal_year', year),
        FieldAdder('import_reference', import_ref),
        FieldModifier(text_1024, lambda s: string_filter(s, 1024)),
        FieldModifier(text_512, lambda s: string_filter(s, 512)),
        FieldModifier(text_256, lambda s: string_filter(s, 256)),
        FieldModifier(amount_fields, amount_filter),
        FieldMerger({'description': ('project_heading', 'description')},
                    _prepend),
        FieldModifier(['presidential'], lambda p: presidential_raw.get(p, '')),
        FieldModifier(['undisclosed'], lambda u: undisclosed_raw.get(u, '')),
        FieldMerger({'locations': ('city', 'state')}, _normalize_locations),
        FieldMerger({'members': member_sources}, _normalize_members,
                    keep_fields=True),
        FieldMerger({'recipients': ('raw_recipient', )},
                    _normalize_recipients),
    ]
    return chain_filters(*pipeline)
def get_allocated_record_processor():
    """Build the filter chain for allocated NIMSP contribution records.

    Derives per-field type converters from CSV_SQL_MAPPING, applies the
    record-munging filters, stamps the static namespace fields, and restricts
    output to FIELDNAMES plus the original 'contributionid'.

    Returns:
        The composite filter produced by chain_filters().
    """
    # Dict comprehension instead of dict([...]) over a list comprehension
    # (flake8-comprehensions C404) — same mapping, clearer and cheaper.
    input_type_conversions = {
        field: conversion_func
        for field, _, conversion_func in CSV_SQL_MAPPING
        if conversion_func
    }

    return chain_filters(
        CSVFieldVerifier(),
        MultiFieldConversionFilter(input_type_conversions),

        # munge fields
        BestAvailableFilter(),
        RecipientFilter(),
        SeatFilter(),
        IdsFilter(),
        ContributorTypeFilter(),
        FieldModifier('date', lambda x: str(x) if x else None),
        ZipCleaner(),

        # add static fields
        FieldAdder('is_amendment', False),
        FieldAdder('transaction_namespace', NIMSP_TRANSACTION_NAMESPACE),

        FieldListFilter(FIELDNAMES + ['contributionid']))
def get_record_processor(catcodes, candidates, committees):
    """Build the filter chain for CRP individual contribution records.

    Args:
        catcodes: lookup used by CatCodeFilter to resolve contributor categories.
        candidates: candidate lookup for IndivRecipientFilter.
        committees: committee lookup for the recipient and committee filters.

    Returns:
        The composite filter produced by chain_filters(); output records are
        narrowed to the fields in SPEC.
    """
    return chain_filters(
        CSVFieldVerifier(),

        # transaction filters
        FieldAdder('transaction_namespace', CRP_TRANSACTION_NAMESPACE),
        FieldMerger({'transaction_id': ('cycle', 'fec_trans_id')},
                    lambda cycle, fecid: 'indiv:%s:%s' % (cycle, fecid),
                    keep_fields=True),
        FieldMerger({'transaction_type': ('type', )},
                    lambda t: t.strip().lower() if t else '',
                    keep_fields=True),

        # filing reference ID
        FieldRenamer({'filing_id': 'microfilm'}),

        # date stamp
        FieldModifier('date', parse_date_iso),

        # rename contributor, organization, and parent_organization fields
        FieldRenamer({
            'contributor_name': 'contrib',
            'parent_organization_name': 'ult_org',
        }),
        IndivRecipientFilter(candidates, committees),
        CommitteeFilter(committees),
        OrganizationFilter(),

        # create URNs
        FieldRenamer({
            'contributor_ext_id': 'contrib_id',
            'committee_ext_id': 'cmte_id'
        }),

        # address and gender fields
        FieldRenamer({
            'contributor_address': 'street',
            'contributor_city': 'city',
            'contributor_state': 'state',
            'contributor_zipcode': 'zipcode',
            'contributor_gender': 'gender'
        }),
        FieldModifier('contributor_state',
                      lambda s: s.upper() if s else ""),
        FieldModifier('contributor_gender',
                      lambda s: s.upper() if s else ""),

        # employer/occupation filter
        FECOccupationFilter(),

        # catcode
        CatCodeFilter('contributor', catcodes),

        # add static fields
        FieldAdder('contributor_type', 'I'),
        FieldAdder('is_amendment', False),
        # When cycle_cand != 'Y' the status is unknown (""); otherwise it is
        # simply whether curr_cand == 'Y'.  (The original trailing
        # "and cycle == 'Y'" was a tautology inside the else branch.)
        FieldMerger({'candidacy_status': ('curr_cand', 'cycle_cand')},
                    lambda curr, cycle: "" if cycle != 'Y' else curr == 'Y',
                    keep_fields=False),

        # filter through spec
        SpecFilter(SPEC))
def get_unallocated_record_processor(salts_db):
    """Return the filter chain for unallocated NIMSP contribution records.

    Parses the id/amount columns, then runs the salting step against
    ``salts_db`` before the final DCID filter; the same DCIDFilter instance is
    shared by SaltFilter and the chain's last stage.
    """
    dcid_filter = DCIDFilter(SALT_KEY)
    pipeline = [
        CSVFieldVerifier(),
        FieldModifier(['contributionid'], parse_int),
        FieldModifier(['amount'], parse_decimal),
        SaltFilter(100, salts_db, dcid_filter),
        dcid_filter,
    ]
    return chain_filters(*pipeline)