def loadroll(fn): roll = web.storage() roll.id = fn.split('/')[-1].split('.')[0] vote = xmltramp.load(fn) if vote['bill':]: b = vote.bill roll.bill_id = 'us/%s/%s%s' % (b('session'), b('type'), b('number')) else: roll.bill_id = None roll.type = str(vote.type) roll.question = str(vote.question) roll.required = str(vote.required) roll.result = str(vote.result) try: db.insert('roll', seqname=False, **roll) except IntegrityError: if not db.update('roll', where="id=" + web.sqlquote(roll.id), bill_id=roll.bill_id): print "\nMissing bill:", roll.bill_id raise NotDone with db.transaction(): db.delete('vote', where="roll_id=$roll.id", vars=locals()) for voter in vote['voter':]: rep = govtrackp(voter('id')) if rep: db.insert('vote', seqname=False, politician_id=rep, roll_id=roll.id, vote=fixvote(voter('vote'))) else: pass #@@!--check again after load_everyone
def load_data(): c = csv.reader(file('../data/crawl/maplight/uniq_map_export_bill_research.csv')) supportdict = {'0': -1, '1': 1, '2': 0 } #0: oppose ; 1: support; 2: not known (from README) with db.transaction(): db.delete('interest_group_bill_support', '1=1') for line in c: if not line[0].startswith('#'): category_id, longname, maplightid, session, measure, support = line support = supportdict[support] if support == 0: continue typenumber = measure.lower().replace(' ', '') r = db.select('interest_group', what="id", where="longname=$longname", vars=locals()) if r: groupid = r[0].id else: groupid = db.insert('interest_group', longname=longname, category_id=category_id) bill_id = 'us/%s/%s' % (session, typenumber) r = db.select('bill', where="id=$bill_id", vars=locals()) if not r: filename = "../data/crawl/govtrack/us/%s/bills/%s.xml" % (session, typenumber) bills.loadbill(filename, maplightid=maplightid) else: db.update('bill', maplightid=maplightid, where="id=$bill_id", vars=locals()) try: #print '\r', bill_id, db.insert('interest_group_bill_support', seqname=False, bill_id=bill_id, group_id=groupid, support=support) except: print '\n Duplicate row with billid %s groupid %s support %s longname %s' % (bill_id, groupid, support, longname) raise
def generate_similarities():
    """ Generate similarity information for each (interest group, politician)
    pair and store in DB """
    rows = db.query(
        'select igbp.group_id, position.politician_id, igbp.support, position.vote'
        ' from interest_group_bill_support igbp, position'
        ' where igbp.bill_id = position.bill_id')
    agreements = {}
    totals = {}
    for row in rows:
        pair = (row.group_id, row.politician_id)
        totals[pair] = totals.get(pair, 0) + 1
        # A non-zero, matching support/vote value counts as an agreement.
        if row.support != 0 and row.support == row.vote:
            agreements[pair] = agreements.get(pair, 0) + 1
    with db.transaction():
        db.delete('group_politician_similarity', '1=1')
        for (gid, pid), n_agreed in agreements.items():
            db.insert('group_politician_similarity', seqname=False,
                      group_id=gid, politician_id=pid,
                      agreed=n_agreed, total=totals[(gid, pid)])
def loadbill(fn, maplightid=None):
    """Load a govtrack bill XML file into `bill` and rebuild its
    `position` rows from the associated roll-call votes."""
    bill = xmltramp.load(fn)
    rec = bill2dict(bill)
    rec.maplightid = maplightid
    try:
        bill_id = rec.id
        db.insert('bill', seqname=False, **rec)
    except IntegrityError:
        # Bill already loaded: update it in place instead.
        bill_id = rec.pop('id')
        db.update('bill', where="id=" + web.sqlquote(bill_id), **rec)
    positions = {}
    for vote in bill.actions['vote':]:
        if not vote().get('roll'):
            continue  # only actions with an actual roll-call are usable
        rolldoc = '/us/%s/rolls/%s%s-%s.xml' % (
            rec.session, vote('where'), vote('datetime')[:4], vote('roll'))
        roll = xmltramp.load(GOVTRACK_CRAWL + rolldoc)
        for voter in roll['voter':]:
            positions[govtrackp(voter('id'))] = fixvote(voter('vote'))
    # Drop voters who could not be mapped to a known politician.
    if None in positions:
        del positions[None]
    with db.transaction():
        db.delete('position', where='bill_id=$bill_id', vars=locals())
        for pol_id, pol_vote in positions.iteritems():
            db.insert('position', seqname=False, bill_id=bill_id,
                      politician_id=pol_id, vote=pol_vote)
def load_categories():
    """Load CRP category codes into the `category` table."""
    reader = csv.reader(file('../data/crawl/maplight/CRP_Categories.csv'))
    with db.transaction():
        db.delete('category', '1=1')
        for row in reader:
            if row[0].startswith('#'):
                continue  # header/comment line
            cat_id, cat_name, industry, sector, _empty = row
            db.insert('category', seqname=False, id=cat_id, name=cat_name,
                      industry=industry, sector=sector)
def main():
    """Reload every bill from the govtrack crawl, clearing dependent
    tables first."""
    with db.transaction():
        db.delete("vote", "1=1")
        # Every bill is deleted below, so every interest_group_bill_support
        # row would dangle; clear the whole table.  (The old code built a
        # comma-joined id string and passed it through $bill_ids, which
        # web.py binds as a SINGLE string literal -- the IN clause could
        # never match, and bill ids are text anyway, not integers.)
        db.delete("interest_group_bill_support", "1=1")
        db.delete("bill", "1=1")
    for fn in glob.glob("../data/crawl/govtrack/us/*/bills/*.xml"):
        loadbill(fn)
def load_data(): c = csv.reader( file('../data/crawl/maplight/uniq_map_export_bill_research.csv')) supportdict = { '0': -1, '1': 1, '2': 0 } #0: oppose ; 1: support; 2: not known (from README) with db.transaction(): db.delete('interest_group_bill_support', '1=1') for line in c: if not line[0].startswith('#'): category_id, longname, maplightid, session, measure, support = line support = supportdict[support] if support == 0: continue typenumber = measure.lower().replace(' ', '') r = db.select('interest_group', what="id", where="longname=$longname", vars=locals()) if r: groupid = r[0].id else: groupid = db.insert('interest_group', longname=longname, category_id=category_id) bill_id = 'us/%s/%s' % (session, typenumber) r = db.select('bill', where="id=$bill_id", vars=locals()) if not r: filename = "../data/crawl/govtrack/us/%s/bills/%s.xml" % ( session, typenumber) bills.loadbill(filename, maplightid=maplightid) else: db.update('bill', maplightid=maplightid, where="id=$bill_id", vars=locals()) try: #print '\r', bill_id, db.insert('interest_group_bill_support', seqname=False, bill_id=bill_id, group_id=groupid, support=support) except: print '\n Duplicate row with billid %s groupid %s support %s longname %s' % ( bill_id, groupid, support, longname) raise
def load_fec_ids():
    """Map FEC candidate ids to politician ids via the CRP crosswalk file,
    filling both the in-memory `fec2pol` dict and `politician_fec_ids`."""
    with db.transaction():
        db.delete('politician_fec_ids', '1=1')
        fh = iter(file('../data/crawl/opensecrets/FEC_CRP_ID.tsv'))
        header = fh.next()  # skip the column-header line
        for line in fh:
            fec_id, crp_id = line.split()
            # Resolve the CRP id once instead of three times per line.
            politician_id = tools.opensecretsp(crp_id)
            if politician_id:
                fec2pol[fec_id] = politician_id
                db.insert('politician_fec_ids', seqname=False,
                          politician_id=politician_id,
                          fec_id=fec_id)
def load_fec_contributions(): t = db.transaction(); n = 0 db.delete('contribution', '1=1') for f in fec_cobol.parse_contributions(): f = web.storage(f) f.occupation = f.occupation.replace('N/A', '') if '/' in f.occupation: employer, occupation = f.occupation.split('/', 1) else: employer = '' occupation = f.occupation try: datetime.date(*[int(x) for x in f.date.split('-')]) except ValueError: f.date = None db.insert('contribution', fec_record_id = f.get('fec_record_id'), microfilm_loc = f.microfilm_loc, recipient_id = f.filer_id, name = f.name, street = f.get('street'), city = f.city, state = f.state, zip = f.zip, occupation = occupation, employer = employer, employer_stem = tools.stemcorpname(employer), committee = f.from_id or None, sent = f.date, amount = f.amount ) n += 1 if n % 10000 == 0: t.commit(); t = db.transaction(); print n t.commit() print "Creating indexes on table `contribution`..." schema.Contribution.create_indexes() print "done."
def load_fec_contributions(): t = db.transaction() n = 0 db.delete('contribution', '1=1') for f in fec_cobol.parse_contributions(): f = web.storage(f) f.occupation = f.occupation.replace('N/A', '') if '/' in f.occupation: employer, occupation = f.occupation.split('/', 1) else: employer = '' occupation = f.occupation try: datetime.date(*[int(x) for x in f.date.split('-')]) except ValueError: f.date = None db.insert('contribution', fec_record_id=f.get('fec_record_id'), microfilm_loc=f.microfilm_loc, recipient_id=f.filer_id, name=f.name, street=f.get('street'), city=f.city, state=f.state, zip=f.zip, occupation=occupation, employer=employer, employer_stem=tools.stemcorpname(employer), committee=f.from_id or None, sent=f.date, amount=f.amount) n += 1 if n % 10000 == 0: t.commit() t = db.transaction() print n t.commit()
def generate_similarities():
    """ Generate similarity information for each (interest group, politician)
    pair and store in DB """
    query_rows = db.query('select igbp.group_id, position.politician_id, igbp.support, position.vote'
                          ' from interest_group_bill_support igbp, position'
                          ' where igbp.bill_id = position.bill_id')
    agree_count = {}
    pair_count = {}
    for rec in query_rows:
        key = (rec.group_id, rec.politician_id)
        pair_count[key] = pair_count.get(key, 0) + 1
        # Count an agreement only when support and vote match and are non-zero.
        if rec.support == rec.vote and rec.support != 0:
            agree_count[key] = agree_count.get(key, 0) + 1
    with db.transaction():
        db.delete('group_politician_similarity', '1=1')
        for key, agreed in agree_count.items():
            group_id, politician_id = key
            db.insert('group_politician_similarity', seqname=False,
                      group_id=group_id, politician_id=politician_id,
                      agreed=agreed, total=pair_count[key])
if loc_code in ['DC-98','PR-98']: continue for internal_key, value in row.items(): db.insert('census_data', seqname=False, district_id=loc_code, internal_key=internal_key, census_type=type, value=value) print >>sys.stderr, "...Done loading census_data table." def main(): for type in [1, 3]: load_census_meta(type) load_census_data(type) load_census_population() if __name__ == "__main__": if batch_mode: from bulk_loader import bulk_loader_db db = bulk_loader_db(os.environ.get('WATCHDOG_TABLE', 'watchdog_dev')) meta_cols = ['internal_key', 'census_type', 'hr_key', 'label'] db.open_table('census_meta', meta_cols, filename=tsv_file_format%'census_meta') data_cols = ['district_id', 'internal_key', 'census_type', 'value'] db.open_table('census_data', data_cols, filename=tsv_file_format%'census_data') pop_cols = ['state_id', 'county_id', 'zip_id', 'tract_id', 'blockgrp_id', 'block_id', 'district_id', 'sumlev', 'population', 'area_land'] db.open_table('census_population', pop_cols, filename=tsv_file_format%'census_population') main() else: from tools import db with db.transaction(): #db.delete('census_data', where='1=1') #db.delete('census_meta', where='1=1') #db.delete('census_population', where='1=1') main()
load_census_population() if __name__ == "__main__": if batch_mode: from bulk_loader import bulk_loader_db db = bulk_loader_db(os.environ.get('WATCHDOG_TABLE', 'watchdog_dev')) meta_cols = ['internal_key', 'census_type', 'hr_key', 'label'] db.open_table('census_meta', meta_cols, filename=tsv_file_format % 'census_meta') data_cols = ['district_id', 'internal_key', 'census_type', 'value'] db.open_table('census_data', data_cols, filename=tsv_file_format % 'census_data') pop_cols = [ 'state_id', 'county_id', 'zip_id', 'tract_id', 'blockgrp_id', 'block_id', 'district_id', 'sumlev', 'population', 'area_land' ] db.open_table('census_population', pop_cols, filename=tsv_file_format % 'census_population') main() else: from tools import db with db.transaction(): #db.delete('census_data', where='1=1') #db.delete('census_meta', where='1=1') #db.delete('census_population', where='1=1') main()