-
Notifications
You must be signed in to change notification settings - Fork 0
/
pharmasouptical.py
77 lines (65 loc) · 2.58 KB
/
pharmasouptical.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
from bs4 import BeautifulSoup
import urllib2
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.namespace import Namespace, FOAF
import os
PAY = Namespace("http://reference.data.gov.uk/def/payment")
def get_page(url): #cache webpages locally
print url
filename = 'cache/'+urllib2.quote(url, safe='')
try:
return open(filename).read()
except IOError:
page = urllib2.urlopen(url).read()
open(filename, 'w').write(page)
return page
# Scrape the list of U.S. state names from the label cells of the main
# table on ProPublica's Dollars for Docs landing page ([1:-1] drops the
# first and last non-state label cells).
mainpage = get_page('http://projects.propublica.org/docdollars/')
majorsoup = BeautifulSoup(mainpage)
States = []
for td in majorsoup.find_all("td",class_="label",style=False)[1:-1]:
    States.append(td.string)
# RDF graph accumulating every scraped payment triple.
g = Graph()
# NOTE(review): these blank nodes are created once and reused for every
# scraped row below, which merges all clinics (and all companies and all
# payments) into one RDF node each — they probably should be minted per
# row instead. `state` is immediately rebound by the loop below and
# `payment` is never used.
state = BNode()
clinic = BNode()
company = BNode()
transaction = BNode()
payment = BNode()
for state in States:
try:
page = get_page('http://projects.propublica.org/docdollars/states/'+state.replace(' ','-'))
soup = BeautifulSoup(page)
state_txns = soup.find_all('tr') #find all transactions
except urllib2.HTTPError:
print state+' not available'
continue
def track(tag): #get payment information
#clinic info
clinic_name = tag[0].find(class_=False).string
DDsite = tag[0].find(href=True)['href'] #Propublica's docdollars site
city = tag[1].string.strip() #city
state = soup.find("strong").string #state
g.add( (clinic, RDF.type, FOAF.Organization) )
g.add( (clinic, FOAF.name, Literal(clinic_name)) )
g.add( (clinic, FOAF.Document, URIRef(DDsite)) )
g.add( (clinic, FOAF.based_near, Literal(city)) )
g.add( (clinic, FOAF.based_near, Literal(state.replace(' ','_'))) )
#company info
company_name = tag[2].string.strip()
g.add( (company, RDF.type, FOAF.Organization) )
g.add( (company, FOAF.name, Literal(company_name)) )
#transaction info
#year = tag[3].string.strip()
#purpose = tag[4].string.strip()
#dollars = tag[5].string.strip()
g.add( (transaction, RDF.type, PAY.Payment) )
g.add( (transaction, PAY.payee, company) )
g.add( (transaction, PAY.payer, clinic) )
#g.add( (transaction, PAY.purchase, purpose) ) #can't find correct BVACOP
#g.add( (transaction, PAY.date, year) )
#g.add( (transaction, PAY.grossAmount, dollars) )
for index in range(1,len(state_txns)):
txn = state_txns[index].find_all('td')
track(txn)
##PRINTING##
# Dump the finished graph as RDF/XML to stdout for inspection.
print( g.serialize(format='xml') )
# With a destination argument, serialize() writes to the file and returns
# None — so the result is written, not printed (the old print just showed
# "None").
g.serialize('/home/aimi/pharmasoup/data.rdf', format='xml')