forked from imshashank/osuevents
-
Notifications
You must be signed in to change notification settings - Fork 0
/
crawl.py
172 lines (127 loc) · 4.58 KB
/
crawl.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import re
from HTMLParser import HTMLParser
import csv
import datetime
def get_page(url):
    """Fetch *url* and return the response body, or "" on failure.

    Best-effort by design: the crawler should keep going when a single
    page is unreachable, so network errors yield an empty string.
    """
    # The import is unconditional (urllib is stdlib); the original wrapped
    # it in a bare except: which would also silently hide coding errors.
    import urllib
    try:
        return urllib.urlopen(url).read()
    except IOError:
        # Python 2 urllib signals network/HTTP failures as IOError subclasses.
        return ""
def get_event(url):
    """Fetch one event page, parse its fields, and append a row to events.csv.

    The page text is consumed sequentially: each extractor returns the field
    it found plus the remainder of the page, which is fed to the next
    extractor, so the call order below must match the page layout.

    url -- absolute URL of an OSU event detail page.
    """
    content = get_page(url)
    row = []
    # Same order as the original name/info/datetime/... sequence; each field
    # is wrapped in its own single-element list to keep the original CSV
    # cell format.
    for extract in (geteventname, geteventinfo, getdatetime, getlocation,
                    getcontact, getphonenum, getcategory, geteventtype):
        value, content = extract(content)
        row.append([value])
    # BUG FIX: the original passed an undefined global `f` to csv.writer,
    # which raised NameError. Open the output explicitly; append mode lets
    # successive calls accumulate rows.
    outfile = open('events.csv', 'ab')
    try:
        writer = csv.writer(outfile, delimiter=',')
        writer.writerows([row])
    finally:
        outfile.close()
def getdata(content, tag1, tag2, extratag, endtag, value):
    """Extract one field from *content* by chasing a chain of markers.

    Finds tag1, then tag2 after it, then extratag after that; the field
    starts *value* characters past the last marker and runs to *endtag*.
    Returns (field, remainder) where remainder is everything from the end
    marker onward, ready for the next extraction.
    """
    anchor = content.find(tag1)
    anchor = content.find(tag2, anchor)
    anchor = content.find(extratag, anchor)
    start = anchor + value
    end = content.find(endtag, start + 1)
    field = strip_tags(content[start:end].strip())
    return field, content[end:]
def geteventname(content):
    """Return (event name, remaining page text) from the <h1> heading."""
    return getdata(content, '<h1>', '>', '', '</h1>', 1)
def geteventinfo(content):
    """Return (event description, remaining page text) from the event_info cell."""
    return getdata(content, 'event_info', '<br', '', '</td>', 4)
def getdatetime(content):
    """Return (date/time text, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
def getlocation(content):
    """Return (location text, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
def getcontact(content):
    """Return (contact name, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
def getphonenum(content):
    """Return (phone number, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
def getcategory(content):
    """Return (category text, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
def geteventtype(content):
    """Return (event type text, remaining page text) from the next table cell."""
    return getdata(content, '<td', '><td', '>', '</td>', 1)
# To strip off HTML tags -------------------------------------------------
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only the plain text between tags.

    Feed it markup with feed(); get_data() returns the accumulated text
    with every tag discarded.
    """
    def __init__(self):
        # Initialise parser state directly (the py2 base __init__ just
        # calls reset()) and start with an empty text accumulator.
        self.reset()
        self.fed = []
    def handle_data(self, chunk):
        # Callback invoked by feed() for each run of text outside tags.
        self.fed.append(chunk)
    def get_data(self):
        # Join all collected fragments into one string.
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, leaving only its text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
# ------------------------------------------------------------------------------------
def getallevents(url):
    """Scrape a weekly listing page and return the event-detail URLs on it.

    Walks the page anchor by anchor, resolving each href against the
    events site; the first link that is not an event-detail URL marks the
    end of the listing and stops the scan.
    """
    page = get_page(url)
    # Drop everything above the listing area.
    page = page[page.find('<br clear="all"/>'):]
    events = []
    while True:
        anchor = page.find('<a href="')
        quote_open = page.find('"', anchor)
        quote_close = page.find('"', quote_open + 1)
        href = page[quote_open + 1:quote_close]
        page = page[quote_close + 1:]
        full_url = 'http://www.osu.edu/events/' + href
        if 'www.osu.edu/events/event' not in full_url:
            break
        events.append(full_url)
    return events
def run():
    """Crawl six consecutive weekly listing pages, saving every event found."""
    url = "http://www.osu.edu/events/indexWeek.php"
    for _ in range(6):
        # Save every event linked from the current week's listing.
        for event_url in getallevents(url):
            get_event(event_url)
        # Locate the navigation link after the 'Previous' label and follow
        # it to the next listing page to crawl.
        page = get_page(url)
        label_pos = page.find('Previous')
        anchor_pos = page.find('<a href=', label_pos)
        quote_open = page.find('"', anchor_pos + 1)
        quote_close = page.find('"', quote_open + 1)
        url = page[quote_open + 1:quote_close]
if __name__ == '__main__':
    run()