/
webtofeed.py
122 lines (86 loc) · 2.39 KB
/
webtofeed.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from bs4 import BeautifulSoup
from bs4 import Tag
import requests
import sys
import argparse
if sys.version_info[0] == 2:
from urlparse import urlparse
else:
from urllib.parse import urlparse
from feedgen.feed import FeedGenerator
# Maximum number of ancestor levels getParentWithLink is allowed to climb
# while searching for an enclosing <a> tag around a news item.
maxlinkHighness = 2
def linkIsAbsolute(url):
    """Return True when *url* is absolute, i.e. carries a network location."""
    parsed = urlparse(url)
    return parsed.netloc != ''
def findInnerLink(item):
    """Return the first <a> descendant of *item*, or None when there is none."""
    anchor = item.find('a')
    return anchor if isinstance(anchor, Tag) else None
def findOuterLink(item):
    """Return an <a> tag found on *item* or one of its ancestors (up to
    maxlinkHighness levels), or None if none is found."""
    return getParentWithLink(item, 0)
def getParentWithLink(item, linkHeigth):
    """Search *item* (and then its ancestors) for an <a> tag.

    item       -- a bs4 element to start from (may be None when recursion
                  has climbed past the document root)
    linkHeigth -- current climb depth; recursion stops once it exceeds
                  maxlinkHighness
    Returns the first <a> Tag found, or None.
    """
    # Guard both exhausted depth and climbing past the root: the original
    # crashed with AttributeError once item.parent became None.
    if item is None or linkHeigth > maxlinkHighness:
        return None
    link = item.find('a')
    if isinstance(link, Tag):
        return link
    # No link here — try one level up. The recursive call already returns
    # either a Tag or None, so it can be returned directly.
    return getParentWithLink(item.parent, linkHeigth + 1)
def parseString(html, url, tag):
    """Build a FeedGenerator from raw *html*.

    Every <tag> element in the document body that has a link inside it
    (or on a nearby ancestor) becomes one feed entry.

    html -- raw HTML document text
    url  -- page URL; used as the feed id and to absolutize relative hrefs
    tag  -- element name wrapping each news item (e.g. 'h2')
    Returns the populated FeedGenerator.
    """
    # Name the parser explicitly: bare BeautifulSoup(html) warns and picks
    # whichever parser happens to be installed, making output env-dependent.
    parsed_html = BeautifulSoup(html, 'html.parser')
    parsedUrl = urlparse(url)
    baseUrl = parsedUrl.scheme + "://" + parsedUrl.netloc
    fg = FeedGenerator()
    fg.id(url)
    fg.title('Generated feed for ' + url)
    fg.link(href=url, rel='alternate')
    fg.subtitle('Autogenerated by alltorss.py based on tag ' + tag)
    # Fragment HTML may have no <body>; the original crashed on .find_all.
    body = parsed_html.body
    if body is None:
        return fg
    for item in body.find_all(tag):
        topic = item.text.strip()
        # Prefer a link inside the item; fall back to one on an ancestor.
        # Explicit None checks (not `or`): bs4 Tag truthiness depends on
        # its contents, so an empty <a> would be skipped by `or`.
        innerLink = findInnerLink(item)
        outerLink = findOuterLink(item)
        link = innerLink if innerLink is not None else outerLink
        if isinstance(link, Tag) and link.has_attr('href'):
            linkHref = link['href']
            fe = fg.add_entry()
            fullLink = linkHref if linkIsAbsolute(linkHref) else baseUrl + linkHref
            fe.id(fullLink)
            fe.title(topic)
            fe.link(href=fullLink)
    return fg
def parseUrl(url, tag):
    """Fetch *url* over HTTP and build a feed from its <tag> elements.

    Exits the process with status 1 on any non-200 response.
    """
    r = requests.get(url)
    if r.status_code != 200:
        sys.stderr.write("Error code is " + str(r.status_code) + "\n")
        # sys.exit, not the exit() builtin: exit() is injected by the
        # `site` module for interactive use and may be absent in frozen
        # or site-less interpreters.
        sys.exit(1)
    html = r.text
    return parseString(html, url, tag)
def createFeedString(feed, feedtype):
    """Serialize *feed* to a feed document string.

    TODO: honour *feedtype*; currently always emits RSS.
    """
    return feed.rss_str(pretty=True)
def main():
    """CLI entry point: fetch --url, extract --tag items, print RSS to stdout."""
    parser = argparse.ArgumentParser()
    # required=True yields a clean usage error when a flag is omitted,
    # instead of a TypeError deep inside requests.get(None).
    parser.add_argument("--url", required=True, help="URL to be parsed")
    parser.add_argument("--tag", required=True,
                        help="Tag that encapsulates news content")
    args = parser.parse_args()
    feed = parseUrl(args.url, args.tag)
    feedString = createFeedString(feed, "RSS")
    # feedgen serializes to bytes; decode before printing.
    print(feedString.decode('utf-8') if isinstance(feedString, bytes) else feedString)

if __name__ == "__main__":
    main()