def addRSS(self):
	"""Fetch RSS items for the event window and append matching SearchResults.

	Builds a feed URL spanning 3 days before to 7 days after
	self.eventdate, parses it, and appends to self.results every item
	whose title or description contains any whitespace-separated term
	of self.query (case-insensitive), up to the self.rpp bound.
	"""
	tc = TrackingChannel()
	start_date = self.eventdate - datetime.timedelta(days=3)
	end_date = self.eventdate + datetime.timedelta(days=7)

	# parse() indexes the feed's items by item URL inside tc; the
	# RSSParser instance it returns can usually be ignored.
	tc.parse(self.createURL(start_date, end_date))

	RSS10_TITLE = (ns.rss10, 'title')
	RSS10_DESC = (ns.rss10, 'description')

	# tc.keys() would work here as well.
	for item in tc.listItems():
		# Each item is a (url, order_index) tuple.
		url = item[0]
		# All the data for the item, as a Python dictionary.
		item_data = tc.getItem(item)
		title = item_data.get(RSS10_TITLE, "(none)")
		desc = (item_data.get(RSS10_DESC, "(none)")
			.replace("<br/>", "").replace("\n", "")
			.replace("\r", "").replace("  ", " "))
		for q in self.query.split():
			q_lower = q.lower()
			# BUG FIX: the original tested the description match without
			# `>= 0`; str.find() returns -1 on a miss, which is truthy,
			# so nearly every item was treated as a match.
			if title.lower().find(q_lower) >= 0 or desc.lower().find(q_lower) >= 0:
				# NOTE(review): `<=` admits rpp + 1 results; kept as-is
				# in case callers rely on it — confirm the intended bound.
				if len(self.results) <= self.rpp:
					self.results.append(SearchResult(
						title.decode("utf-8"),
						url.decode("utf-8"),
						desc.decode("utf-8")))
					break
# --- Exemplo n.º 2 (separator artifact from scraped code aggregation; score: 0) ---
def get_hackaday_feed():
	"""Fetch the Hackaday RSS feed and return entries with a usable thumbnail.

	Each returned entry is a dict with 'title', 'description' and 'link';
	when DEBUG is false it also carries 'img', a thumbnail built from the
	first <img> tag found in the item's content:encoded HTML. Items whose
	image cannot be downloaded/thumbnailed (IOError, or a None thumbnail)
	are skipped.
	"""
	valid = []
	# TrackingChannel indexes RSS data by item URL.
	tc = TrackingChannel()
	tc.parse("http://hackaday.com/feed/")

	# Iterate items directly instead of the original index-based loop.
	for key in tc.listItems():
		item = tc.getItem(key)
		entry = {
			'title': cut_title(item.get((ns.rss10, "title"))),
			'description': item.get((ns.rss10, "description")),
			'link': item.get((ns.rss10, "link")),
		}
		encoded = item.get((u'http://purl.org/rss/1.0/modules/content/', u'encoded'))
		try:
			img_tags = images_from_html(encoded)
			img_src = img_tags[0]['src']
			if not DEBUG:
				thumb = download_and_thumbnail(img_src)
				# Fixed: was `thumb == None`; identity test is correct here.
				if thumb is None:
					continue
				entry['img'] = thumb
			valid.append(entry)
		except IOError:
			# Don't care about links whose picture we can't thumbnail.
			continue
	return valid
# --- Exemplo n.º 3 (separator artifact from scraped code aggregation; score: 0) ---
    def suck_feed(self):
        # Indexes RSS data by item URL
        tc = TrackingChannel()

        # Returns the RSSParser instance used, which can usually be ignored
        print "Fetching ", self.url, " ..."
        tc.parse(self.url)

        RSS10_TITLE = (ns.rss10, "title")
        RSS10_DESC = (ns.rss10, "description")

        self.list = list()

        items = tc.listItems()
        for item in items:
            url = item[0].replace("ol", "dl")
            print url
            self.list.append(url)
# --- Exemplo n.º 4 (separator artifact from scraped code aggregation; score: 0) ---
    def suck_feed(self):
        #Indexes RSS data by item URL
        tc = TrackingChannel()

        #Returns the RSSParser instance used, which can usually be ignored
        print "Fetching ", self.url, " ..."
        tc.parse(self.url)

        RSS10_TITLE = (ns.rss10, 'title')
        RSS10_DESC = (ns.rss10, 'description')

        self.list = list()

        items = tc.listItems()
        for item in items:
            url = item[0].replace("ol", "dl")
            print url
            self.list.append(url)
# --- Exemplo n.º 5 (separator artifact from scraped code aggregation; score: 0) ---
def getRSSData(stationCode=10326):
    """Fetch the Wunderground RSS feed for *stationCode* and parse the
    current-conditions item.

    Returns the dict produced by parseCurrentCond(), or None when the
    first feed item is not a "Current Conditions" entry.
    (stationCode for Graz: 11240.)
    """
    # Local import kept to match the original; CollectionChannel was
    # imported but never used, so it is dropped.
    from RSS import ns, TrackingChannel

    # TrackingChannel indexes RSS data by item URL; parse() returns the
    # RSSParser instance used, which can usually be ignored.
    tc = TrackingChannel()
    tc.parse(
        "http://rss.wunderground.com/auto/rss_full/global/stations/%s.xml"
        % stationCode)

    RSS10_TITLE = (ns.rss10, 'title')
    RSS10_DESC = (ns.rss10, 'description')

    # Only the first item is examined; each item is a (url, order_index)
    # tuple keying into the channel.
    item = tc.listItems()[0]
    item_data = tc.getItem(item)

    title = item_data.get(RSS10_TITLE, "(none)")
    description = item_data.get(RSS10_DESC, "(none)")

    # BUG FIX: the original left valueDict unbound — returning it raised
    # UnboundLocalError whenever the title did not contain
    # "Current Conditions". Default to None instead.
    valueDict = None
    if title.find("Current Conditions") >= 0:
        valueDict = parseCurrentCond(description)

    return valueDict
def getRSSData(stationCode=10326):
    """Fetch the Wunderground RSS feed for *stationCode* and parse the
    current-conditions item.

    Returns the dict produced by parseCurrentCond(), or None when the
    first feed item is not a "Current Conditions" entry.
    (stationCode for Graz: 11240.)
    """
    # Local import kept to match the original; CollectionChannel was
    # imported but never used, so it is dropped.
    from RSS import ns, TrackingChannel

    # TrackingChannel indexes RSS data by item URL; parse() returns the
    # RSSParser instance used, which can usually be ignored.
    tc = TrackingChannel()
    tc.parse(
        "http://rss.wunderground.com/auto/rss_full/global/stations/%s.xml"
        % stationCode)

    RSS10_TITLE = (ns.rss10, 'title')
    RSS10_DESC = (ns.rss10, 'description')

    # Only the first item is examined; each item is a (url, order_index)
    # tuple keying into the channel.
    item = tc.listItems()[0]
    item_data = tc.getItem(item)

    title = item_data.get(RSS10_TITLE, "(none)")
    description = item_data.get(RSS10_DESC, "(none)")

    # BUG FIX: the original left valueDict unbound — returning it raised
    # UnboundLocalError whenever the title did not contain
    # "Current Conditions". Default to None instead.
    valueDict = None
    if title.find("Current Conditions") >= 0:
        valueDict = parseCurrentCond(description)

    return valueDict
# --- Exemplo n.º 7 (separator artifact from scraped code aggregation; score: 0) ---
# Fetch the user's recent tweets through the OAuth-authenticated Twitter
# REST v1.1 API; `content` is the raw JSON body of the response.
client = oauth.Client(consumer, token)
search_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=quik_silv"
response, content = client.request(search_url, method="GET")
twitter = json.loads(content)

# Parse the blog's RSS feed; TrackingChannel indexes items by item URL.
# (parse() returns the RSSParser instance used, which can usually be ignored.)
tc = TrackingChannel()
tc.parse("http://physikality.blogspot.com/feeds/posts/default?alt=rss")

# Namespaced keys used to pull fields out of each RSS item dictionary.
# NOTE(review): pubDate/image are RSS 2.0 element names being looked up in
# the RSS 1.0 namespace — confirm the parser actually files them there.
RSS10_TITLE = (ns.rss10, 'title')
RSS10_DESC = (ns.rss10, 'description')
RSS10_PUBDATE = (ns.rss10, 'pubDate')
RSS10_IMAGE = (ns.rss10, 'image')

# All parsed feed items; tc.keys() would work here as well.
items = tc.listItems()

try:
	con = MySQLdb.connect('localhost', 'kiasukid_admin', '*****@*****.**', 'kiasukid_squarepotato');
	cur = con.cursor()
	#Create a tracking channel, which is a data structure that
	#Indexes RSS data by item URL
	#twitter
	for t in twitter:
		created = t['created_at']
		title = MySQLdb.escape_string(t['text'])
		summary = ''
		link = ''
		image = ''
		source_id = str(t['id'])
		sql = "INSERT INTO streams (type, source_id, title, image, link, summary, created) VALUES ('tweet', '"+source_id+"', '"+title+"', '"+image+"', '"+link+"', '"+summary+"', '"+created+"')"