Example #1
File: hackurls.py  Project: dataartisan/prg
def get_hackaday_feed():
	valid = []
	tc = TrackingChannel()
	tc.parse("http://hackaday.com/feed/")
	items = tc.listItems()
	
	for i in range(len(items)):
		entry = {}
		item = tc.getItem(items[i])
		entry['title'] = cut_title(item.get((ns.rss10, "title")))
		entry['description'] = item.get((ns.rss10, "description"))
		entry['link'] = item.get((ns.rss10, "link"))
		encoded = item.get((u'http://purl.org/rss/1.0/modules/content/', u'encoded'))
		try:
			img_tags = images_from_html(encoded)
			img_src = img_tags[0]['src']
			if not DEBUG:
				thumb = download_and_thumbnail(img_src)
				if thumb is None:
					continue
				entry['img'] = thumb
			#entry.description = remove_html_tags(entry.description)
			valid.append(entry)
		except IOError:
			# skip entries whose image we can't download or thumbnail
			continue
	return valid
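A minimal call-site sketch for the function above; it assumes the same module context (DEBUG, cut_title, images_from_html and download_and_thumbnail are project helpers that are not shown here):

if __name__ == "__main__":
    # Print the title and link of each entry that survived thumbnailing.
    for entry in get_hackaday_feed():
        print("%s -> %s" % (entry['title'], entry['link']))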
Example #2
    def addRSS(self):
        # Indexes RSS data by item URL
        tc = TrackingChannel()
        StartDate = self.eventdate - datetime.timedelta(days=3)
        EndDate = self.eventdate + datetime.timedelta(days=7)

        # Returns the RSSParser instance used, which can usually be ignored
        tc.parse(self.createURL(StartDate, EndDate))

        RSS10_TITLE = (ns.rss10, 'title')
        RSS10_DESC = (ns.rss10, 'description')

        # You can also use tc.keys()
        items = tc.listItems()
        for item in items:
            # Each item is a (url, order_index) tuple
            url = item[0]
            # Get all the data for the item as a Python dictionary
            item_data = tc.getItem(item)
            title = item_data.get(RSS10_TITLE, "(none)")
            desc = item_data.get(RSS10_DESC, "(none)")
            desc = desc.replace("<br/>", "").replace("\n", "").replace("\r", "").replace("  ", " ")
            for q in self.query.split():
                # find() returns -1 on a miss, which is truthy, so both
                # substring checks need the explicit ">= 0" comparison
                if title.lower().find(q.lower()) >= 0 or desc.lower().find(q.lower()) >= 0:
                    if len(self.results) <= self.rpp:
                        self.results.append(SearchResult(title.decode("utf-8"), url.decode("utf-8"), desc.decode("utf-8")))
                        break
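The per-word substring test above can also be written with the in operator; the helper below is only an equivalent sketch of that matching step, not part of the original project:

def matches_query(title, desc, query):
    # True if any whitespace-separated query word occurs in the title or
    # description, case-insensitively; this mirrors the loop in addRSS above.
    text = ("%s %s" % (title, desc)).lower()
    return any(q.lower() in text for q in query.split())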
Example #3
File: RSSPlaylist.py  Project: dewn49/FreeJ
    def suck_feed(self):
        # Indexes RSS data by item URL
        tc = TrackingChannel()

        # Returns the RSSParser instance used, which can usually be ignored
        print "Fetching ", self.url, " ..."
        tc.parse(self.url)

        RSS10_TITLE = (ns.rss10, "title")
        RSS10_DESC = (ns.rss10, "description")

        self.list = list()

        items = tc.listItems()
        for item in items:
            url = item[0].replace("ol", "dl")
            print url
            self.list.append(url)
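suck_feed defines RSS10_TITLE and RSS10_DESC but never reads them; the standalone sketch below shows the same loop extended to pull the title as well, using only the TrackingChannel calls already demonstrated in these examples:

from RSS import ns, TrackingChannel

def feed_links_and_titles(feed_url):
    # Collect (link, title) pairs from an RSS feed.
    tc = TrackingChannel()
    tc.parse(feed_url)
    RSS10_TITLE = (ns.rss10, 'title')
    pairs = []
    for item in tc.listItems():
        # Each item is a (url, order_index) tuple.
        url = item[0]
        item_data = tc.getItem(item)
        pairs.append((url, item_data.get(RSS10_TITLE, "(none)")))
    return pairs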
Example #5
def getRSSData(stationCode=10326):
    """acquires rss data from wunderground and parses it for current weather conditions at stationCode"""
    """stationCode for Graz: 11240"""
    
    from RSS import ns, CollectionChannel, TrackingChannel
    
    #Create a tracking channel, which is a data structure that
    #Indexes RSS data by item URL
    tc = TrackingChannel()

    #Returns the RSSParser instance used, which can usually be ignored
    tc.parse("http://rss.wunderground.com/auto/rss_full/global/stations/%s.xml" % stationCode)


    RSS10_TITLE = (ns.rss10, 'title')
    RSS10_DESC = (ns.rss10, 'description')

    #You can also use tc.keys()
    items = tc.listItems()


    item = items[0]
    #Each item is a (url, order_index) tuple
    url = item[0]

    #Get all the data for the item as a Python dictionary
    item_data = tc.getItem(item)

    title = item_data.get(RSS10_TITLE, "(none)")
    description = item_data.get(RSS10_DESC, "(none)")


    #print "Title:", title
    #print "Description:", item_data.get(RSS10_DESC, "(none)")

    # Default to an empty dict so the return below never raises a NameError
    # when the first feed item is not the "Current Conditions" entry.
    valueDict = {}
    if title.find("Current Conditions") >= 0:
        valueDict = parseCurrentCond(description)

    return valueDict
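A quick call-site sketch; the station code 11240 (Graz) comes from the docstring, and the keys of the returned dictionary depend on parseCurrentCond, which is not shown here:

if __name__ == "__main__":
    conditions = getRSSData(stationCode=11240)
    for key in sorted(conditions):
        print("%s: %s" % (key, conditions[key]))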
Example #7
#client = oauth.Client(consumer)
#response, content = client.request(instagram_url, method="GET")
#print response, content
#request_token = dict(urlparse.parse_qsl(content) )
#print request_token

#twitter agent
consumer = oauth.Consumer(key="QA1jtASXGEiX7bzyAGZGAA", secret="AvuCDWl8RSFES1d6GyQzzZ7JyYsJTetcquOx3OHjI")
token = oauth.Token('14068158-MZvgwZJAIFB8r1VXn5H1xXlRaSZ8Fj5j0spAqpQg', 'JiJzuOLEoEM9ulDihUlqNSmsPHz2D1RvOEEQXdrb0')
client = oauth.Client(consumer, token)
search_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=quik_silv"
response, content = client.request(search_url, method="GET")
twitter = json.loads(content)
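# statuses/user_timeline returns a JSON array of status objects; pulling out
# the tweet text is a sketch that assumes the standard "text" field of the
# Twitter 1.1 API is present on each status.
latest_tweets = [status.get('text', '') for status in twitter]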

#Returns the RSSParser instance used, which can usually be ignored
tc = TrackingChannel()
tc.parse("http://physikality.blogspot.com/feeds/posts/default?alt=rss")

RSS10_TITLE = (ns.rss10, 'title')
RSS10_DESC = (ns.rss10, 'description')
RSS10_PUBDATE = (ns.rss10, 'pubDate')
RSS10_IMAGE = (ns.rss10, 'image')

#You can also use tc.keys()
items = tc.listItems()

try:
	con = MySQLdb.connect('localhost', 'kiasukid_admin', '*****@*****.**', 'kiasukid_squarepotato')
	cur = con.cursor()
	#Create a tracking channel, which is a data structure that
	#Indexes RSS data by item URL
Example #8
#This tests the capability to parse the events.rpi.edu RSS feed.
#Items are separated by URL, Title, and Description just like
#the Google results.

#References
#http://wiki.python.org/moin/RssLibraries
#http://www.ibm.com/developerworks/webservices/library/ws-pyth11.html

from RSS import ns, CollectionChannel, TrackingChannel

#Create a tracking channel, which is a data structure that
#Indexes RSS data by item URL
tc = TrackingChannel()

#Returns the RSSParser instance used, which can usually be ignored
tc.parse("http://events.rpi.edu/webcache/v1.0/rssRange/20101001/20101031/list-rss/no--filter.rss")

RSS10_TITLE = (ns.rss10, 'title')
RSS10_DESC = (ns.rss10, 'description')

#You can also use tc.keys()
items = tc.listItems()
for item in items:
    #Each item is a (url, order_index) tuple
    url = item[0]
    print "URL:", url
    #Get all the data for the item as a Python dictionary
    item_data = tc.getItem(item)
    print "Title:", item_data.get(RSS10_TITLE, "(none)")
    print "Description:", item_data.get(RSS10_DESC, "(none)")