def iterentries(cls, limit=None):
    """Yield parsed entries from the feed at ``cls.url``.

    Results are served from the shared moksha feed cache when available,
    otherwise from a locally constructed Shove-backed cache.  On an HTTP
    error status or an unparseable feed, ``cls.title``/``cls.link`` are
    set from the response and the generator stops without yielding.

    :param limit: stop before yielding the entry at this index (yield at
        most ``limit`` entries); ``None`` means no limit.
    """
    # Lazily assign a stable identifier used to build per-entry uids.
    if not hasattr(cls, 'id'):
        cls.id = str(uuid.uuid4())
    uid_prefix = cls.id
    url = cls.url
    shared_cache = moksha.common.utils.feed_cache
    if shared_cache:
        feed = shared_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so set up our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    status = feed.get('status', 200)
    if status < 200 or status >= 400:
        # Record the failure on the class and bail out.
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        cls.title = feed.headers.get('status')
        cls.link = feed.feed.get('link')
        return
    cls.link = feed.feed.get('link')
    try:
        cls.title = feed.feed.title
    except AttributeError:
        cls.title = 'Unable to parse feed'
        return
    for index, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (uid_prefix, index)
        entry['link'] = entry.get('link')
        if index == limit:
            break
        yield entry
def iterentries(self, d=None, limit=None):
    """Yield parsed entries for this feed, optionally filling *d*.

    Results come from the shared moksha feed cache when available,
    otherwise from a locally constructed Shove-backed cache.  On an
    HTTP error status or an unparseable feed, ``d['title']`` and
    ``d['link']`` are populated (when *d* is given) and the generator
    stops without yielding.

    :param d: optional widget dict; may supply ``url``/``id`` overrides
        and receives ``title``/``link`` metadata.
    :param limit: stop before yielding the entry at this index (yield
        at most ``limit`` entries); ``None`` means no limit.
    """
    # Guard the d=None case: the original `self.url or d.get('url')`
    # raised AttributeError when d was None and self.url was falsy.
    url = self.url or (d.get('url') if d else None)
    # `entry_id` (was `id`, shadowing the builtin) prefixes entry uids;
    # a falsy id in d still falls back to self.id.
    entry_id = (d and d.get('id', self.id)) or self.id
    if moksha.utils.feed_cache:
        feed = moksha.utils.feed_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so set up our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    if not (200 <= feed.get('status', 200) < 400):
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        if d:
            d['title'] = feed.headers.get('status')
            d['link'] = feed.feed.get('link')
        return
    if d:
        d['link'] = feed.feed.get('link')
        try:
            d['title'] = feed.feed.title
        except AttributeError:
            d['title'] = 'Unable to parse feed'
            return
    for i, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (entry_id, i)
        entry['link'] = entry.get('link')
        if i == limit:
            break
        yield entry
def iterentries(cls, limit=None):
    """Yield parsed entries from the feed at ``cls.url``.

    Uses (and lazily initializes) the module-level Shove-backed feed
    cache.  On an HTTP error status or an unparseable feed,
    ``cls.title``/``cls.link`` are set from the response and the
    generator stops without yielding.

    :param limit: stop before yielding the entry at this index (yield
        at most ``limit`` entries); ``None`` means no limit.
    :raises ValueError: when no url has been supplied on the class.
    """
    # Lazily assign a stable identifier used to build per-entry uids.
    if not hasattr(cls, 'id'):
        cls.id = str(uuid.uuid4())
    uid_prefix = cls.id
    url = cls.url
    if not cls.url:
        raise ValueError("Feed must be supplied with a url.")
    global feed_cache, feed_storage
    if not feed_cache:
        feed_storage = Shove('sqlite:///feeds.db', compress=True)
        feed_cache = Cache(feed_storage)
    feed = feed_cache.fetch(url)
    status = feed.get('status', 200)
    if status < 200 or status >= 400:
        # Record the failure on the class and bail out.
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        cls.title = feed.headers.get('status')
        cls.link = feed.feed.get('link')
        return
    cls.link = feed.feed.get('link')
    try:
        cls.title = feed.feed.title
    except AttributeError:
        cls.title = 'Unable to parse feed'
        return
    for index, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (uid_prefix, index)
        entry['link'] = entry.get('link')
        if index == limit:
            break
        yield entry
def iterentries(self, d=None, limit=None):
    """Yield parsed entries for this feed, optionally filling *d*.

    Results come from ``moksha.feed_cache`` when available, otherwise
    from a locally constructed Shove-backed cache.  On an HTTP error
    status or an unparseable feed, ``d['title']``/``d['link']`` are
    populated (when *d* is given) and the generator stops without
    yielding.

    :param d: optional widget dict; may supply ``url``/``id`` overrides
        and receives ``title``/``link`` metadata.
    :param limit: stop before yielding the entry at this index (yield
        at most ``limit`` entries); ``None`` means no limit.
    """
    url = self.url
    if not url:
        # NOTE(review): assumes d is a dict here -- crashes if d is None.
        url = d.get('url')
    # Prefix used for per-entry uids; a falsy id in d falls back to self.id.
    entry_id = self.id
    if d:
        entry_id = d.get('id', self.id) or self.id
    shared_cache = moksha.feed_cache
    if shared_cache:
        feed = shared_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so set up our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    status = feed.get('status', 200)
    if status < 200 or status >= 400:
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        if d:
            d['title'] = feed.headers.get('status')
            d['link'] = feed.feed.get('link')
        return
    if d:
        d['link'] = feed.feed.get('link')
        try:
            d['title'] = feed.feed.title
        except AttributeError:
            d['title'] = 'Unable to parse feed'
            return
    for index, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (entry_id, index)
        entry['link'] = entry.get('link')
        if index == limit:
            break
        yield entry
def show_recent_tracks(lastfm_user):
    """Fetch a Last.fm user's recent-tracks RSS feed for template display.

    Each track gets a ``datetime`` stamped onto ``updated`` and its
    title split into separate ``artist`` and ``title`` attributes on
    the en-dash separator Last.fm uses.

    :param lastfm_user: Last.fm username interpolated into the feed URL.
    :returns: ``{'tracks': <entries>}`` context dict.
    """
    feed_cache = Cache(CacheDjango(cache))
    parsed = feed_cache.fetch(
        "http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss" % lastfm_user)
    recent = parsed.entries
    for item in recent:
        # Convert feedparser's struct_time into a real datetime.
        item.updated = datetime.fromtimestamp(mktime(item.updated_parsed))
        # Titles arrive as "Artist – Title"; split once on the en dash.
        item.artist, item.title = item.title.split(u' \u2013 ', 1)
    return {'tracks': recent}
def getFeed(self, url, post_data=None, request_headers=None, items=None):
    """Fetch *url* via the cached RSS backend and extract selected fields.

    :param url: feed URL to fetch.
    :param post_data: optional mapping urlencoded and appended to the URL.
    :param request_headers: extra HTTP headers forwarded to the fetcher.
    :param items: names of fields to copy out of the parsed response.
    :returns: dict mapping each requested item to its value, or ``None``
        when the field is missing from the response.
    """
    # `items` used a mutable default ([]); use the None sentinel instead.
    if items is None:
        items = []
    parsed = list(urlparse.urlparse(url))
    parsed[2] = re.sub("/{2,}", "/", parsed[2])  # replace two or more / with one
    # NOTE(review): the normalized path above is never reassembled into
    # `url`, so the fetch still uses the original URL -- confirm intent.
    if post_data:
        # NOTE(review): no '?' separator is inserted before the encoded
        # query -- preserved as-is, verify against callers.
        url += urllib.urlencode(post_data)
    data = dict.fromkeys(items, None)
    try:
        fc = Cache(self.rssDB)
        resp = fc.fetch(url, False, False, request_headers=request_headers)
        for item in items:
            try:
                data[item] = resp[item]
            except Exception:
                # Best-effort extraction: leave absent fields as None.
                # (Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.)
                continue
    finally:
        # Always release the feed database handle.
        self.rssDB.close()
    return data
def getFeed(self, url, post_data=None, request_headers=None, items=None):
    """Fetch *url* via the cached RSS backend and extract selected fields.

    :param url: feed URL to fetch.
    :param post_data: optional mapping urlencoded and appended to the URL.
    :param request_headers: extra HTTP headers forwarded to the fetcher.
    :param items: names of fields to copy out of the parsed response.
    :returns: dict mapping each requested item to its value, or ``None``
        when the field is missing from the response.
    """
    # `items` used a mutable default ([]); use the None sentinel instead.
    if items is None:
        items = []
    parsed = list(urlparse.urlparse(url))
    parsed[2] = re.sub("/{2,}", "/", parsed[2])  # replace two or more / with one
    # NOTE(review): the normalized path above is never reassembled into
    # `url`, so the fetch still uses the original URL -- confirm intent.
    if post_data:
        # NOTE(review): no '?' separator is inserted before the encoded
        # query -- preserved as-is, verify against callers.
        url += urllib.urlencode(post_data)
    data = dict.fromkeys(items, None)
    try:
        fc = Cache(self.rssDB)
        resp = fc.fetch(url, False, False, request_headers)
        for item in items:
            try:
                data[item] = resp[item]
            except Exception:
                # Best-effort extraction: leave absent fields as None.
                # (Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.)
                continue
    finally:
        # Always release the feed database handle.
        self.rssDB.close()
    return data