def iterentries(cls, limit=None):
    """Generate the parsed entries of this feed.

    Fetches the feed through the shared moksha feed cache when the
    middleware is running, otherwise lazily builds a private sqlite-backed
    cache so the object also works outside of WSGI requests.  On a non-2xx/3xx
    response the feed's ``title``/``link`` are set from the response headers
    and nothing is yielded.

    :param limit: stop after yielding this many entries (None = all)
    """
    # Assign a stable unique identifier the first time entries are requested.
    if not hasattr(cls, 'id'):
        cls.id = str(uuid.uuid4())
    feed_id = cls.id
    url = cls.url
    if moksha.common.utils.feed_cache:
        feed = moksha.common.utils.feed_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so setup our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    status = feed.get('status', 200)
    if not 200 <= status < 400:
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        cls.title = feed.headers.get('status')
        cls.link = feed.feed.get('link')
        return
    cls.link = feed.feed.get('link')
    try:
        cls.title = feed.feed.title
    except AttributeError:
        # Feed could not be parsed; record that and yield nothing.
        cls.title = 'Unable to parse feed'
        return
    for index, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (feed_id, index)
        entry['link'] = entry.get('link')
        if index == limit:
            break
        yield entry
def iterentries(self, d=None, limit=None):
    """Generate the parsed entries of this feed.

    :param d: optional dict; when given, its 'url'/'id' keys may override
        the feed's own, and the feed's 'title'/'link' are written back
        into it.
    :param limit: stop after yielding this many entries (None = all)
    :raises ValueError: when no url is available (lazily, on first
        iteration, since this is a generator)
    """
    # Fix: the original did `self.url or d.get('url')`, which raised
    # AttributeError when self.url was falsy and d was left at its
    # default of None.  Guard d and fail with an explicit error instead.
    url = self.url or (d.get('url') if d else None)
    if not url:
        raise ValueError("Feed must be supplied with a url.")
    # Avoid shadowing the builtin `id`.
    feed_id = d and d.get('id', self.id) or self.id
    if moksha.utils.feed_cache:
        feed = moksha.utils.feed_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so setup our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    if not (200 <= feed.get('status', 200) < 400):
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        if d:
            d['title'] = feed.headers.get('status')
            d['link'] = feed.feed.get('link')
            return
    if d:
        d['link'] = feed.feed.get('link')
        try:
            d['title'] = feed.feed.title
        except AttributeError:
            # Feed could not be parsed; record that and yield nothing.
            d['title'] = 'Unable to parse feed'
            return
    for i, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (feed_id, i)
        entry['link'] = entry.get('link')
        if i == limit:
            break
        yield entry
def iterentries(cls, limit=None):
    """Generate the parsed entries of this feed.

    Uses (and lazily initializes) the module-level sqlite-backed feed
    cache.  On a non-2xx/3xx response the feed's ``title``/``link`` are
    set from the response headers and nothing is yielded.

    :param limit: stop after yielding this many entries (None = all)
    :raises ValueError: when no url is set (lazily, on first iteration,
        since this is a generator)
    """
    # Assign a stable unique identifier the first time entries are requested.
    if not hasattr(cls, 'id'):
        cls.id = str(uuid.uuid4())
    feed_id = cls.id
    url = cls.url
    if not cls.url:
        raise ValueError("Feed must be supplied with a url.")
    # Lazily build the shared feed cache on first use.
    global feed_cache, feed_storage
    if not feed_cache:
        feed_storage = Shove('sqlite:///feeds.db', compress=True)
        feed_cache = Cache(feed_storage)
    feed = feed_cache.fetch(url)
    status = feed.get('status', 200)
    if not 200 <= status < 400:
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        cls.title = feed.headers.get('status')
        cls.link = feed.feed.get('link')
        return
    cls.link = feed.feed.get('link')
    try:
        cls.title = feed.feed.title
    except AttributeError:
        # Feed could not be parsed; record that and yield nothing.
        cls.title = 'Unable to parse feed'
        return
    for index, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (feed_id, index)
        entry['link'] = entry.get('link')
        if index == limit:
            break
        yield entry
def iterentries(self, d=None, limit=None):
    """Generate the parsed entries of this feed.

    :param d: optional dict; when given, its 'url'/'id' keys may override
        the feed's own, and the feed's 'title'/'link' are written back
        into it.
    :param limit: stop after yielding this many entries (None = all)
    :raises ValueError: when no url is available (lazily, on first
        iteration, since this is a generator)
    """
    # Fix: the original did `self.url or d.get('url')`, which raised
    # AttributeError when self.url was falsy and d was left at its
    # default of None.  Guard d and fail with an explicit error instead.
    url = self.url or (d.get('url') if d else None)
    if not url:
        raise ValueError("Feed must be supplied with a url.")
    # Avoid shadowing the builtin `id`.
    feed_id = d and d.get('id', self.id) or self.id
    if moksha.feed_cache:
        feed = moksha.feed_cache.fetch(url)
    else:
        # MokshaMiddleware not running, so setup our own feed cache.
        # This allows us to use this object outside of WSGI requests.
        global feed_cache, feed_storage
        if not feed_cache:
            feed_storage = Shove('sqlite:///feeds.db', compress=True)
            feed_cache = Cache(feed_storage)
        feed = feed_cache.fetch(url)
    if not (200 <= feed.get('status', 200) < 400):
        log.warning('Got %s status from %s: %s' % (
            feed['status'], url, feed.headers.get('status')))
        if d:
            d['title'] = feed.headers.get('status')
            d['link'] = feed.feed.get('link')
            return
    if d:
        d['link'] = feed.feed.get('link')
        try:
            d['title'] = feed.feed.title
        except AttributeError:
            # Feed could not be parsed; record that and yield nothing.
            d['title'] = 'Unable to parse feed'
            return
    for i, entry in enumerate(feed.get('entries', [])):
        entry['uid'] = '%s_%d' % (feed_id, i)
        entry['link'] = entry.get('link')
        if i == limit:
            break
        yield entry
def show_recent_tracks(lastfm_user):
    """Fetch a last.fm user's recent-tracks RSS feed and return its tracks.

    Each track gets its ``updated`` field converted to a datetime and its
    combined "artist – title" string split into separate ``artist`` and
    ``title`` attributes.

    :param lastfm_user: the last.fm username to look up
    :returns: dict with a single 'tracks' key (for template context use)
    """
    feed_cache = Cache(CacheDjango(cache))
    feed = feed_cache.fetch(
        "http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss" % lastfm_user)
    tracks = feed.entries
    for track in tracks:
        track.updated = datetime.fromtimestamp(mktime(track.updated_parsed))
        # The feed encodes "artist – title" in one string, joined by an
        # en dash; split once so titles containing the dash stay intact.
        track.artist, track.title = track.title.split(u' \u2013 ', 1)
    return {'tracks': tracks}
def __init__(self, application): log.info('Creating Moksha Middleware') self.application = application self.mokshaapp = MokshaAppDispatcher(application) moksha.utils._apps = { } # {'app name': tg.TGController/tg.WSGIAppController} moksha.utils._widgets = {} # {'widget name': tw.api.Widget} moksha.utils.menus = {} # {'menu name': moksha.api.menus.MokshaMenu} self.engines = {} # {'app name': sqlalchemy.engine.base.Engine} self.load_paths() self.load_renderers() self.load_configs() self.load_widgets() self.load_applications() self.load_wsgi_applications() self.load_models() self.load_menus() self.load_root() try: moksha.utils.feed_storage = Shove( config.get('feed_store', 'simple://'), config.get('feed_cache', 'simple://'), compress=True) moksha.utils.feed_cache = Cache(moksha.utils.feed_storage) except Exception, e: log.error(str(e)) log.error("Unable to initialize the Feed Storage")
def getFeed(self, url, post_data=None, request_headers=None, items=None,
            handlers=None):
    """Fetch *url* through the RSS feed cache and return the parsed response.

    :param url: feed url to fetch
    :param post_data: optional mapping, urlencoded and appended to the url
    :param request_headers: extra headers passed through to the fetch
    :param items: unused here; kept for interface compatibility with callers
    :param handlers: optional urllib2-style handlers passed to the fetch
    :returns: the parsed feed object from the cache's fetch()

    The feed database is always closed, even when the fetch raises.
    """
    # Fix: the original used a mutable default argument (handlers=[]),
    # which is shared across calls; use None and build a fresh list.
    if handlers is None:
        handlers = []
    if post_data:
        # NOTE(review): the encoded data is appended without a '?'
        # separator; preserved as-is since callers may rely on it -- confirm.
        url += urllib.urlencode(post_data)
    try:
        resp = Cache(self.rssDB, userAgent=sickbeard.common.USER_AGENT).fetch(
            url, force_update=True, request_headers=request_headers,
            handlers=handlers)
    finally:
        self.rssDB.close()
    return resp
def getFeed(self, url, post_data=None, request_headers=None, items=None):
    """Fetch *url* through the RSS feed cache and return the parsed response.

    :param url: feed url to fetch
    :param post_data: optional mapping, urlencoded and appended to the url
    :param request_headers: extra headers passed through to the fetch
    :param items: unused here; kept for interface compatibility with callers
    :returns: the parsed feed object from the cache's fetch()

    The feed database is always closed, even when the fetch raises.
    """
    if post_data:
        url += urllib.urlencode(post_data)
    try:
        return Cache(self.rssDB).fetch(url, force_update=True,
                                       request_headers=request_headers)
    finally:
        self.rssDB.close()
def getFeed(self, url, post_data=None, request_headers=None, items=None):
    """Fetch *url* through the feed cache and extract selected keys.

    :param url: feed url to fetch
    :param post_data: optional mapping, urlencoded and appended to the url
    :param request_headers: extra headers passed through to the fetch
    :param items: keys to copy out of the fetched response; missing keys
        stay None (best-effort extraction)
    :returns: dict mapping each requested item to its value or None

    The feed database is always closed, even when the fetch raises.
    """
    # Fix: the original used a mutable default argument (items=[]),
    # shared across calls; use None and normalize here.  The original
    # also normalized the url path with urlparse/re.sub but never used
    # the result, so that dead code is dropped.
    if items is None:
        items = []
    if post_data:
        url += urllib.urlencode(post_data)
    data = dict.fromkeys(items, None)
    try:
        fc = Cache(self.rssDB)
        resp = fc.fetch(url, False, False, request_headers=request_headers)
        for item in items:
            try:
                data[item] = resp[item]
            except Exception:
                # Best-effort: leave missing/unreadable items as None
                # (narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit).
                continue
    finally:
        self.rssDB.close()
    return data
def getFeed(self, url, post_data=None, request_headers=None, items=None):
    """Fetch *url* through the feed cache and extract selected keys.

    :param url: feed url to fetch
    :param post_data: optional mapping, urlencoded and appended to the url
    :param request_headers: extra headers passed positionally to the fetch
    :param items: keys to copy out of the fetched response; missing keys
        stay None (best-effort extraction)
    :returns: dict mapping each requested item to its value or None

    The feed database is always closed, even when the fetch raises.
    """
    # Fix: the original used a mutable default argument (items=[]),
    # shared across calls; use None and normalize here.  The original
    # also normalized the url path with urlparse/re.sub but never used
    # the result, so that dead code is dropped.
    if items is None:
        items = []
    if post_data:
        url += urllib.urlencode(post_data)
    data = dict.fromkeys(items, None)
    try:
        fc = Cache(self.rssDB)
        resp = fc.fetch(url, False, False, request_headers)
        for item in items:
            try:
                data[item] = resp[item]
            except Exception:
                # Best-effort: leave missing/unreadable items as None
                # (narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit).
                continue
    finally:
        self.rssDB.close()
    return data
def clearCache(self, age=None):
    """Purge aged entries from the RSS feed cache, then close the DB.

    :param age: maximum age passed through to Cache.purge(); None purges
        per the cache's default policy

    Fix: the original did ``fc = Cache(self.rssDB).purge(age)`` and then
    called ``fc.purge(age)`` again -- ``purge()`` does not return the
    cache, so the second call raised AttributeError on None.  Purge once.
    """
    try:
        Cache(self.rssDB).purge(age)
    finally:
        self.rssDB.close()
def initialize_feed_storage(config):
    """Create the global Shove-backed feed storage and its cache wrapper.

    Reads the 'feed_store' and 'feed_cache' URIs from *config* (both
    default to the in-memory 'simple://' backend) and publishes the
    resulting storage and Cache on moksha.common.utils.

    :param config: mapping with optional 'feed_store'/'feed_cache' keys
    """
    store_uri = config.get('feed_store', 'simple://')
    cache_uri = config.get('feed_cache', 'simple://')
    storage = Shove(store_uri, cache_uri, compress=True)
    moksha.common.utils.feed_storage = storage
    moksha.common.utils.feed_cache = Cache(storage)