def verify_feed_url(url: str) -> bool:
    """Verifies whether a URL points to a proper feed."""
    try:
        get_feed(url, "rss")
        return True
    except Exception:
        return False

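# Hedged usage sketch for verify_feed_url; the URL below is a placeholder, not
# one taken from the original code, and get_feed is assumed to raise on
# anything it cannot parse as an RSS feed.
if verify_feed_url("https://example.com/feed.xml"):
    print("URL points to a parseable RSS feed")
else:
    print("URL could not be verified as a feed")
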
def get_cinemas(market_id):
    cinemas = []
    feed = get_feed(market_id)
    for cinema in feed["Market"]["Dates"][0]["Cinemas"]:
        cinemas.append({
            "cinema_name": cinema["CinemaName"],
            "cinema_id": cinema["CinemaId"]
        })
    return cinemas

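# Illustrative call to get_cinemas; "0100" is a made-up market id, since the
# real id format is defined by the upstream market feed and not shown here.
for cinema in get_cinemas("0100"):
    print(cinema["cinema_id"], cinema["cinema_name"])
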
def index():
    auth_status = auth()
    if auth_status[0]:
        return render_template("index.html",
                               signed_in=auth_status[0],
                               user=auth_status[1],
                               feed=feed.get_feed(auth_status[1].id),
                               verified=bool(auth_status[1].email_confirmed))
    else:
        return render_template("index.html", signed_in=auth_status[0])

def update_feed(self, feed: Feed):
    """Gets data for a feed in the queue.

    Emits data_downloaded_event when the data is retrieved, then sleeps the
    thread for the duration of the global_refresh_rate."""
    try:
        logging.debug(f"Fetching {feed.uri}")
        updated_feed, articles = get_feed(feed.uri, feed.template)
        self.data_downloaded_event.emit(feed, updated_feed, articles)
    except Exception as exc:
        logging.error(f"Error parsing feed {feed.uri}, {exc}")
    time.sleep(self.settings["global_refresh_rate"])

def get_cinema(cinema_id):
    print("Cinema ID:", cinema_id)
    # The market id is the first two digits of the cinema id, padded with "00"
    market_id = "{}00".format(cinema_id[:2])
    print("Market ID:", market_id)
    feed = get_feed(market_id)
    for cinema in feed["Market"]["Dates"][0]["Cinemas"]:
        if cinema["CinemaId"] == cinema_id:
            films = []
            for film in cinema["Films"]:
                films.append(Film(film).__dict__)
            print(films)
            cinema["Films"] = films
            return cinema
    return None

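# Sketch of calling get_cinema; "0105" is a hypothetical cinema id whose first
# two digits select the market feed, mirroring the slicing done in get_cinema.
cinema = get_cinema("0105")
if cinema is not None:
    for film in cinema["Films"]:
        print(film)
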
def get_feed1(username, cur_user):
    database_active = True
    if database_active:
        feed_content = feed.get_feed(cur_user)
        print("feed len", len(feed_content))
    else:
        # Fallback sample entry used when the database is unavailable
        temp = {}
        temp['username'] = '******'
        temp['date'] = '20/03/2019'
        temp['tags'] = ['ab', 'cd']
        temp['content'] = ['Hello Friends']
        temp['id'] = 2
        feed_content = [temp]
    return feed_content

def add_feed(self, location: str, folder: Folder) -> None:
    """Verifies that a feed is valid and adds it to the folder."""
    feed, articles = get_feed(location, "rss")
    feed.uri = location
    feed.parent_folder = folder
    feed.template = "rss"
    # feed_counter should always be free
    feed.db_id = settings["feed_counter"]
    settings["feed_counter"] += 1
    folder.children.append(feed)
    self._process_new_articles(feed, articles)
    self.feeds_updated_event.emit()
    self._save_feeds()

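# Assumed usage of add_feed; `manager` (the object that owns add_feed) and
# `root_folder` (an existing Folder) are placeholder names, not from the source.
manager.add_feed("https://example.com/rss.xml", root_folder)
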
def get(self, url):
    f = feed.get_feed(url)
    if f:
        story_rows = feed.get_feed_stories(f.id)
        html = u"<h1>{title}</h1>Updated: {updated}<hr/>".format(**f)
        for row in story_rows:
            if "title" in row:
                html += u'<a href="{1}"><b>{0}</b></a>'.format(row["title"], row["link"])
            if "published" in row and row.published:
                html += u" {0}<br/>".format(row["published"].strftime("%c"))
            html += "<br/>"
            if "contents" in row and row.contents:
                html += u"{0}<br/><hr/>".format(row["contents"])
            elif "summary" in row:
                html += u"{0}<br/><hr/>".format(row.summary)
        self.write(html)
    else:
        self.write("Feed not found")

if len(sys.argv) != 2:
    print("no feed given")
    sys.exit(-1)

settings = Settings()
settings.read_argv()

# Start Logger
logger.setupLogger(settings)

# read and parse filterfiles
wordfilter = Filter(settings)
wordfilter.read_filterlist("./blackwordlist.txt")
wordfilter.read_filterlist(settings.sitename)

# Parse feed
feed = get_feed(settings.feedfile)
# For now we use the language without any regional variants
lang = feed.lang.split("-")[0]

loaded_plugins = []
for plugin in plugins.plugins:
    loaded_plugins.append(plugin(settings.url))

for child in feed:
    if child.deleted:
        continue
    # Check for duplicates
    max_similarity = 0
    child.wordlist: Tuple[Counter, float] = comparetext.analyse(lang, child.title, str(child.description),

def get_feed1(username, cur_user):
    feed_content = feed.get_feed(cur_user)
    return feed_content

    original_file_name, file_extension = os.path.splitext(file_link)
    pub_date = datetime.strptime(item['pubDate'][0:16], '%a, %d %b %Y') if item['pubDate'] else None
    pub_date = pub_date.strftime('%Y-%m-%d') if pub_date else None
    file_name = item['title']
    file_name = file_name.replace(':', ' -')
    file_name = pub_date + ' - ' + file_name + file_extension
    return file_name


PODCAST_FEEDS = [
    (
        'http://feeds.feedburner.com/filmjunk?format=xml',  # Feed
        '/volume1/media/podcasts/Film Junk Podcast',         # Destination
        fj_formatter),                                       # Filename formatter
]

if __name__ == '__main__':
    for feed in PODCAST_FEEDS:
        for item in get_feed(feed[0]):
            logger.info('> %s - %s - %s' % (item['pubDate'], item['title'], item['link']))
            formatter = feed[2](item) if feed[1] else None
            download_file(item['link'], destination=feed[1], filename=formatter)