def rss():
    """Build the Dogear newsfeed RSS document and return it as an HTTP response."""
    fg = FeedGenerator()
    fg.id('http://dogear-2112.herokapp.com')
    fg.title('Dogear Newsfeed')
    fg.author({'name': 'Haley Cohen', 'email': '*****@*****.**'})
    fg.link(href='http://dogearnews.com', rel='alternate')
    fg.logo('https://www.fixpocket.com/public_assets/uploads/beats/1523422664maxresdefault.jpg')
    fg.subtitle('The Dogear Social Newsfeed')
    fg.link(href='http://localhost:5000/rss/index.rss', rel='self')
    fg.language('en')

    # Most recent 25 news items, newest first.
    recent_news = News.query.order_by("created_at desc").limit(25).all()
    for item in recent_news:
        entry = fg.add_entry()
        entry.id(item.url)
        entry.title(item.title)
        entry.link(href=item.url)
        entry.enclosure(item.picture_url, 0, 'image/jpeg')
        # entry.pubDate(item.created_at)

    response = make_response(fg.rss_str(pretty=True))
    response.headers['Content-Type'] = 'application/rss+xml'
    return response
def main():
    """Fetch newly added Plex media from every server and write an RSS feed file."""
    config = __import__('config')
    user = MyPlexUser.signin(config.username, config.password)

    # Parse all resources concurrently; parse_resource fills the shared `data` list.
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        for resource in user.resources():
            executor.submit(parse_resource, user, resource)
    print('Servers data parsed')

    paris = pytz.timezone('Europe/Paris')
    ordered = reversed(sorted(data, key=lambda item: paris.localize(item.addedAt)))
    print('Media sorted')

    fg = FeedGenerator()
    fg.id('http://satreix.fr/feeds/plex.rss')
    fg.generator('plex-feed')
    fg.title('PLEX feed')
    fg.subtitle('Newly added media content')
    fg.author({'name': 'satreix', 'email': '*****@*****.**'})
    fg.link(href='http://satreix.fr', rel='alternate')
    fg.logo('https://plex.tv/assets/img/googleplus-photo-cb6f717c8cfd8b48df6dbb09aa369198.png')
    fg.link(href='http://satreix.fr/feeds/plex.rss', rel='self')
    fg.language('en')

    # Only the 50 most recently added items make it into the feed.
    for media in list(ordered)[:50]:
        entry = fg.add_entry()
        entry.id(media.getStreamUrl())
        entry.title('{} - {}'.format(media.title, media.server.friendlyName))
        entry.pubdate(paris.localize(media.addedAt))
        entry.description(media.summary, True)

    fg.rss_file('plex.rss', pretty=True)
    print('File written')
def getcomments():
    """Write comments.xml: one RSS entry per article, categorised by its comments."""
    req = requests.get('http://localhost/article',
                       json={"count": 10},
                       auth=('*****@*****.**', 'adminpassword'))
    value = req.json()

    fg = FeedGenerator()
    fg.title('A full feed')
    fg.link(href='http://localhost/feed/summary/comments')
    fg.subtitle('This is a cool feed!')
    fg.language('en')

    for article in value:
        # The article id is the third path segment of the article URL.
        parts = article.get('articles_url', '').split('/')
        reqcomment = requests.get('http://localhost/comment/get/' + parts[2],
                                  json={"count": 30},
                                  auth=('*****@*****.**', 'adminpassword'))
        entry = fg.add_entry()
        entry.content(article.get('articles_url', ''))
        if reqcomment.status_code == 200:
            for comment in reqcomment.json():
                entry.category(term=comment)
        else:
            entry.category(term="None")

    fg.rss_file('comments.xml')
    return "YOUR COMMENTS WAS CREATED comments.xml", 200
def feed(request):
    """Serve the Atom feed for the blog resolved from the request address."""
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']
    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{root}/feed")
        fe.content(unmark(post.content))

    return HttpResponse(fg.atom_str(pretty=True), content_type='application/atom+xml')
def writeRSS(papers, output):
    """Write both Atom and RSS versions of the PVLDB paper feed into `output`.

    Fix: the original built `atom_str`/`rss_str` strings and discarded them;
    only the `atom_file`/`rss_file` writes are needed.
    """
    fg = FeedGenerator()
    fg.id(RSS_URL)
    fg.title(RSS_TITLE)
    fg.subtitle(RSS_SUBTITLE)
    fg.author(RSS_AUTHOR)
    fg.link(href='http://www.vldb.org/pvldb/', rel='alternate')
    fg.language('en')

    for p in papers:
        # Summary combines the title, authors, and the volume/number info.
        summary = "%(title)s\nAuthors: %(authors)s\nPVLDB Volume %(volume)d, Number %(number)d" % p
        fe = fg.add_entry()
        fe.author(name=p["authors"])
        fe.title(p["title"])
        fe.link(href=p["link"])
        fe.id(p["link"])
        fe.published(published=p["published"])
        fe.description(description=summary, isSummary=True)

    # Write the ATOM feed to a file.
    atom_file = os.path.join(output, 'pvldb-atom.xml')
    fg.atom_file(atom_file)
    LOG.info("Created ATOM '%s'" % atom_file)

    # Write the RSS feed to a file.
    rss_file = os.path.join(output, RSS_FILE)
    fg.rss_file(rss_file)
    LOG.info("Created RSS '%s'" % rss_file)
class RssHistory(View):
    """RSS History Page Controller"""

    @redirect_if_not_installed
    def get(self, request):
        # Propagate the request correlation id when the header is present.
        self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
        self.__fg = FeedGenerator()
        self.__context = Context()
        self.__option_entity = OptionEntity()

        self.__context.autoload_options()
        self.__context.push({
            "page_title": self.__context.get("app_name", os.getenv("APP_NAME", "Silverback")),
            "is_authenticated": request.user and request.user.is_authenticated
        })

        # Static feed-level metadata.
        self.__fg.id('http://silverbackhq.org')
        self.__fg.title('Some Testfeed')
        self.__fg.author({'name': 'John Doe', 'email': '*****@*****.**'})
        self.__fg.link(href='http://example.com', rel='alternate')
        self.__fg.logo('http://ex.com/logo.jpg')
        self.__fg.subtitle('This is a cool feed!')
        self.__fg.link(href='http://silverbackhq.org/test.atom', rel='self')
        self.__fg.language('en')

        return HttpResponse(self.__fg.atom_str(), content_type='text/xml')
def generate_feed():
    """Render the site pages as an RSS feed and return it as a string.

    Fix: removed code after the `return` statement (a `feed_path` branch
    returning a Response or aborting with 404) that could never execute.
    """
    from feedgen.feed import FeedGenerator

    feed_name = 'feed_name'
    fg = FeedGenerator()
    fg.id('xxxurl/' + feed_name)
    fg.title(feed_name)
    fg.link(href='xxxurl/' + feed_name, rel='alternate')
    # fg.logo('http://ex.com/logo.jpg')
    fg.subtitle('by FeedGenerator')
    fg.link(href='xxxurl/' + feed_name + 'atom', rel='self')
    fg.language('zh-cn')

    for page in sorted(pages):
        fe = fg.add_entry()
        fe.id(page.metadata['url'])
        fe.title(page.metadata['title'])
        fe.link(href=page.metadata['url'])
        fe.description('\n\n' + page.to_html() + '\n')

    return fg.rss_str(pretty=True)
def _build_rss(self, messages, rss_file, atom_file):
    """Write RSS and Atom feeds for `messages` into the publish directory.

    NOTE(review): the `rss_file` and `atom_file` parameters are never used;
    output names are hard-coded to index.xml / index.atom — confirm intent.
    """
    feed = FeedGenerator()
    feed.id(self.config["site_url"])
    feed.generator("tg-archive {}".format(
        pkg_resources.get_distribution("tg-archive").version))
    feed.link(href=self.config["site_url"], rel="alternate")
    feed.title(self.config["site_name"].format(group=self.config["group"]))
    feed.subtitle(self.config["site_description"])

    for message in messages:
        # Deep-link to the message anchor on its archive page.
        url = "{}/{}#{}".format(self.config["site_url"],
                                self.page_ids[message.id], message.id)
        entry = feed.add_entry()
        entry.id(url)
        entry.title("@{} on {} (#{})".format(message.user.username,
                                             message.date, message.id))
        entry.description(self._make_abstract(message))
        if message.media and message.media.url:
            media_url = "{}/{}/{}".format(
                self.config["site_url"],
                os.path.basename(self.config["media_dir"]),
                message.media.url)
            entry.enclosure(media_url, 0, "application/octet-stream")

    feed.rss_file(os.path.join(self.config["publish_dir"], "index.xml"))
    feed.atom_file(os.path.join(self.config["publish_dir"], "index.atom"))
def init_feed_generator(feed):
    """Return a FeedGenerator with the podcast extension and basic metadata set."""
    generator = FeedGenerator()
    generator.load_extension('podcast')
    generator.title("PocketCast")
    generator.link(href=feed.feed.link, rel='alternate')
    # Fall back to a generic subtitle when the source feed has no description.
    generator.subtitle(feed.feed.description or 'PocketCast')
    return generator
def render_feed(root_path: str, link_page_rows: List[LinkRow]):
    """Render rss.xml and atom.xml under `root_path` for the given link rows."""
    logger.info("Rendering feed outputs.")

    feed = FeedGenerator()
    feed.id(ENV["SITE_URL"])
    feed.title(ENV["SITE_TITLE"])
    feed.link(href=ENV["SITE_URL"], rel="alternate")
    feed.subtitle(ENV["SITE_DESC"])
    feed.link(href=urljoin(ENV["SITE_URL"], "feed.rss"), rel="self")
    feed.language("tr")

    for link in get_links_by_date(link_page_rows):
        entry = feed.add_entry()
        entry.id(link.file_path)
        entry.title(link.title)
        entry.description(link.desc)
        entry.link(
            title=link.title,
            rel="alternate",
            type="text/html",
            href=urljoin(ENV["SITE_URL"], link.file_path),
        )
        entry.updated(link.create_time)

    feed.rss_file(join(root_path, "rss.xml"), pretty=True)
    feed.atom_file(join(root_path, "atom.xml"), pretty=True)
def generate_feed(config: Config, songs: List[Dict]) -> None:
    """Write an RSS/podcast feed (index.xml) for `songs` into config.out_dir."""
    assert config.base_url, "Base URL is required to generate a feed!"
    feed_name = "index.xml"
    base_url = config.base_url

    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.id(base_url)
    fg.title(config.title)
    fg.subtitle(config.description)
    fg.link(href=base_url, rel="alternate")
    # One feed-level author per distinct song artist.
    artists = {song["artist"] for song in songs if song.get("artist")}
    fg.author([{"name": artist} for artist in artists])
    fg.logo(urljoin(base_url, "favicon.ico"))
    fg.link(href=urljoin(base_url, feed_name), rel="self")
    fg.language("en")

    # Reverse the list so the newest song ends up first in the feed.
    for song in reversed(songs):
        entry = fg.add_entry()
        song_url = urljoin(base_url, song["src"])
        entry.id(song_url)
        entry.title(song["title"])
        entry.description(DESCRIPTION_TEMPLATE.render(song=song).strip())
        entry.link(href=urljoin(base_url, f'#{song["src"]}'))
        entry.enclosure(song_url, song["filesize"], "audio/mpeg")
        entry.published(entry_date(song["date"]))

    fg.rss_file(os.path.join(config.out_dir, feed_name))
def gen_feed(title, author, feed_url, url, subtitle, logo,
             categories=None, album=False, licenses=False):
    """Assemble and return a podcast-enabled FeedGenerator for a reel2bits feed."""
    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.id(feed_url)
    fg.title(title)
    fg.author(author)
    fg.link(href=url)
    fg.link(href=feed_url, rel="self")
    fg.logo(logo)
    fg.subtitle(subtitle)
    fg.language("en")
    fg.generator(generator="reel2bits",
                 uri=f"https://{current_app.config['AP_DOMAIN']}",
                 version=g.cfg["REEL2BITS_VERSION"])

    # Album feeds advertise the first category via iTunes and list them all.
    if album and categories:
        fg.podcast.itunes_category(categories[0])
        fg.category([{"term": c, "label": c} for c in categories])

    if licenses:
        fg.rights("See individual tracks: " + ", ".join(licenses))

    return fg
def generate_rss(self):
    """Write rss.xml for this site's articles into the working directory."""
    domain_url = self.settings.domain_url

    fg = FeedGenerator()
    fg.id(f"{domain_url}/index.html")
    fg.title(self.name)
    fg.link(href=f'{domain_url}/index.html', rel='self')
    fg.subtitle(self.name)
    fg.language('en')

    for article in self._articles:
        entry = fg.add_entry()
        entry.id(article.url)
        entry.title(article.title)
        entry.summary(article.summary)
        entry.link(href=article.url)
        entry.author(name=article.author_name, email=article.author_email)
        entry.category(category=[{'term': category}
                                 for category in article.categories])
        # Guess the MIME type from the image name; empty string if unknown.
        entry.enclosure(url=article.header_image,
                        type=mimetypes.guess_type(article.header_image)[0] or '')
        entry.published(article.timestamp)

    fg.rss_file(str(self.workdir / 'rss.xml'))
def feed(request):
    """Serve the blog feed; `?type=rss` yields RSS, anything else Atom."""
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']
    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'http://{root}/')
    fg.author({'name': blog.subdomain, 'email': blog.user.email})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"http://{root}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"http://{root}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': blog.user.email})
        fe.link(href=f"http://{root}/feed/")
        fe.content(unmark(post.content))
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"http://{root}/feed/?type=rss", rel='self')
        return HttpResponse(fg.rss_str(pretty=True),
                            content_type='application/rss+xml')

    fg.link(href=f"http://{root}/feed/", rel='self')
    return HttpResponse(fg.atom_str(pretty=True),
                        content_type='application/atom+xml')
def feed(request):
    """Resolve the blog from the request host and serve its Atom feed."""
    http_host = request.META['HTTP_HOST']

    # The bare apex / local dev hosts have no blog of their own.
    if http_host in ('bearblog.dev', 'www.bearblog.dev', 'localhost:8000'):
        return redirect('/')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        # Subdomain of the main site: look the blog up by subdomain.
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(blog.subdomain)
    else:
        # Custom domain: look the blog up by domain.
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host

    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{root}/feed")
        fe.content(unmark(post.content))

    return HttpResponse(fg.atom_str(pretty=True),
                        content_type='application/atom+xml')
def main():
    """Mirror a Tumblr blog's posts into an Atom feed file.

    Fix: iterate the posts list directly instead of indexing with
    `for i in range(len(...))`.
    """
    client = moduleSocial.connectTumblr()
    posts = client.posts('fernand0')

    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author({'name': posts['blog']['name'], 'email': '*****@*****.**'})
    fg.link(href=posts['blog']['url'], rel='alternate')
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')

    print(len(posts['posts']))
    for post in posts['posts']:
        fe = fg.add_entry()
        print(post['post_url'])
        # Prefer an explicit title; fall back to the summary's first line.
        if 'title' in post:
            title = post['title']
            print('T', post['title'])
        else:
            title = post['summary'].split('\n')[0]
            print('S', post['summary'].split('\n')[0])
        fe.title(title)
        fe.link(href=post['post_url'])
        fe.id(post['post_url'])

    print(fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml'))
    sys.exit()
def feed(request):
    """Serve the blog feed; `?type=rss` yields RSS, otherwise Atom."""
    blog = resolve_address(request)
    if not blog:
        raise Http404("Blog does not exist")

    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(blog.useful_domain())
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    # Prefer the explicit meta description, then trimmed content, then title.
    fg.subtitle(blog.meta_description or clean_text(unmark(blog.content)[:160]) or blog.title)
    fg.link(href=f"{blog.useful_domain()}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{blog.useful_domain()}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
        return HttpResponse(fg.rss_str(pretty=True),
                            content_type='application/rss+xml')

    fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
    return HttpResponse(fg.atom_str(pretty=True),
                        content_type='application/atom+xml')
def GET(self):
    """Render the configured IMAP mailbox as an RSS feed string."""
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"

    fg = FeedGenerator()
    #TODO create icon
    # fg.icon('http://www.det.ua.pt')
    fg.id(config.get('rss', 'id'))
    fg.title(config.get('rss', 'title'))
    fg.subtitle(config.get('rss', 'subtitle'))
    fg.description(config.get('rss', 'description'))
    fg.author({'name': config.get('rss', 'author_name'),
               'email': config.get('rss', 'author_email')})
    fg.language(config.get('rss', 'language'))
    fg.link(href=config.get('rss', 'href'), rel='related')

    client = EmailClient()
    # Newest messages first, limited to the configured maximum item count.
    for msgn in reversed(client.listBox(config.get('imap', 'mailbox'))[:config.getint('rss', 'maxitems')]):
        cherrypy.log("RSS Entry: " + msgn)
        em = client.getEMail(msgn)
        entry = fg.add_entry()
        entry.title(em['subject'])
        entry.author({'name': em['From']['name'], 'email': em['From']['email']})
        entry.guid(config.get("main", "baseurl") + 'news/' + msgn)
        entry.link({'href': config.get("main", "baseurl") + 'news/' + msgn,
                    'rel': 'alternate'})
        entry.pubdate(em['date'])
        entry.content(em['body'])

    return fg.rss_str(pretty=True)
def route(user):
    """Serve the RSS feed, optionally restricted to a single user's posts."""
    init_session()

    fg = FeedGenerator()
    fg.title(session['site_name'])
    fg.subtitle('Trash bag of links')
    fg.generator(session['site_name'])
    fg.link(href=get_config_value('address'))
    fg.logo(f'{get_config_value("address")}/static/img/logo.png')
    fg.language('en')

    rss_posts = get_all_posts_for_rss() if user is None else get_all_user_posts_for_rss(user)
    for post in reversed(rss_posts):
        # Truncate long titles to 60 characters with an ellipsis.
        post_title = Markup((post.title[:60] + '...') if len(post.title) > 60 else post.title)
        entry = fg.add_entry()
        entry.title(f'[{post.tags}] {post_title}')
        entry.link(href=html.unescape(Markup(post.url)))
        entry.author(name=post.username)
        entry.pubDate(datetime.datetime.fromtimestamp(post.datetime).strftime('%c +10'))
        entry.updated(datetime.datetime.fromtimestamp(post.updated).strftime('%c +10'))

    response = make_response(fg.rss_str(pretty=True))
    response.headers.set('Content-Type', 'application/rss+xml')
    return response
def generate_rss_from_articles(feed_settings, articles):
    """
    Create a FeedGenerator feed from a set of articles.

    :param feed_settings: object carrying the feed-level metadata
    :param articles: iterable of items with link/title/description attributes
    :return: the populated FeedGenerator
    """
    output_feed = FeedGenerator()

    # Feed-level metadata.
    output_feed.title(feed_settings.title)
    output_feed.author(feed_settings.author)
    output_feed.link(href=feed_settings.source_page_url, rel='alternate')
    output_feed.link(href=feed_settings.output_url, rel='self')
    output_feed.logo(feed_settings.logo_img_url)
    output_feed.subtitle(feed_settings.subtitle)
    output_feed.language(feed_settings.language)
    # output_rss.id(UM_SOMETHING)

    # One feed entry per article.
    for article in articles:
        entry = output_feed.add_entry()
        entry.id(article.link)  # ATOM # guid for RSS?
        entry.link(href=article.link, rel='alternate')  # ATOM
        entry.title(article.title)
        entry.description(article.description)
        # entry.link(article.link)

    return output_feed
def generateFeeds(buffered, meta):
    """Add one feed entry per buffered tweet, using the metadata in `meta`.

    Fix: `except Exception, e` is Python 2-only syntax; `except Exception as e`
    is valid on both Python 2.6+ and Python 3.
    """
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link(href=meta['link'], rel='self')
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        # Localize the UTC timestamp to the configured locale's timezone.
        fe.published(utc.localize(tweet['created_at']).astimezone(pytz.timezone(locale)))
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
        try:
            fe.author({'name': '',
                       'email': tweet['user_name'].decode('utf-8') + ": " + tweet['text'].decode('utf-8')})
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email': '*****@*****.**'})
def init_feed() -> None:
    """
    This function initialises the RSS feed with the correct attributes.

    Fix: narrowed a bare `except:` to `except Exception:` so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    log.debug("Initialising the feed...")
    global fg
    try:
        fg = FeedGenerator()
        # Setup [root] feed attributes
        fg.id("https://www.dagzure.com/dynamics_feed.xml")
        fg.title("Dynamics 365 Feeds")
        fg.generator("SingleRSS/v1.0.1")
        fg.link(href="https://www.dagzure.com/dynamics_feed.xml", rel="self")
        fg.subtitle("Combined feed for RSS feeds")
        fg.language('en')
    except Exception:
        log.error("Error initialising the feed!")
        sys.exit(1)
    log.debug("Feed initialised!")
    return None
def cache_rss_latest(user_slug):
    """Build and return (as a string) the RSS feed of a user's latest articles."""
    articles = data.userDocumentLastChanged_list(user_slug)
    netloc = bottle.request.urlparts.netloc

    fg = FeedGenerator()
    fg.id(abs_url(bottle.request, '/user/%s' % user_slug))
    fg.title('Nigel Chapman (%s)' % netloc)
    fg.subtitle('Long reads on Christian thought')  # <-- Set METADATA for this
    # fg.author( {'name':'Nigel Chapman','email':'*****@*****.**'} )
    fg.logo('https://%s/static/site-image.png' % (netloc))
    fg.link(href='https://%s' % netloc, rel='self')
    # fg.link(href='https://%s/rss/%s.xml' % (netloc, user_slug), rel='self')
    fg.language('en')
    # NOTE(review): RSS <ttl> is expressed in minutes, so 24 * 3600 is 60
    # days; confirm whether 1440 (24 hours) was intended.
    fg.ttl(24 * 3600)

    for article in articles:
        entry = fg.add_entry()
        article_uri = 'read/%s/%s' % (article['user'], article['slug'])
        entry.id(abs_url(bottle.request, article_uri))
        entry.title(article['title'])
        entry.description(article['summary'])
        entry.link(href=abs_url(bottle.request, article_uri))
        entry.author(name=article['email'], email=article['author'])  # <-- Wierdly backwards
        entry.published(article['published_time'])

    return fg.rss_str(pretty=True)
def generateFeeds(buffered, meta):
    """Add one feed entry per buffered tweet, using the metadata in `meta`.

    Fix: `except Exception, e` is Python 2-only syntax; `except Exception as e`
    is valid on both Python 2.6+ and Python 3.
    """
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link(href=meta['link'], rel='self')
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        # Localize the UTC timestamp to the configured locale's timezone.
        fe.published(
            utc.localize(tweet['created_at']).astimezone(
                pytz.timezone(locale)))
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
        try:
            fe.author({
                'name': '',
                'email': tweet['user_name'].decode('utf-8') + ": " +
                tweet['text'].decode('utf-8')
            })
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email': '*****@*****.**'})
def feed():
    """ Generate atom feed """
    entries = parse_posts(0, C.feed_count)

    fg = FeedGenerator()
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author(dict(name=C.author, email=C.email))
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')

    for post in entries:
        item = fg.add_entry()
        item.id(post.get('url'))
        item.title(post.get('title'))
        item.published(post.get('date'))
        # Fall back to the publish date when no explicit update time exists.
        item.updated(post.get('updated') or post.get('date'))
        item.link(href=make_abs_url(C.root_url, post.get('url')), rel='alternate')
        item.author(dict(name=post.get('author'), email=post.get('email')))
        item.content(post.get('body'))

    return fg.atom_str(pretty=True)
def build_feed(directory, time_func, base_url, output="atom.xml", n=10,
               title="", subtitle="", author="", email="", verbose=False):
    """
    Build an Atom feed for all world readable Gemini files in the current
    directory, and write it to atom.xml.

    Fix: the entry loop index used to shadow the `n` parameter (the entry
    count); it is now a separate name.
    """
    # If a title hasn't been provided, try to get one from an index page
    if not title:
        title = get_feed_title(directory)

    # Let user know feed title and URL
    feed_url = urljoin(base_url, output)
    if verbose:
        print('Generating feed "{}", which should be served from {}'.format(
            title, feed_url))

    # Setup feed
    feed = FeedGenerator()
    feed.id(base_url)
    feed.title(title)
    if subtitle:
        feed.subtitle(subtitle)
    author_details = {}
    if author:
        author_details["name"] = author
    if email:
        author_details["email"] = email
    if author_details:
        feed.author(author_details)
    feed.link(href=feed_url, rel='self')
    feed.link(href=base_url, rel='alternate')

    # Add one entry per .gmi file
    files = find_files(directory, time_func, n)
    if not files:
        if verbose:
            print("No world-readable Gemini content found! :(")
        return
    for index, filename in enumerate(files):
        entry = feed.add_entry()
        populate_entry_from_file(filename, base_url, entry, time_func)
        # The newest entry's timestamp doubles as the feed-level update time.
        if index == 0:
            feed.updated(entry.updated())
        if verbose:
            print("Adding {} with title '{}'...".format(
                os.path.basename(filename), entry.title()))

    # Write file
    output = os.path.join(directory, output)
    feed.atom_file(output, pretty=True)
    if verbose:
        print("Wrote Atom feed to {}.".format(output))
def recent_feed():
    """Serve the recent-posts RSS feed.

    Fix: entry GUIDs were fresh uuid4 values regenerated on every request,
    which makes feed readers treat every item as new each time the feed is
    fetched; use the stable post URL as a permalink GUID instead.
    """
    fg = FeedGenerator()
    fg.title(config.config["title"])
    fg.subtitle(config.config["title"] + " Atom Feed")
    fg.link(
        {
            "href": config.config["url"] + "/feed.rss",
            "rel": "self",
            "type": "application/rss+xml",
        }
    )

    for page in pages:
        if page.meta.get("ispage"):
            continue  # skip static pages; only posts belong in the feed
        fe = fg.add_entry()
        fe.title(page["title"])
        fe.description((str(page.__html__())))
        post_url = config.config["url"] + "/posts/" + page.path
        fe.link({"href": post_url})
        fe.guid(post_url, permalink=True)
        fe.author({"name": config.config["author"]})
        # Posts only carry a date; pin both timestamps to local midnight.
        fe.updated(
            datetime.datetime.combine(
                page["date"], datetime.datetime.min.time(), tzinfo=LOCAL_TIMEZONE
            )
        )
        fe.published(
            datetime.datetime.combine(
                page["date"], datetime.datetime.min.time(), tzinfo=LOCAL_TIMEZONE
            )
        )

    response = make_response(fg.rss_str(pretty=True))
    response.headers.set("Content-Type", "application/rss+xml")
    return response
def create_rss_feed(filename='rss-new.xml'):
    """Build the RSS feed from the configured feed list, write it to `filename`,
    and return the FeedGenerator.

    Fixes: `published` and `pubDate` are the same feedgen property, so the
    duplicate second call with the same value was dropped; also removed an
    `rss_str` computation whose result was discarded.
    """
    fg = FeedGenerator()
    fg.id(config.get("DEFAULT", "WEBSITE"))
    fg.title(config.get("DEFAULT", "RSSTITLE"))
    fg.description(config.get("DEFAULT", "RSSDESCRIPTION"))
    fg.author({
        "name": config.get("DEFAULT", "AUTHOR"),
        "email": config.get("DEFAULT", "EMAIL")
    })
    fg.link(href=config.get("DEFAULT", "RSSLINK"), rel='self')
    fg.subtitle(config.get("DEFAULT", "RSSSUBTITLE"))
    fg.language(config.get("DEFAULT", "LANGUAGE"))

    for entry in get_feed_list():
        fe = fg.add_entry()
        fe.id(entry.id)
        fe.title(entry.title)
        fe.description(entry.description)
        fe.published(entry.published)
        fe.link(href=entry.id)

    # Uncomment the bottom if you want an Atom RSS file created instead of an rss.xml
    # fg.atom_file('atom-new.xml')  # Write the ATOM feed to a file

    fg.rss_file(filename)  # Write the RSS feed to a file
    return fg
def generate_feed_from_episodes(episodes):
    """Write seinfeld.xml, one podcast entry per episode file.

    Fix: removed an `fg.rss_str(pretty=True)` call whose result was discarded
    right before the `rss_file` write.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://dannyshaw.github.io/podcast-feeds')
    fg.title('Seinfeld Complete Audio')
    fg.link(href='http://dannyshaw.github.io/podcast-feeds', rel='alternate')
    fg.subtitle('I\'ve seen them enough, audio is all I need.')
    fg.link(
        href=
        'https://raw.githubusercontent.com/dannyshaw/podcast-feeds/master/podcast-feeds/seinfeld.xml',
        rel='self')
    fg.language('en')

    for index, ep in enumerate(episodes):
        file_size = getsize(join(FILES, ep))
        fe = fg.add_entry()
        fe.id(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.title(ep)
        fe.description(ep)
        # Synthesize one publish date per episode so readers keep the order.
        pub_date = datetime(1999, 1, 1, tzinfo=timezone.utc) + timedelta(index)
        fe.pubDate(pub_date)
        fe.link(href=f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.enclosure(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}',
                     f'{file_size}', 'audio/mpeg')

    fg.rss_file('seinfeld.xml')
def feed():
    """Serve the site's Atom feed built from the ten most recent posts."""
    root_url = request.url_root.rstrip("/")
    home_full_url = root_url + url_for(".index")
    feed_full_url = root_url + url_for(".feed")

    site = app.config["SITE_INFO"]
    # Fall back to UTC when the configured timezone cannot be parsed.
    site_tz = s2tz(site["timezone"]) or timezone(timedelta())

    # set feed info
    feed_gen = FeedGenerator()
    feed_gen.id(home_full_url)
    feed_gen.title(site.get("title", ""))
    feed_gen.subtitle(site.get("subtitle", ""))
    if "author" in site:
        feed_gen.author(name=site["author"])
    feed_gen.link(href=home_full_url, rel="alternate")
    feed_gen.link(href=feed_full_url, rel="self")

    # add feed entries (ten most recent posts)
    for meta in load_posts(meta_only=True)[:10]:
        post = load_post(meta["filename"])
        if not post:
            continue
        feed_entry = feed_gen.add_entry()
        feed_entry.id(root_url + post["url"])
        feed_entry.link(href=root_url + post["url"])
        feed_entry.title(post["title"])
        feed_entry.content(post["content"])
        feed_entry.published(post["created"].replace(tzinfo=site_tz))
        feed_entry.updated(
            post.get("updated", post["created"]).replace(tzinfo=site_tz))
        if "author" in post:
            feed_entry.author(name=post["author"])

    # make http response
    resp = make_response(feed_gen.atom_str(pretty=True))
    resp.content_type = "application/atom+xml; charset=utf-8"
    return resp
def generate_feed(output_file, exclude_highlights=True):
    """Rebuild the podcast feed from the ESPN RSS source and write `output_file`.

    Fixes: removed an `fg.rss_str(pretty=True)` call whose result was
    discarded, and replaced the `pass`-only filter branch with a positive
    `continue` condition.
    """
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:
        # Optionally skip episodes longer than an hour (full-show uploads).
        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            continue
        fe = fg.add_entry()
        fe.id(e.id)
        fe.title(e.title)
        fe.description(e.description)
        fe.enclosure(url=e.enclosures[0]["href"],
                     length=e.enclosures[0]["length"],
                     type=e.enclosures[0]["type"])
        fe.podcast.itunes_summary(e.description)
        fe.podcast.itunes_subtitle(e.description)
        fe.podcast.itunes_duration(e["itunes_duration"])

        dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
        date = tz.localize(dt)

        # Local hour: shift publication time by the hour encoded in the title.
        if "Show: " in e.title:
            fe.published(date)
        elif "Hour 1" in e.title:
            fe.published(date + timedelta(hours=1))
        elif "Hour 2" in e.title:
            fe.published(date + timedelta(hours=2))
        elif "Hour 3" in e.title:
            fe.published(date + timedelta(hours=3))
        else:
            fe.published(date + timedelta(hours=-1))

    fg.rss_file(output_file)
def podcast_feed_generator():
    """This should be optimized and constructed only once."""
    fg = FeedGenerator()
    fg.id('martialcoder')
    fg.title("Suraj Sahani")
    fg.link(href='http://www.surajsahani.github.io')
    fg.author({'name': 'Suraj Sahani', 'email': '*****@*****.**'})
    fg.subtitle('Things that make my mind go bing!')
    fg.language('en')
    fg.description(
        """My corner of the Great WWW where I talk about things I relate to."""
    )

    # One entry per podcast listed in the static JSON manifest.
    for podcast in get_static_json('static/podcasts/podcasts.json'):
        episode = fg.add_entry()
        episode.id(podcast['url'])
        episode.title(podcast['title'])
        episode.description(podcast['description'])
        episode.enclosure(podcast['url'], 0, 'audio/mpeg')

    return fg
def rss(_):
    """Returns the XML content of my RSS feed for the music part of the
    website.

    NOTE: We are doing no caching here at all right now, because this function
    is very fast and the website has no traffic. If this situation changes,
    then I should cache it so that I don't build this object from scratch
    every time."""
    generator = FeedGenerator()

    # Add basic metadata.
    generator.title("Paul's Music Feed")
    generator.author(name=MY_NAME, email=MY_EMAIL)
    generator.contributor(name=MY_NAME, email=MY_EMAIL)
    # RSS requires that we point to our own feed here. Not sure why.
    generator.link(href=(URL_ROOT + "rss"), rel="self")
    favicon_path = URL_ROOT + "static/favicon.png"
    generator.icon(favicon_path)
    generator.logo(favicon_path)
    generator.subtitle(
        "A feed for anyone who wants to know what albums I'm liking.")
    generator.language("en")

    for album in get_recent_music(quantity=30):
        entry = generator.add_entry()
        entry.title(album.name)
        path_to_album = URL_ROOT + "music/music/{}".format(album.id)
        entry.guid(path_to_album, permalink=True)
        entry.description(album.description())
        entry.updated(album.reviewed_at)
        entry.published(album.reviewed_at)
        entry.author(name=MY_NAME, email=MY_EMAIL)
        entry.link(href=path_to_album, rel="alternate")
        entry.category(term="score__{}".format(album.rating))

    return HttpResponse(generator.rss_str())
def getcontent():
    """Fetch articles and write them to content.xml as an RSS feed.

    Fix: replaced the non-idiomatic `type(x) == type(None)` comparison
    with an `is None` check.
    """
    req = requests.get('http://localhost/article/content',
                       json={"count": 10},
                       auth=('*****@*****.**', 'adminpassword'))
    value = req.json()

    fg = FeedGenerator()
    fg.title('A full feed')
    fg.link(href='http://localhost/feed/summary/content')
    fg.subtitle('This is a cool feed!')
    fg.language('en')

    for x in value:
        fe = fg.add_entry()
        fe.content(x.get('articles_content', ''))
        # NOTE(review): compares against the literal string 'null' — the API
        # presumably serialises missing tags that way; confirm.
        if x.get('articles_tags', '') != 'null':
            for v in x.get('articles_tags', ''):
                fe.category(term='Tag: ' + v)
        else:
            fe.category(term='Tag: None')
        comments = x.get('articles_comments', '')
        if comments is None:
            count = '0'
        else:
            count = str(len(comments))
        fe.category(term='Comment Count: ' + count)

    fg.rss_file('content.xml')
    return "YOUR CONTENT WAS CREATED content.xml", 200
def create_mock_fg():
    """Return a FeedGenerator populated with fixed test metadata."""
    feed = FeedGenerator()
    feed.id(FEED_ID)
    feed.title('Some Test Feed')
    feed.author({'name': 'Edfward', 'email': '*****@*****.**'})
    feed.subtitle('Test feed subtitle!')
    feed.link(href=FEED_ID, rel='self')
    feed.language('en')
    return feed
def rss(conversation, url, author_name, author_email, title, subtitle, language, output_path):
    """Export all the links of the conversation in a simple RSS feed"""
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author({
        'name': author_name,
        'email': author_email,
    })
    fg.link(href=url, rel='alternate')
    if subtitle:
        fg.subtitle(subtitle)
    fg.language(language)

    for message in conversation.history():
        # Match a Slack-formatted link token: <url|optional title>.
        match = re.search(
            "^.*<(?P<url>[^>|]+)\|?(?P<title>[^>]+)?>.*$",
            message.data["text"],
            flags=re.MULTILINE
        )
        if match is None:
            continue

        fe = fg.add_entry()
        link = match.group("url")
        entry_title = match.group("title") or link
        date = naive_to_local(datetime.datetime.fromtimestamp(float(message.data["ts"])))
        description = message.data["text"]

        if "attachments" in message.data:
            # Enrich title/description from the attachment unfurled for this link.
            attachment = [a for a in message.data["attachments"]
                          if a["title_link"] == link][0]
            entry_title += " | " + attachment["title"]
            description += """
""" + attachment["text"]

        fe.id(link)
        fe.title(entry_title)
        fe.link(href=link)
        fe.published(date)

        user = config.slack.get_user(message.data["user"])
        author = {
            "name": message.data["username"],
            "email": user.email or "noemail",
        }
        fe.author(author)
        fe.description(description)

    fg.rss_file(output_path, pretty=True)
def build_feed_generator(query=None):
    """Create a FeedGenerator carrying the site-wide feed metadata.

    The self link points at the feed view for *query* (external URL).
    """
    feed = FeedGenerator()
    feed.title(FEED_TITLE)
    feed.subtitle(FEED_SUBTITLE)
    feed.language(FEED_LANG)
    self_url = url_for('views.feed', query=query, _external=True)
    feed.link(href=self_url, rel='self', type='application/rss+xml')
    return feed
def main(argv):
    """Render an RSS or Atom feed of food inspections read from stdin."""
    ap = argparse.ArgumentParser(
        description=''' Render RSS and Atom feeds from a CSV of food inspection data. ''')
    ap.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='increase global logging verbosity; can be used multiple times')
    ap.add_argument(
        '-f', '--format', choices=['rss', 'atom'], default='atom',
        # Fixed: the help text contained a stray "')" left over from an
        # earlier edit, which was shown verbatim to the user.
        help='specify the format to use when rendering the feed (default: %(default)s)')
    ap.add_argument(
        '-n', '--num_incidents', metavar='<num>', type=int, default=10,
        help='render <num> recent incidents in the feed (default: %(default)s)')
    ap.add_argument(
        'flavor', nargs='?', default='all', choices=['all', 'failures'],
        help='select the flavor of feed to render (default: %(default)s)')
    args = ap.parse_args()
    # Each -v lowers the threshold one level (ERROR -> WARNING -> INFO ...).
    logging.basicConfig(
        level=logging.ERROR - args.verbosity * 10, style='{',
        format='{}: {{message}}'.format(ap.prog))
    fg = FeedGenerator()
    fg.id('http://pgriess.github.io/dallas-foodscores/')
    fg.link(href=fg.id(), rel='self')
    fg.title('Dallas Food Inspection Scores')
    fg.subtitle(''' Food inspection scores from the official City of Dallas dataset; updated daily ''')
    fg.description(fg.subtitle())
    fg.language('en')
    fg.author(
        name='Peter Griess',
        email='*****@*****.**',
        uri='https://twitter.com/pgriess')
    for i in get_inspections_to_feed(sys.stdin, args.num_incidents, args.flavor):
        fe = fg.add_entry()
        fe.title('{name} at {address} scored {score}'.format(
            name=i.name, address=i.address, score=i.score))
        # Synthesize a stable-ish per-entry id from the inspection's hash.
        fe.id(fg.id() + '#!/' + str(abs(hash(i))))
        fe.link(href=fe.id(), rel='alternate')
        fe.content(fe.title())
        fe.published(TZ.localize(i.date))
    if args.format == 'atom':
        print(fg.atom_str(pretty=True))
    else:
        print(fg.rss_str(pretty=True))
def rss(request):
    """Serve the podcast RSS feed, recording a Google Analytics pageview.

    Measurement-protocol hit fields:
      v  .. protocol version        t  .. hit type (pageview)
      tid .. tracking/property id   dh .. document hostname
      cid .. anonymous client id    dp .. page, dt .. title
    """
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')
    # Prefer the first proxied address when behind a load balancer.
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    ip = forwarded.split(',')[0] if forwarded else request.META.get('REMOTE_ADDR')
    # Derive a stable anonymous client id from the visitor's IP.
    cid = uuid.uuid5(angrates_uuid, ip)
    requests.post('https://www.google-analytics.com/collect', data={
        'v': 1,
        'tid': 'UA-19269567-1',
        'cid': cid,
        't': 'pageview',
        'dh': 'armstrongandgettybingo.com',
        'dp': '/rss/',
        'dt': 'Podcast',
    })
    feed = FeedGenerator()
    feed.load_extension('podcast')
    feed.id('http://www.armstrongandgettybingo.com/rss')
    feed.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    feed.podcast.itunes_explicit('no')
    feed.title('The Armstrong and Getty Show (Bingo)')
    feed.author({'name': 'Ben Friedland', 'email': '*****@*****.**'})
    feed.link(href='http://www.armstrongandgettybingo.com', rel='alternate')
    feed.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    feed.subtitle('Armstrong and Getty Bingo')
    feed.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    feed.link(href='http://www.armstrongandgettybingo.com/rss', rel='self')
    feed.language('en')
    pacific = pytz.timezone('America/Los_Angeles')
    for hour in Hour.objects.all().order_by('-pub_date'):
        item = feed.add_entry()
        item.id(hour.link)
        item.title(hour.title)
        item.description(hour.description)
        item.enclosure(hour.link, 0, 'audio/mpeg')
        item.published(pacific.localize(hour.pub_date))
    return HttpResponse(feed.rss_str(pretty=True), content_type='application/rss+xml')
def create_fg():
    """Build the ACCRE status FeedGenerator with its static metadata."""
    feed = FeedGenerator()
    feed.id("http://www.accre.vanderbilt.edu")
    feed.title("ACCRE's Status Feed")
    feed.author({'name': "Josh Arnold", 'email': "*****@*****.**"})
    feed.link(href="http://www.accre.vanderbilt.edu", rel="alternate")
    feed.logo("http://www.accre.vanderbilt.edu/wp-content/themes/ashford/favicon.ico")
    feed.subtitle("ACCRE's Status Feed")
    feed.language('en')
    return feed
def blog_feed(self, values):
    """Build a FeedGenerator for the blog and posts carried in *values*."""
    domain = request.httprequest.host_url.rstrip('/')
    blog = values['blog']
    feed = FeedGenerator()
    feed.id(domain + values['pager']['page']['url'])
    feed.title(blog.name)
    feed.subtitle(blog.subtitle)
    for post in values['blog_posts']:
        entry = feed.add_entry()
        entry.id(domain + '/blog/{}/post/{}'.format(slug(blog), slug(post)))
        entry.title(post.name)
        entry.content(post.content)
    return feed
def get(self):
    """Render a small hand-built two-item RSS feed into the response."""
    sao_paulo = pytz.timezone("America/Sao_Paulo")
    feed = FeedGenerator()
    feed.id("http://test.ts")
    feed.title("My Test Feed")
    feed.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
    feed.author({'name': "The Author", 'email': "*****@*****.**"})
    feed.link(href="http://example.org/index.atom?page=2", rel="next")
    feed.link(href="http://test.ts", rel="alternate")
    feed.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
    feed.description("Este é o monstro do lago 1")
    feed.subtitle("This is an example feed!")
    feed.language("en-us")
    # TODO: also emit <sy:updatePeriod>hourly</sy:updatePeriod> and
    # <sy:updateFrequency>1</sy:updateFrequency>.
    feed.lastBuildDate(datetime.now(sao_paulo))
    entry = feed.add_item()
    entry.id("http://test.ts/id/1")
    entry.title("Monstro do Lago 1")
    entry.description("Este é o monstro do lago 1")
    entry.comments("http://test.ts/id/1/comments")
    entry.pubdate(datetime.now(sao_paulo))
    entry = feed.add_item()
    entry.id("http://test.ts/id/2")
    entry.title("Monstro do Lago 2")
    entry.description("Este é o monstro do lago 2")
    entry.pubdate(datetime.now(sao_paulo))
    rss_str = feed.rss_str(pretty=True)
    self.set_header("Content-Type", 'application/xml; charset="utf-8"')
    self.write(rss_str)
    # Debug output only; the response is already written above.
    if self.is_browser_mobile():
        print("buu")
    else:
        print(self.request.headers["User-Agent"])
def render_feed(text_paths, outpath):
    """Render the most recent post as both RSS and Atom; return (rss, atom).

    `outpath` is only inspected for the substring 'python' to choose the
    feed's self link — the file itself is not written here.
    """
    # Feeds served at:
    # http://rhodesmill.org/brandon/feed
    # http://rhodesmill.org/brandon/category/python/feed
    # http://rhodesmill.org/brandon/feed/atom/
    t0 = datetime.min.time()

    def fix(d):
        # Promote a bare date to a US/Eastern-localized datetime at midnight.
        dt = datetime.combine(d, t0)
        return timezone('US/Eastern').localize(dt)

    posts = [post_info(path) for path in text_paths if date_of(path)]
    posts = sorted(posts, key=lambda post: post['date'])
    # Only the newest post is published in the feed.
    posts = posts[-1:]
    most_recent_date = max(post['date'] for post in posts)

    def full(url):
        # Make a site-relative URL absolute.
        return 'http://rhodesmill.org/' + url.lstrip('/')

    fg = FeedGenerator()
    fg.id(full('/'))
    fg.author({'name': 'Brandon Rhodes'})
    fg.language('en')
    fg.link(href=full('/brandon/'), rel='alternate')
    # The self link differs between the python-category feed and the main one.
    if 'python' in outpath:
        fg.link(href=full('/brandon/category/python/feed/'), rel='self')
    else:
        fg.link(href=full('/brandon/feed/'), rel='self')
    fg.subtitle('Thoughts and ideas from Brandon Rhodes')
    fg.title("Let's Discuss the Matter Further")
    fg.updated(fix(most_recent_date))
    for post in posts:
        url = full(post['url_path'])
        excerpt = truncate_at_more(post['body_html'], url)
        fe = fg.add_entry()
        fe.content(excerpt, type='html')
        fe.guid(url, permalink=True)
        fe.id(url)
        fe.link({'href': url})
        fe.published(fix(post['date']))
        fe.title(post['title'])
        fe.updated(fix(post['date']))
    # Order matters: render RSS first, then swap the self link to the Atom
    # URL (replace=True) before rendering the Atom variant.
    rss = fg.rss_str(pretty=True)
    fg.link(href=full('/brandon/feed/atom/'), rel='self', replace=True)
    atom = fg.atom_str(pretty=True)
    return rss, atom
def setup_feed():
    """Return the base podcast FeedGenerator for the saved-videos feed."""
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.language("en")
    feed.id("https://jdelman.me/potato")
    feed.author(name="Potato", email="*****@*****.**")
    feed.link(href="https://jdelman.me/potato", rel="alternate")
    feed.logo("https://jdelman.me/static/potato.jpg")
    feed.title("Potato - Josh's Saved Videos")
    feed.subtitle("Automatically generated RSS.")
    return feed
def build(self, rss=False):
    """Render all published posts plus the index page into ``self.basedir``.

    When *rss* is true, also write an ``rss.xml`` feed of the published
    posts and expose its URL on the site object.
    """
    if rss:
        self.site.rss_url = '/rss.xml'
    fg = FeedGenerator()
    fg.title(self.site.name)
    fg.author({'name': self.site.author})
    fg.link(href=self.site.base_url, rel='alternate')
    fg.subtitle(self.site.description)
    start = time.time()
    getLogger().info("Copy Assets")
    self.output.copyAssets(self.basedir)
    getLogger().info("Start Build of static content")
    posts = []
    for p in self.source.getPosts():
        if not p.published:
            # Fixed typo in log message ("Ingnoring" -> "Ignoring").
            getLogger().info("Ignoring draft Post %s (%s)", p.title, p.slug)
            continue
        posts.append(p)
        if rss:
            fe = fg.add_entry()
            fe.id("%s/%s" % (self.site.base_url, p.permalink))
            fe.link(href=fe.id(), rel='alternate')
            fe.title(p.title)
            # Fixed: pytz zones attached via replace(tzinfo=...) yield the
            # zone's LMT offset; localize() applies the correct offset.
            fe.published(pytz.timezone(self.site.timezone).localize(p.created_at))
            category = []
            for t in p.tags:
                category.append({'term': t})
            fe.category(category)
            fe.content(p.content)
        Output.storeData(os.path.join(self.basedir, p.permalink),
                         self.output.render(self.site, post=p))
        getLogger().debug("Adding Post \"%s\" (%s)", p.title, p.slug)
    # Newest first for the home page listing.
    posts = sorted(posts, key=lambda k: k.created_at, reverse=True)
    Output.storeData(os.path.join(self.basedir, 'index.html'),
                     self.output.render(self.site, posts=posts, post=None,
                                        is_home=True, pagination=None))
    if rss:
        Output.storeData(os.path.join(self.basedir, 'rss.xml'), fg.rss_str(pretty=True))
        # Fixed grammar in log message ("You awesome" -> "Your awesome").
        getLogger().debug("Your awesome RSS feed has been generated")
    getLogger().info("It took %d seconds to generate your awesome blog" % (time.time() - start))
def xml():
    """Render every podcast item as an RSS document (Flask Response)."""
    items = Item.query.join(Channel, Channel.id == Item.channel_id).add_columns(
        Item.update, Item.number, Item.file_url, Channel.title, Channel.name,
        Channel.personality, Channel.text, Channel.copyright,
        Channel.image_url).order_by(Item.update.desc()).all()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.link(href='http://www.onsen.ag/', rel='alternate')
    fg.title(u'音泉 for Podcast')
    fg.subtitle(u'音泉 アニメ・ゲーム・声優系ラジオ')
    # Hoisted out of the loop: the timezone never changes per item.
    tz = pytz.timezone('Asia/Tokyo')
    for item in items:
        fe = fg.add_entry()
        fe.id(item.file_url)
        fe.title(u'[{0}][{1}]{2}'.format(item.update.strftime('%Y/%m/%d'), item.number, item.title))
        fe.description(item.text)
        fe.enclosure(item.file_url, 0, 'audio/mpeg')
        # Fixed: replace(tzinfo=tz) attaches a pytz zone with its LMT
        # offset; localize() produces the correct JST (+09:00) offset.
        d = tz.localize(item.update)
        fe.pubdate(d.isoformat())
    xml = fg.rss_str(pretty=True)
    return Response(xml, mimetype='text/xml')
class CommonNetworkFeedgen(object):
    """ For setting up rss feeds """

    def __init__(self):
        # All metadata is static; configure the generator once, then store it.
        fg = FeedGenerator()
        fg.id('http://lernfunk.de/media/654321')
        fg.title('MediaKraken Notification Feed')
        fg.author({'name': 'John Doe', 'email': '*****@*****.**'})
        fg.link(href='http://example.com', rel='alternate')
        fg.logo('http://ex.com/logo.jpg')
        fg.subtitle('This is a cool feed!')
        fg.link(href='http://larskiesow.de/test.atom', rel='self')
        fg.language('en')
        self.feedgen_connection = fg
def render(self, data, accepted_media_type=None, renderer_context=None):
    """ Renders *obj* into serialized XML. """
    if data is None:
        return ''
    fg = FeedGenerator()
    view_name = type(renderer_context['view']).__name__
    if view_name == 'CollectionViewSet':
        # Collections carry their own feed metadata and resource list.
        feed_url = data['url']
        feed_title = data['name']
        feed_description = data['description']
        resources = data['resources']
    else:
        # Resource views share generic metadata; anything else gets an
        # empty resource list.
        feed_url = "http://social.honzta.es/api/resources/"
        feed_title = "SocialLearning resources"
        feed_description = "SocialLearning resources"
        resources = data if view_name in ['ResourceViewSet', 'ResourceSearch'] else []
    fg.id(feed_url)
    fg.title(feed_title)
    fg.subtitle(feed_description)
    fg.author({'name': 'SocialLearning', 'email': '*****@*****.**'})
    fg.link(href=feed_url, rel='alternate')
    fg.logo('http://www.ovtt.org/sites/default/files/styles/ampliacion_noticia/public/logo_hontza_fondo-claro_opaco.png')
    fg.link(href=feed_url + '?format=rss', rel='self')
    fg.language('en')
    for item in resources:
        fe = fg.add_entry()
        fe.id(item['resource'] or 'http://social.honzta.es/api/resources/1/')
        fe.title(item['title'] or 'Not available')
        fe.link({'href': item['url'] or 'http://social.honzta.es/api/resources/1/'})
    return fg.rss_str(pretty=True)
class Feed:
    """Podcast feed built from the audio files found under *audioDir*.

    Entries are discovered recursively; a file is included when its
    extension appears in the module-level MIME_TYPES mapping.
    """

    def __init__(self, baseURL, audioDir):
        self.baseURL = baseURL
        self.dir = audioDir
        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')
        self.fg.id(baseURL)
        self.fg.title('Yesterdays Baseball')
        self.fg.author(name='MLB')
        self.fg.link(href=baseURL, rel='alternate')
        self.fg.logo('http://en.wikipedia.org/wiki/Major_League_Baseball_logo#/media/File:Major_League_Baseball.svg')
        self.fg.icon('http://en.wikipedia.org/wiki/Major_League_Baseball_logo#/media/File:Major_League_Baseball.svg')
        self.fg.subtitle("Awright, 'arry? See that ludicrous display last night?")
        self.fg.link(href=baseURL + 'podcast.xml', rel='self')
        self.fg.language('en')
        self.fg.podcast.itunes_explicit('no')
        self.fg.podcast.itunes_complete('no')
        self.fg.podcast.itunes_new_feed_url(baseURL + 'podcast.xml')
        self.fg.podcast.itunes_summary("Awright, 'arry? See that ludicrous display last night?")
        self.addAllEntries()

    def __repr__(self):
        return self.fg.rss_str(pretty=True)

    def addAllEntries(self):
        # Walk the audio tree and add every file with a known MIME type.
        for root, dirs, files in os.walk(self.dir):
            for f in files:
                if os.path.splitext(f)[1] in MIME_TYPES.keys():
                    self.addEntry(root, f)

    def addEntry(self, root, f):
        path = os.path.join(root, f)
        fileName, fileExtension = os.path.splitext(f)
        # Fixed: Python 2 print statement -> print() function, matching the
        # rest of the file (same output).
        print("Adding...", path)
        fe = self.fg.add_entry()
        fe.id(self.baseURL + f)
        # Title/artist come from the file's ID3 tags (TIT2/TPE1 frames).
        mediafile = ID3(path)
        fe.title(mediafile['TIT2'].text[0] + " " + fileName)
        fe.summary(mediafile['TPE1'].text[0])
        fe.content(mediafile['TPE1'].text[0])
        fe.enclosure(self.baseURL + f, 0, MIME_TYPES[fileExtension])
def parseString(html, url, tag):
    """Build a FeedGenerator from every *tag* element in *html* that links somewhere.

    Relative hrefs are resolved against the scheme+host of *url*.
    """
    parsed_html = BeautifulSoup(html)
    parsedUrl = urlparse(url)
    baseUrl = parsedUrl.scheme + "://" + parsedUrl.netloc
    fg = FeedGenerator()
    fg.id(url)
    fg.title('Generated feed for ' + url)
    fg.link(href=url, rel='alternate')
    fg.subtitle('Autogenerated by alltorss.py based on tag ' + tag)
    for item in parsed_html.body.find_all(tag):
        topic = item.text.strip()
        # Prefer a link inside the element; fall back to an enclosing one.
        # Fixed: identity comparison with None (`is None`) instead of `!=`.
        link = findInnerLink(item)
        if link is None:
            link = findOuterLink(item)
        if isinstance(link, Tag) and link.has_attr('href'):
            linkHref = link['href']
            fe = fg.add_entry()
            fullLink = linkHref if linkIsAbsolute(linkHref) else baseUrl + linkHref
            fe.id(fullLink)
            fe.title(topic)
            fe.link(href=fullLink)
    return fg
def serve_filter(type, filtername):
    """Serve the stored filter *filtername* as an "atom" or "rss" feed string.

    Sets ``response.content_type`` as a side effect; on serialization
    failure returns a plain-text hint instead of raising.
    """
    try:
        fil = trn.select_unique(Filter, name=filtername, insert=False)
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit /
        # KeyboardInterrupt); still logged and re-raised.
        logging.exception("serve_filter failed for filter %s" % (filtername,))
        raise
    out_feed = FeedGenerator()
    out_feed.title(fil.title)
    out_feed.subtitle(fil.subtitle)
    out_feed.id(filtername)
    for entry in fil.entries():
        d = entry.definition
        out_entry = out_feed.add_entry()
        out_entry.title(d.title)
        out_entry.published(getattr(d, "published", None))
        out_entry.updated(getattr(d, "updated", None))
        out_entry.id(d.id)
        out_entry.summary(d.summary)
        for c in getattr(d, "content", []):
            out_entry.content(content=c.value, type=c.type)  # , src=c.base
        for l in getattr(d, "links", []):
            out_entry.link(link=l)
    try:
        if type == "atom":
            mimetype = "application/atom+xml"
            result = out_feed.atom_str()
        else:
            mimetype = "application/rss+xml"
            result = out_feed.rss_str()
    except Exception:
        # Serialization can fail (e.g. missing mandatory fields for the
        # chosen format); narrowed from a bare `except:`.
        logging.exception("%s error", type)
        mimetype = "text/plain"
        result = """ An error occurred while trying to produce this feed. You could try using %s instead. """ % ("rss" if type == "atom" else "atom",)
    response.content_type = mimetype
    return result
def generate_rss(pages_info=None):
    """Build the site RSS feed, write it under build/, and return the XML."""
    feed = FeedGenerator()
    feed.id(conf['base_url'])
    feed.title(conf['title'])
    site_author = {'name': conf['author'], 'email': conf['email']}
    feed.author(site_author)
    feed.link(href=conf['base_url'], rel='alternate')
    feed.subtitle(conf['description'])
    feed.link(href=conf['base_url'] + '/rss.xml', rel='self')
    feed.language('en')
    for post in pages_info:
        entry = feed.add_entry()
        entry.id('http://blog.elijahcaine.me/' + post['url'])
        entry.title(post['title'])
        entry.author({'name': conf['author'], 'email': conf['email']})
        entry.link(href=conf['base_url'] + post['url'], rel='alternate')
        entry.description(post['content']['fragment'])
    rssfeed = feed.rss_str(pretty=True)
    feed.rss_file('build/' + conf['rss_feed'])
    return rssfeed
def createFeed(links, titles):
    """Create the videocast feed file (rss.xml) and return the XML string."""
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.id("http://twitter.com/dorukcankisin")
    feed.title(DIZI_TITLE)
    feed.author({"name": "dorukcan kisin", "email": "*****@*****.**"})
    feed.link(href="http://twitter.com/dorukcankisin", rel="alternate")
    feed.logo(DIZI_LOGO)
    feed.subtitle(DIZI_TITLE + " videocast")
    feed.language("en")
    for i, video_url in enumerate(links):
        entry = feed.add_entry()
        entry.id(video_url)
        entry.enclosure(video_url, 0, "video/mp4")
        entry.title(titles[i])
        entry.description(titles[i])
    feed.rss_file("rss.xml")
    return feed.rss_str(pretty=True)
def rss():
    """Serve the site RSS feed listing all non-root concepts."""
    feed = FeedGenerator()
    feed.id('http://shockham.com/')
    feed.title('shockham.')
    feed.author({'name': 'shockham', 'email': ''})
    feed.link(href='http://shockham.com', rel='alternate')
    feed.logo(url_for('static', filename='images/new_logo.png'))
    feed.subtitle('RSS feed for shockhams site!')
    feed.link(href='http://shockham.com/rss', rel='self')
    feed.language('en')
    # Concepts with a real slug and a parent only.
    for concept in Concept.objects(Q(slug__ne='none') & Q(parent__ne='none')):
        entry = feed.add_entry()
        entry.id('http://shockham.com/' + concept.slug)
        entry.title(concept.title)
    response = make_response(feed.rss_str(pretty=True))
    response.headers["Content-Type"] = "application/xml"
    return response
def make_feed(query, url):
    """Build an event FeedGenerator for *query*; *url* is the self link."""
    query = query.options(joinedload(tables.Event.city))
    query = query.options(joinedload(tables.Event.venue))
    query = query.order_by(desc(tables.Event.date))
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id('http://pyvo.cz')
    fg.title('Pyvo')
    fg.logo('http://ex.com/logo.jpg')
    fg.link(href=url, rel='self')
    fg.subtitle('Srazy Pyvo.cz')
    for event in query:
        entry = fg.add_entry()
        # Distinct name so the `url` parameter (the feed URL) is preserved.
        entry_url = url_for('city', cityslug=event.city.slug, _external=True) + '#{}'.format(event.date)
        entry.id(entry_url)
        entry.link(href=entry_url, rel='alternate')
        entry.title(event.title)
        entry.summary(event.description)
        entry.published(event.start)
        entry.updated(event.start)
        # XXX: Put talks into fe.dscription(), videos in link(..., rel='related')
    return fg
def make_feed(filename='epistles.xml'):
    """Write a podcast RSS feed of the New Testament epistles to *filename*.

    One entry per division, scheduled one per day of the current month at
    the module-level `pubhour` in timezone `tz`.
    """
    fg = FeedGenerator()
    fg.title('Daily Epistles')
    fg.author({'name': 'Tim Hopper'})
    fg.subtitle('Listen to the New Testament epistles each month.')
    fg.language('en')
    fg.link(href='http://www.crossway.com', rel='alternate')
    now = datetime.datetime.now()
    for day, division in enumerate(get_divisons(), 1):
        entry = fg.add_entry()
        entry.id(division)
        entry.title(division)
        pubdate = datetime.datetime(year=now.year, month=now.month, day=day,
                                    hour=pubhour, tzinfo=tz)
        entry.published(pubdate)
        entry.enclosure(get_url(division), 0, 'audio/mpeg')
    # Fixed: honor the `filename` parameter instead of the hard-coded
    # 'epistles.xml' (the default keeps old callers working); also dropped
    # a discarded rss_str() call whose result was thrown away.
    fg.rss_file(filename)
def make_feed(query, url):
    """Build an event FeedGenerator for *query*; *url* is the self link."""
    query = (query.options(joinedload(tables.Event.city))
                  .options(joinedload(tables.Event.venue))
                  .order_by(desc(tables.Event.date)))
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id('http://pyvo.cz')
    fg.title('Pyvo')
    fg.logo(url_for('static', filename='images/krygl.png', _external=True))
    fg.link(href=url, rel='self')
    fg.subtitle('Srazy Pyvo.cz')
    for event in query:
        entry = fg.add_entry()
        event_url = filters.event_url(event, _external=True)
        entry.id(event_url)
        entry.link(href=event_url, rel='alternate')
        entry.title(event.title)
        entry.summary(event.description)
        entry.published(event.start)
        entry.updated(event.start)
        # XXX: Put talks into fe.dscription(), videos in link(..., rel='related')
    return fg
def get_feed(db):
    """Build the Academis blog RSS feed, write rss.xml, return the XML."""
    fg = FeedGenerator()
    fg.id('http://www.academis.eu/feed')
    fg.title('Academis Blog')
    fg.author({'name': 'Kristian Rother', 'email': '*****@*****.**'})
    fg.link(href='http://www.academis.eu', rel='alternate')
    fg.logo('http://www.academis.eu/static/images/academis_kr350.png')
    fg.subtitle('Articles on Python programming, Data analysis and Leadership in tech')
    fg.link(href='http://www.academis.eu/academis.atom', rel='self')
    fg.language('en')
    fg.contributor(name='Kristian Rother', email='*****@*****.**')
    post_url = 'http://www.academis.eu/posts/{}'
    for title, slug in get_all_posts(db):
        title, content = get_post(db, slug)
        entry = fg.add_entry()
        entry.id(post_url.format(slug))
        entry.link(href=post_url.format(slug))
        entry.title(title)
        entry.description(content[:300])
    rssfeed = fg.rss_str(pretty=True)
    fg.rss_file('rss.xml')  # Write the RSS feed to a file
    return rssfeed
def gen_rss(items, date_string, atom=False):
    """Render Open Hunt *items* for *date_string* as an RSS (or Atom) string."""
    fg = FeedGenerator()
    fg.title('Open Hunt: %s' % date_string)
    fg.id('%s/%s' % (BASE_URL, date_string))
    # Setting more fields than we really need, to appease the atom/rss
    # generators' mandatory-field checks.
    fg.author({'name': 'Christopher Su', 'email': '*****@*****.**'})
    fg.description('Open Hunt items for %s' % date_string)
    fg.subtitle('Open Hunt items for %s' % date_string)
    fg.language('en')
    if atom:
        fg.link({'href': '%s/%s/atom' % (BASE_URL, date_string), 'rel': 'self'})
    else:
        fg.link({'href': '%s/%s/rss' % (BASE_URL, date_string), 'rel': 'self'})
    counter = 0
    for item in items:
        fe = fg.add_entry()
        # id is a mandatory field for atom; a running counter is a poor id
        # (should probably use open hunt's slugs instead).
        if atom:
            # Fixed: entry ids must be strings — an int broke XML
            # serialization of the entry.
            fe.id(str(counter))
            counter += 1
        fe.link({'href': item['href'], 'rel': 'alternate'})
        fe.title(item['title'])
        fe.description(str(item['score']))
        fe.content(item['description'])
        fe.author({'name': item['author'], 'email': '@%s' % item['author']})
    if atom:
        return fg.atom_str(pretty=True)
    return fg.rss_str(pretty=True)