# Shared import assumed throughout these snippets.
from feedgen.entry import FeedEntry


def build_entry(article, blog_uri):
    title = article["title"]["rendered"]
    slug = article["slug"]
    author = article["_embedded"]["author"][0]
    description = article["excerpt"]["rendered"]
    content = article["content"]["rendered"]
    published = f'{article["date_gmt"]} GMT'
    updated = f'{article["modified_gmt"]} GMT'
    link = f"{blog_uri}/{slug}"
    categories = []

    if "wp:term" in article["_embedded"]:
        # "wp:term" groups the embedded terms by taxonomy; index 1 holds
        # the term group this feed publishes as categories.
        for category in article["_embedded"]["wp:term"][1]:
            categories.append(
                dict(term=category["slug"], label=category["name"])
            )

    entry = FeedEntry()
    entry.title(title)
    entry.description(description)
    entry.content(content)
    # The embedded author object carries no email field, so the author
    # name is reused for it.
    entry.author(name=author["name"], email=author["name"])
    entry.link(href=link)
    entry.category(categories)
    entry.published(published)
    entry.updated(updated)

    return entry

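# For reference, a trimmed sketch of the WordPress REST API article shape
# that build_entry() assumes (all values are placeholders):
_example_article = {
    "title": {"rendered": "Post title"},
    "slug": "post-title",
    "excerpt": {"rendered": "<p>Excerpt</p>"},
    "content": {"rendered": "<p>Body</p>"},
    "date_gmt": "2024-01-01T00:00:00",
    "modified_gmt": "2024-01-02T00:00:00",
    "_embedded": {
        "author": [{"name": "Jane Doe"}],
        "wp:term": [[], [{"slug": "news", "name": "News"}]],
    },
}
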
def _get_entries(self):
    ret = []
    chan_tag = self.__rss_xml.find('channel')
    # `ThreadPool` is assumed to be concurrent.futures.ThreadPoolExecutor
    # (it is constructed with max_workers and used as a context manager).
    with ThreadPool(max_workers=20) as pool:
        item_contents = pool.map(
            self.__get_html_content,
            [item_tag.find('link').text
             for item_tag in chan_tag.findall('item')]
        )
        for item_tag, content in zip(chan_tag.findall('item'), item_contents):
            e = FeedEntry()
            e.load_extension('dc')
            item_details = self.__get_xml_dict(
                item_tag,
                ['title', 'link', 'guid', 'pubDate',
                 '{%s}creator' % self.__DC_NS]
            )
            e.title(item_details['title'])
            e.link(href=item_details['link'], rel='alternate')
            e.guid(item_details['guid'])
            e.dc.dc_creator(item_details['{%s}creator' % self.__DC_NS])
            e.pubdate(dateparser.parse(item_details['pubDate']))
            e.content('<p>%s</p>' % content, type='CDATA')
            ret.append(e)
    return ret

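# A minimal sketch of the __get_xml_dict helper assumed above (a method of
# the same class; hypothetical implementation): map each requested child
# tag name, including namespaced names such as '{...}creator', to that
# element's text.
def __get_xml_dict(self, item_tag, tag_names):
    return {name: item_tag.find(name).text for name in tag_names}
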
def _generate_feed_entry(feed, max_ssh_info, config, atom_path):
    now = arrow.now()
    fe = FeedEntry()
    fe.title(
        "Storm Surge Alert for {[tide gauge stn]}".format(
            config["storm surge feeds"]["feeds"][feed]
        )
    )
    fe.id(
        _build_tag_uri(
            now.format("YYYY-MM-DD"), feed, now,
            config["storm surge feeds"], atom_path
        )
    )
    fe.author(
        name="Salish Sea MEOPAR Project",
        uri=f'https://{config["storm surge feeds"]["domain"]}/',
    )
    fe.content(_render_entry_content(feed, max_ssh_info, config), type="html")
    fe.link(
        rel="alternate",
        type="text/html",
        href=f'https://{config["storm surge feeds"]["domain"]}/'
        f"storm-surge/forecast.html",
    )
    return fe

def as_feed_entry(self) -> FeedEntry:
    result = FeedEntry()
    result.id(self.id)
    result.link(href=self.link)
    result.content(self.content)
    result.title(self.title())
    result.published(self.published)
    # enclosure(url, length, type); the length is hard-coded to 0 here
    result.enclosure(self.get_file_url(), 0, 'audio/mpeg')
    return result

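# Usage sketch (assumption: `episodes` yields objects implementing the
# as_feed_entry() method above; the feed metadata is placeholder): collect
# the entries into a FeedGenerator and render them as RSS.
from feedgen.feed import FeedGenerator


def build_podcast_feed(episodes):
    fg = FeedGenerator()
    fg.title('Example podcast')
    fg.link(href='https://example.com/feed.xml', rel='self')
    fg.description('Episodes rendered through as_feed_entry()')
    for episode in episodes:
        fg.add_entry(episode.as_feed_entry())
    return fg.rss_str(pretty=True)
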
def rss_entries(links, feed=None):
    entries = []
    for link in links:
        fe = FeedEntry()
        fe.title(link.title)
        fe.content(link.text)
        fe.summary("Post by {} in {}.".format(
            link.user.name, feed.name if feed else link.feed.name))
        fe.link(href=link.url)
        fe.published(link.created_at)
        fe.comments(link.url)
        fe.author(name=link.user.name)
        entries.append(fe)
    return entries

def _build_feed(
    self, blog_url, feed_url, feed_title, feed_description, articles
):
    """
    Build the content for the feed

    :param blog_url: string blog url
    :param feed_url: string feed url
    :param feed_title: string title
    :param feed_description: string description
    :param articles: Articles to create feed from
    """
    feed = FeedGenerator()
    feed.generator("Python Feedgen")
    feed.title(feed_title)
    feed.description(feed_description)
    feed.link(href=feed_url, rel="self")

    for article in articles:
        title = article["title"]["rendered"]
        slug = article["slug"]
        author = article["_embedded"]["author"][0]
        description = article["excerpt"]["rendered"]
        content = article["content"]["rendered"]
        published = f'{article["date_gmt"]} GMT'
        updated = f'{article["modified_gmt"]} GMT'
        link = f"{blog_url}/{slug}"

        categories = []
        if "wp:term" in article["_embedded"]:
            for category in article["_embedded"]["wp:term"][1]:
                categories.append(
                    dict(term=category["slug"], label=category["name"])
                )

        entry = FeedEntry()
        entry.title(title)
        entry.description(description)
        entry.content(content)
        entry.author(name=author["name"], email=author["name"])
        entry.link(href=link)
        entry.category(categories)
        entry.published(published)
        entry.updated(updated)

        feed.add_entry(entry, order="append")

    return feed

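# Usage sketch (assumed caller; the instance name and URLs are placeholders):
# build the feed from WordPress REST API article dicts, then render it with
# feedgen's rss_str().
def render_blog_feed(builder, articles):
    feed = builder._build_feed(
        "https://example.com/blog",
        "https://example.com/blog/feed.xml",
        "Example blog",
        "Example feed description",
        articles,
    )
    return feed.rss_str(pretty=True)
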
def generate_feed(stored_topics):
    fg = feed_header()
    for topic_id in stored_topics:
        topic = stored_topics[topic_id]
        if len(topic['comments']['comments']):
            last_comment_time = topic['comments']['comments'][-1]['date']
        else:
            last_comment_time = topic['utc_date']
        entry_id = '{}_{}'.format(topic['id'], last_comment_time)
        title = topic['title']
        if is_title_blacklisted(title):
            continue
        text = topic['text']
        text = cleanup_html(text)
        summary = text
        if len(summary) > 400:
            summary = summary[:390] + '...'
        # "Автор" is Russian for "Author"
        text += '<p>Автор: <b>{}</b></p>'.format(topic['author'])
        for comment in topic['comments']['comments']:
            rendered = render_comment(comment)
            if rendered:
                text += '<p>{}</p>'.format(rendered)
        text = text.replace('\n', '<br />')
        url = topic['link']
        author = topic['author']
        entry = FeedEntry()
        entry.id(entry_id)
        entry.title(title)
        # entry.description(summary)
        entry.content(text)
        entry.link({'href': url})
        entry.author({'name': author})
        fg.add_entry(entry)
    return fg

def make_entry(f, yargs, html):
    """Construct a (datetime, FeedEntry)..."""
    from feedgen.entry import FeedEntry
    import datetime
    import os

    uri = (yargs.feed_link
           + (str(f.parent) + "/").replace("./", "")
           + str(f.stem) + ".html")
    print(uri)
    title = str(f.stem).replace('_', ' ').title()
    updated = datetime.datetime.fromtimestamp(os.path.getmtime(f),
                                              datetime.timezone.utc)
    # anything YAML based to get better metadata goes here too, I suppose
    y = getyaml(f)
    print(y)
    if "title" in y:
        title = y["title"]
    e = FeedEntry()
    e.link(href=uri)
    e.id(uri)
    e.content(html)
    e.updated(updated)
    e.title(title)
    if "date" in y:
        d = y["date"]
        # anything other than the below is super messy
        e.published(
            datetime.datetime(d.year, d.month, d.day,
                              tzinfo=datetime.timezone.utc))
    if "keywords" in y:
        for k in y["keywords"]:
            e.category(category={'term': k, 'scheme': '', 'label': k})
    if "subtitle" in y:
        # close enough
        e.summary(y["subtitle"])
    if "abstract" in y:
        # but this is even better, if it exists
        e.summary(y["abstract"])
    return (updated, e)

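# Usage sketch (assumptions: `fg` is an existing FeedGenerator, `files` is
# an iterable of pathlib.Path sources, and render_html() is a hypothetical
# HTML renderer): make_entry returns (updated, FeedEntry) tuples precisely
# so the entries can be sorted by timestamp before being added to the feed.
def add_sorted_entries(fg, files, yargs):
    pairs = [make_entry(f, yargs, render_html(f)) for f in files]
    for _updated, entry in sorted(pairs, key=lambda pair: pair[0]):
        fg.add_entry(entry)
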
def users_logs_feed(context, request):
    entries = []
    for log in (
        request.db.query(Log)
        .filter(Log.creator_id == context.id)
        .order_by(Log.last_modified.desc())
        .limit(25).all()
    ):
        entry = FeedEntry()
        entry.id(str(log.id))
        entry.title(log.name)
        entry.content(log.summary or "No summary.")
        entry.published(utc.localize(log.created))
        entries.append(entry)
    return {
        "title": "%s's logs" % context.username,
        "entries": entries,
    }

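# Hypothetical renderer for the feed-view dicts returned by the users_*_feed
# views above: pack the entries into a FeedGenerator and serialize as Atom.
# The id, link, and author values are placeholders, not part of the views.
def render_feed(result, feed_url):
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    fg.id(feed_url)
    fg.title(result["title"])
    fg.author(name="feed-owner")  # placeholder author
    fg.link(href=feed_url, rel="self")
    for entry in result["entries"]:
        fg.add_entry(entry)
    return fg.atom_str(pretty=True)
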
def _parse_html(self, html):
    soup = BeautifulSoup(html, 'html.parser')
    ret = []
    for i, li in enumerate(soup.find_all('li')):
        e = FeedEntry()
        e.load_extension('dc')
        e.title('title: <p> #%d' % i)
        e.link(href='http://%d.%s' % (i, self.__URL), rel='alternate')
        e.dc.dc_creator('author')
        e.description('description: <p> #%d' % i)
        e.content('content: %s' % li.text, type='CDATA')
        e.pubdate(datetime.now(pytz.utc) + timedelta(minutes=i))
        ret.append(e)
    return ret

def users_subscriptions_feed(context, request):
    entries = []
    for subscription in (
        request.db.query(LogSubscription)
        .filter(LogSubscription.user_id == context.id)
        .order_by(LogSubscription.created.desc())
        .options(joinedload(LogSubscription.log).joinedload(Log.creator))
        .limit(25).all()
    ):
        entry = FeedEntry()
        entry.id(str(subscription.log.id))
        entry.title("%s subscribed to %s." %
                    (context.username, subscription.log.name))
        entry.content(subscription.log.summary or "No summary.")
        entry.published(utc.localize(subscription.created))
        entries.append(entry)
    return {
        "title": "%s's subscriptions" % context.username,
        "entries": entries,
    }

def users_favorites_feed(context, request):
    entries = []
    for favorite in (
        request.db.query(Favorite)
        .filter(Favorite.user_id == context.id)
        .order_by(Favorite.created.desc())
        .options(joinedload(Favorite.log).joinedload(Log.creator))
        .limit(25).all()
    ):
        entry = FeedEntry()
        entry.id(str(favorite.log.id))
        entry.title("%s favorited %s." % (context.username, favorite.log.name))
        entry.content(favorite.log.summary or "No summary.")
        entry.published(utc.localize(favorite.created))
        entries.append(entry)
    return {
        "title": "%s's favorites" % context.username,
        "entries": entries,
    }

def _get_entries(self):
    playlist = self.__api.playlistItems().list(
        playlistId=self.__uploads_id,
        part="contentDetails",
        maxResults=20
    ).execute()
    videos = self.__api.videos().list(
        id=','.join(item['contentDetails']['videoId']
                    for item in playlist['items']),
        part='snippet,contentDetails'
    ).execute()
    ret = []
    for item in videos['items']:
        snip = item['snippet']
        duration = self.__parse_duration(
            aniso8601.parse_duration(
                item['contentDetails']['duration']).seconds)
        title = '%s [%s]' % (snip['title'], duration)
        e = FeedEntry()
        e.load_extension('dc')
        e.dc.dc_creator('none')
        e.title(title)
        e.link(href=self.__VIDEO_URL % item['id'], rel='alternate')
        e.description(title)
        e.pubdate(aniso8601.parse_datetime(snip['publishedAt']))
        content_args = {
            'image': snip['thumbnails']['high']['url'],
            'content': self.__parse_content(snip)
            # TODO: some comments i think?
            # 'comments':
        }
        e.content(self.__CONTENT % content_args, type='CDATA')
        ret.append(e)
    return ret

async def recent_episodes():
    logo_link = url_for('static', filename='favicon.png', _external=True)
    feed = FeedGenerator()
    feed.title('Arrowverse.info - Recent Episodes')
    feed.id(request.url_root)
    feed.link(href=request.url)
    feed.logo(logo_link)
    feed.icon(logo_link)
    feed.language('en')

    hide_shows_list = request.args.getlist('hide_show')
    newest_first_episode_list = get_full_series_episode_list(
        excluded_series=hide_shows_list)[::-1]

    for episode in newest_first_episode_list[:15]:
        title = '{series} - {episode_id} - {episode_name}'.format(**episode)
        content = ('{series} {episode_id} {episode_name} '
                   'will air on {air_date}'.format(**episode))
        show_dict = app.config['SHOW_DICT_WITH_NAMES'][episode['series']]
        data_source = f"{show_dict['root']}{show_dict['url']}"

        feed_entry = FeedEntry()
        feed_entry.id(data_source)
        feed_entry.link({'href': data_source})
        feed_entry.title(title)
        feed_entry.content(content, type='text')
        feed_entry.author(uri=show_dict['root'])
        feed.add_entry(feed_entry)

    # Quart-style async handler: make_response is awaited
    response = await make_response(feed.atom_str(pretty=True))
    response.headers['Content-Type'] = 'application/atom+xml'
    return response