def get_feed_entry(media_file, basedir, baselink, image_url):
    """Generate a podcast feed entry from a media file's ID3 data.

    Files with missing ID3 tags no longer raise KeyError: the title falls
    back to the filename and track/album fall back to placeholders.

    :param media_file: file name of the media file, relative to ``basedir``
    :param basedir: directory containing the media file
    :param baselink: base URL the enclosure/link URLs are built from
    :param image_url: currently unused (all podcasts share the feed image)
    :returns: populated :class:`FeedEntry` with the podcast extension loaded
    """
    # Python 3 location; bare urllib.pathname2url was the Python 2 API.
    from urllib.request import pathname2url

    fe = FeedEntry()
    fe.load_extension('podcast')
    file_path = '{}/{}'.format(basedir, media_file)
    media_info = EasyID3(file_path)
    media_length_s = mutagen.File(file_path).info.length
    media_length = datetime.timedelta(seconds=round(media_length_s))
    # EasyID3 is dict-like; .get() lets untagged files fall back to defaults
    # instead of raising KeyError (resolves the old TODO).
    title = media_info.get('title', [os.path.splitext(os.path.basename(media_file))[0]])[0]
    track = media_info.get('tracknumber', ['?'])[0]
    album = media_info.get('album', ['Unknown album'])[0]
    fe.title(title)
    fe.description('Part {} of {}'.format(track, album))
    fe.podcast.itunes_duration(media_length)
    url = '{}/{}'.format(baselink, pathname2url(media_file))
    fe.id(url)
    fe.link(href=url, rel='alternate')
    # File mtime stands in for a publication date; formatted as UTC.
    fe.pubdate('{} +0000'.format(datetime.datetime.utcfromtimestamp(os.path.getmtime(file_path))))
    fe.enclosure(url, str(os.path.getsize(file_path)), mimetypes.guess_type(file_path)[0])
    # Found no need for this at this time since all podcasts have the same feed image
    # fe.podcast.itunes_image(image_url)
    return fe
def _generate_feed_entry(feed, max_ssh_info, config, atom_path):
    """Build the Atom entry announcing a storm surge alert for *feed*."""
    timestamp = arrow.now()
    feeds_config = config["storm surge feeds"]
    domain_url = f'https://{feeds_config["domain"]}/'
    entry = FeedEntry()
    entry.title(
        "Storm Surge Alert for {[tide gauge stn]}".format(feeds_config["feeds"][feed])
    )
    entry.id(
        _build_tag_uri(
            timestamp.format("YYYY-MM-DD"), feed, timestamp, feeds_config, atom_path
        )
    )
    entry.author(name="Salish Sea MEOPAR Project", uri=domain_url)
    entry.content(_render_entry_content(feed, max_ssh_info, config), type="html")
    entry.link(
        rel="alternate",
        type="text/html",
        href=f"{domain_url}storm-surge/forecast.html",
    )
    return entry
def update_article(self, article: Article):
    """Insert or replace the feed entry for *article* in ``self.fg``."""
    entry = FeedEntry()
    entry.id(article.link)
    entry.link(href=article.link)
    entry.title(article.title)
    entry.description(article.description)
    shanghai = gettz("Asia/Shanghai")
    entry.pubDate(article.date.replace(tzinfo=shanghai))
    # replace=True: an existing entry with the same id is overwritten.
    self.fg.entry(entry, replace=True)
    # NOTE(review): feedgen's updated() with no argument is a getter, so this
    # call appears to be a no-op — confirm whether fg.updated(<now>) was meant.
    self.fg.updated()
def as_feed_entry(self) -> FeedEntry:
    """Render this object as a feedgen ``FeedEntry`` with an MP3 enclosure."""
    entry = FeedEntry()
    entry.id(self.id)
    entry.title(self.title())
    entry.link(href=self.link)
    entry.published(self.published)
    entry.content(self.content)
    # Enclosure length 0: the actual byte size is not known here.
    entry.enclosure(self.get_file_url(), 0, 'audio/mpeg')
    return entry
async def create_entry(article):
    """Build a ``FeedEntry`` from one scraped article element."""
    heading = article.select_one(".main-article-heading-box")
    href = heading.select_one("a").get("href")
    author_box = article.select_one(".main-article-author-box")

    entry = FeedEntry()
    entry.id(href)
    entry.link(href=href)
    entry.title(" / ".join(span.text for span in heading.select("span")))
    entry.author({"name": author_box.select_one("a").text.strip()})
    entry.summary(article.select_one(".perex-lim").text.strip())
    # Site shows naive local-format timestamps, e.g. "24.12.2023, 18:30".
    raw_date = author_box.select_one("small").text.strip()
    published = datetime.strptime(raw_date, "%d.%m.%Y, %H:%M")
    entry.published(published.replace(tzinfo=timezone.utc))
    return entry
def _generate_feed_entry(feed, max_ssh_info, config, atom_path):
    '''Create the Atom feed entry for a storm surge alert.'''
    issued = arrow.now()
    ssf = config['storm surge feeds']
    fe = FeedEntry()
    fe.title('Storm Surge Alert for {[tide gauge stn]}'.format(ssf['feeds'][feed]))
    fe.id(_build_tag_uri(issued.format('YYYY-MM-DD'), feed, issued, ssf, atom_path))
    fe.author(name='Salish Sea MEOPAR Project', uri=f"https://{ssf['domain']}/")
    fe.content(_render_entry_content(feed, max_ssh_info, config), type='html')
    fe.link(
        rel='alternate',
        type='text/html',
        href=f"https://{ssf['domain']}/storm-surge/forecast.html",
    )
    return fe
def users_logs_feed(context, request):
    """Feed data for the 25 most recently modified logs created by *context*."""
    recent_logs = (
        request.db.query(Log)
        .filter(Log.creator_id == context.id)
        .order_by(Log.last_modified.desc())
        .limit(25)
        .all()
    )
    entries = []
    for log in recent_logs:
        entry = FeedEntry()
        entry.id(str(log.id))
        entry.title(log.name)
        entry.content(log.summary or "No summary.")
        entry.published(utc.localize(log.created))
        entries.append(entry)
    return {
        "title": "%s's logs" % context.username,
        "entries": entries,
    }
def feed_entry(notice, url_root):
    """Build an RSS entry for a security *notice*, with links under *url_root*."""
    notice_path = flask.url_for(".notice", notice_id=notice.id).lstrip("/")
    link = f"{url_root}{notice_path}"

    entry = FeedEntry()
    entry.id(link)
    entry.title(f"{notice.id}: {notice.title}")
    entry.description(notice.details)
    entry.link(href=link)
    entry.published(f"{notice.published} UTC")
    entry.author({"name": "Ubuntu Security Team"})
    return entry
def generate_feed(stored_topics):
    """Build a feed from *stored_topics*, skipping blacklisted titles.

    Each entry's id embeds the timestamp of the newest comment so a topic
    that gains comments surfaces as a new entry in feed readers.

    :param stored_topics: mapping of topic id -> topic dict (with
        ``comments``, ``title``, ``text``, ``author``, ``link``, ...)
    :returns: the feed generator returned by :func:`feed_header` with all
        entries added
    """
    fg = feed_header()
    for topic_id in stored_topics:
        topic = stored_topics[topic_id]
        comments = topic['comments']['comments']
        # Newest comment's date (or the topic's own date when uncommented)
        # keys the entry id, so updated discussions re-appear as new.
        last_comment_time = comments[-1]['date'] if comments else topic['utc_date']
        entry_id = '{}_{}'.format(topic['id'], last_comment_time)
        title = topic['title']
        if is_title_blacklisted(title):
            continue
        text = cleanup_html(topic['text'])
        # entry.description() was intentionally disabled in favor of full
        # content, so the old truncated-summary computation was removed.
        text += '<p>Автор: <b>{}</b></p>'.format(topic['author'])
        for comment in comments:
            rendered = render_comment(comment)
            if rendered:
                text += '<p>{}</p>'.format(rendered)
        text = text.replace('\n', '<br />')
        entry = FeedEntry()
        entry.id(entry_id)
        entry.title(title)
        entry.content(text)
        entry.link(href=topic['link'])
        entry.author({'name': topic['author']})
        fg.add_entry(entry)
    return fg
def make_entry(f, yargs, html):
    """Construct a ``(datetime, FeedEntry)`` pair for the page rendered from *f*."""
    from feedgen.entry import FeedEntry

    relative_path = (str(f.parent) + "/").replace("./", "") + str(f.stem) + ".html"
    uri = yargs.feed_link + relative_path
    print(uri)
    updated = datetime.datetime.fromtimestamp(os.path.getmtime(f),
                                              datetime.timezone.utc)
    # YAML front matter, when present, supplies better metadata than the filename.
    meta = getyaml(f)
    print(meta)
    title = meta.get("title", str(f.stem).replace('_', ' ').title())

    entry = FeedEntry()
    entry.link(href=uri)
    entry.id(uri)
    entry.content(html)
    entry.updated(updated)
    entry.title(title)
    if "date" in meta:
        d = meta["date"]
        # Rebuilding from y/m/d is the least messy way to attach a timezone.
        entry.published(datetime.datetime(d.year, d.month, d.day,
                                          tzinfo=datetime.timezone.utc))
    if "keywords" in meta:
        for keyword in meta["keywords"]:
            entry.category(category={'term': keyword, 'scheme': '', 'label': keyword})
    if "subtitle" in meta:
        # close enough
        entry.summary(meta["subtitle"])
    if "abstract" in meta:
        # but this is even better, if it exists (overrides subtitle)
        entry.summary(meta["abstract"])
    return (updated, entry)
def users_favorites_feed(context, request):
    """Feed data for the 25 most recent logs favorited by *context*."""
    recent_favorites = (
        request.db.query(Favorite)
        .filter(Favorite.user_id == context.id)
        .order_by(Favorite.created.desc())
        .options(joinedload(Favorite.log).joinedload(Log.creator))
        .limit(25)
        .all()
    )
    entries = []
    for favorite in recent_favorites:
        entry = FeedEntry()
        entry.id(str(favorite.log.id))
        entry.title("%s favorited %s." % (context.username, favorite.log.name))
        entry.content(favorite.log.summary or "No summary.")
        entry.published(utc.localize(favorite.created))
        entries.append(entry)
    return {
        "title": "%s's favorites" % context.username,
        "entries": entries,
    }
def users_logs_feed(context, request):
    """Feed data: the 25 most recently modified logs created by this user."""
    def build_entry(log):
        # One feed entry per log, published at the log's creation time.
        entry = FeedEntry()
        entry.id(str(log.id))
        entry.title(log.name)
        entry.content(log.summary or "No summary.")
        entry.published(utc.localize(log.created))
        return entry

    query = (
        request.db.query(Log)
        .filter(Log.creator_id == context.id)
        .order_by(Log.last_modified.desc())
        .limit(25)
    )
    return {
        "title": "%s's logs" % context.username,
        "entries": [build_entry(log) for log in query.all()],
    }
def users_subscriptions_feed(context, request):
    """Feed data for the 25 most recent log subscriptions of *context*."""
    recent_subscriptions = (
        request.db.query(LogSubscription)
        .filter(LogSubscription.user_id == context.id)
        .order_by(LogSubscription.created.desc())
        .options(joinedload(LogSubscription.log).joinedload(Log.creator))
        .limit(25)
        .all()
    )
    entries = []
    for subscription in recent_subscriptions:
        entry = FeedEntry()
        entry.id(str(subscription.log.id))
        entry.title("%s subscribed to %s." % (context.username,
                                              subscription.log.name))
        entry.content(subscription.log.summary or "No summary.")
        entry.published(utc.localize(subscription.created))
        entries.append(entry)
    return {
        "title": "%s's subscriptions" % context.username,
        "entries": entries,
    }
def users_favorites_feed(context, request):
    """Feed data: the 25 most recently favorited logs for this user."""
    def build_entry(favorite):
        # Entry id is the favorited log's id; published at favorite time.
        entry = FeedEntry()
        entry.id(str(favorite.log.id))
        entry.title("%s favorited %s." % (context.username, favorite.log.name))
        entry.content(favorite.log.summary or "No summary.")
        entry.published(utc.localize(favorite.created))
        return entry

    query = (
        request.db.query(Favorite)
        .filter(Favorite.user_id == context.id)
        .order_by(Favorite.created.desc())
        .options(joinedload(Favorite.log).joinedload(Log.creator))
        .limit(25)
    )
    return {
        "title": "%s's favorites" % context.username,
        "entries": [build_entry(favorite) for favorite in query.all()],
    }
def users_subscriptions_feed(context, request):
    """Feed data: the 25 most recent log subscriptions for this user."""
    def build_entry(subscription):
        # Entry id is the subscribed log's id; published at subscription time.
        entry = FeedEntry()
        entry.id(str(subscription.log.id))
        entry.title("%s subscribed to %s." % (context.username,
                                              subscription.log.name))
        entry.content(subscription.log.summary or "No summary.")
        entry.published(utc.localize(subscription.created))
        return entry

    query = (
        request.db.query(LogSubscription)
        .filter(LogSubscription.user_id == context.id)
        .order_by(LogSubscription.created.desc())
        .options(joinedload(LogSubscription.log).joinedload(Log.creator))
        .limit(25)
    )
    return {
        "title": "%s's subscriptions" % context.username,
        "entries": [build_entry(subscription) for subscription in query.all()],
    }
async def recent_episodes():
    """Serve an Atom feed of the 15 most recently listed episodes."""
    logo_link = url_for('static', filename='favicon.png', _external=True)

    feed = FeedGenerator()
    feed.title('Arrowverse.info - Recent Episodes')
    feed.id(request.url_root)
    feed.link(href=request.url)
    feed.logo(logo_link)
    feed.icon(logo_link)
    feed.language('en')

    excluded_shows = request.args.getlist('hide_show')
    # Episode list is oldest-first; reverse so the newest come first.
    episodes = get_full_series_episode_list(excluded_series=excluded_shows)[::-1]
    for episode in episodes[:15]:
        show = app.config['SHOW_DICT_WITH_NAMES'][episode['series']]
        source_url = f"{show['root']}{show['url']}"

        entry = FeedEntry()
        entry.id(source_url)
        entry.link({'href': source_url})
        entry.title('{series} - {episode_id} - {episode_name}'.format(**episode))
        entry.content(
            '{series} {episode_id} {episode_name} will air on {air_date}'.format(**episode),
            type='text')
        entry.author(uri=show['root'])
        feed.add_entry(entry)

    response = await make_response(feed.atom_str(pretty=True))
    response.headers['Content-Type'] = 'application/atom+xml'
    return response