def build_entry(article, blog_uri):
    """Create a FeedEntry for one WordPress REST API article.

    :param article: article JSON dict (expects ``_embedded`` author/terms)
    :param blog_uri: base blog URL the entry link is built from
    :return: populated FeedEntry
    """
    embedded = article["_embedded"]
    author = embedded["author"][0]

    # wp:term index 1 holds the taxonomy used as feed categories.
    categories = [
        {"term": term["slug"], "label": term["name"]}
        for term in embedded.get("wp:term", [[], []])[1]
    ]

    entry = FeedEntry()
    entry.title(article["title"]["rendered"])
    entry.description(article["excerpt"]["rendered"])
    entry.content(article["content"]["rendered"])
    # NOTE(review): email is set to the author's *name* — feedgen's RSS writer
    # renders the email field as <author>, so this may be deliberate; confirm.
    entry.author(name=author["name"], email=author["name"])
    entry.link(href=f'{blog_uri}/{article["slug"]}')
    entry.category(categories)
    entry.published(f'{article["date_gmt"]} GMT')
    entry.updated(f'{article["modified_gmt"]} GMT')
    return entry
def get_feed_entry(media_file, basedir, baselink, image_url):
    '''
    Generate a feed entry based on ID3 Data

    :param media_file: file name relative to ``basedir``
    :param basedir: directory containing the media files
    :param baselink: base URL the id/link/enclosure URLs are built from
    :param image_url: per-feed image URL (currently unused, see bottom)
    :return: populated FeedEntry with the 'podcast' extension loaded

    TODO: Deal with files with no ID3 Data
    '''
    # BUG FIX: bare ``urllib.pathname2url`` is Python 2 only; on Python 3
    # (this file uses f-strings elsewhere) it raises AttributeError. The
    # function lives in urllib.request, imported locally here.
    import urllib.request

    fe = FeedEntry()
    fe.load_extension('podcast')
    file_path = '{}/{}'.format(basedir, media_file)
    media_info = EasyID3(file_path)
    media_length_s = mutagen.File(file_path).info.length
    media_length = datetime.timedelta(seconds=round(media_length_s))
    fe.title(media_info['title'][0])
    fe.description('Part {} of {}'.format(media_info['tracknumber'][0],
                                          media_info['album'][0]))
    fe.podcast.itunes_duration(media_length)
    url = '{}/{}'.format(baselink, urllib.request.pathname2url(media_file))
    fe.id(url)
    fe.link(href=url, rel='alternate')
    # File mtime (UTC) stands in for a publish date since ID3 carries none.
    fe.pubdate('{} +0000'.format(
        datetime.datetime.utcfromtimestamp(os.path.getmtime(file_path))))
    # NOTE(review): guess_type may return None for unknown extensions —
    # confirm all served media files use recognized extensions.
    fe.enclosure(url, str(os.path.getsize(file_path)),
                 mimetypes.guess_type(file_path)[0])
    # Found no need for this at this time since all podcasts have the same feed image
    # fe.podcast.itunes_image(image_url)
    return fe
def generate_item(msg):
    """Translate a mail message into a FeedEntry.

    :param msg: message object exposing sent_from, date, subject and body
    :return: populated FeedEntry
    """
    entry = FeedEntry()
    entry.author(msg.sent_from)
    entry.pubDate(msg.date)
    entry.title(msg.subject)
    # The HTML part of the body (may be absent) becomes the entry summary.
    entry.description(msg.body.get("html"), isSummary=True)
    return entry
def update_article(self, article: Article):
    """Insert or refresh *article* in the underlying feed generator.

    :param article: article whose link doubles as the stable entry id;
        an existing entry with the same id is replaced, not duplicated.
    """
    fe = FeedEntry()
    fe.id(article.link)
    fe.link(href=article.link)
    fe.title(article.title)
    fe.description(article.description)
    # Article dates are naive; stamp them as Asia/Shanghai local time.
    fe.pubDate(article.date.replace(tzinfo=gettz("Asia/Shanghai")))
    self.fg.entry(fe, replace=True)
    # BUG FIX: the previous trailing `self.fg.updated()` call was a no-op —
    # with no argument, feedgen's updated() is a getter and its (discarded)
    # return value never touched the feed. feedgen fills in the feed-level
    # updated timestamp at generation time when it is unset, so the dead
    # call is simply dropped.
def _build_feed(
    self, blog_url, feed_url, feed_title, feed_description, articles
):
    """
    Build the content for the feed

    :blog_url: string blog url
    :feed_url: string url
    :feed_title: string title
    :feed_description: string description
    :param articles: Articles to create feed from
    """
    feed = FeedGenerator()
    feed.generator("Python Feedgen")
    feed.title(feed_title)
    feed.description(feed_description)
    feed.link(href=feed_url, rel="self")

    # Delegate per-entry construction to the module-level build_entry helper,
    # which performs exactly the statements previously inlined here.
    for article in articles:
        feed.add_entry(build_entry(article, blog_url), order="append")

    return feed
def feed_entry(notice, url_root):
    """Build a FeedEntry for one security notice.

    :param notice: notice object exposing id, title, details and published
    :param url_root: site root the notice path is appended to
    :return: populated FeedEntry
    """
    notice_path = flask.url_for(".notice", notice_id=notice.id).lstrip("/")
    link = f"{url_root}{notice_path}"

    entry = FeedEntry()
    # The notice URL doubles as the stable entry id.
    entry.id(link)
    entry.title(f"{notice.id}: {notice.title}")
    entry.description(notice.details)
    entry.link(href=link)
    entry.published(f"{notice.published} UTC")
    entry.author({"name": "Ubuntu Security Team"})
    return entry
def _parse_html(self, html):
    """Turn every <li> in *html* into a FeedEntry.

    :param html: HTML document/fragment to scan for <li> elements
    :return: list of FeedEntry objects, one per <li>, with pubdates
        staggered one minute apart so the feed keeps a stable order
    """
    entries = []
    items = BeautifulSoup(html, 'html.parser').find_all('li')
    for idx, item in enumerate(items):
        entry = FeedEntry()
        entry.load_extension('dc')
        entry.title('title: <p> #%d' % idx)
        entry.link(href='http://%d.%s' % (idx, self.__URL), rel='alternate')
        entry.dc.dc_creator('author')
        entry.description('description: <p> #%d' % idx)
        entry.content('content: %s' % item.text, type='CDATA')
        entry.pubdate(datetime.now(pytz.utc) + timedelta(minutes=idx))
        entries.append(entry)
    return entries
def _get_entries(self):
    """Fetch the 20 most recent uploads and convert each to a FeedEntry.

    Pulls one page of the uploads playlist, batch-fetches full video
    metadata in a single videos().list call, and returns a list of
    FeedEntry objects (one per video, in API response order).
    """
    # First page of the uploads playlist; only the video ids are needed.
    playlist = self.__api.playlistItems().list(
        playlistId=self.__uploads_id,
        part="contentDetails",
        maxResults=20
    ).execute()
    # One batched metadata request for all listed videos.
    videos = self.__api.videos().list(
        id=','.join(item['contentDetails']['videoId'] for item in playlist['items']),
        part='snippet,contentDetails'
    ).execute()
    ret = []
    for item in videos['items']:
        snip = item['snippet']
        # NOTE(review): timedelta.seconds drops whole days, so a duration
        # of 24h or more would be mis-reported; consider total_seconds()
        # if such videos can occur — confirm against __parse_duration.
        duration = self.__parse_duration(aniso8601.parse_duration(item['contentDetails']['duration']).seconds)
        # Human-readable duration is appended to the title, e.g. "Name [12:34]".
        title = '%s [%s]' % (snip['title'], duration)
        e = FeedEntry()
        e.load_extension('dc')
        e.dc.dc_creator('none')
        e.title(title)
        e.link(href=self.__VIDEO_URL % item['id'], rel='alternate')
        e.description(title)
        e.pubdate(aniso8601.parse_datetime(snip['publishedAt']))
        content_args = {
            'image': snip['thumbnails']['high']['url'],
            'content': self.__parse_content(snip)
            # TODO: some comments i think?
            # 'comments':
        }
        e.content(self.__CONTENT % content_args, type='CDATA')
        ret.append(e)
    return ret