def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = '%s/%s' % (self.site_url, item.url)
    appendContent = ""
    appendTitle = ""

    if hasattr(item, "link") and feed.feed['link_blog']:
        appendContent = '<p><a href="%s">%s</a></p>' % (link, '∞')
        appendTitle = " →"
        link = item.link

    is_rss = isinstance(feed, Rss201rev2Feed)
    if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):
        description = item.summary
    else:
        description = item.get_content(self.site_url)

    feed.add_item(
        title=title + appendTitle,
        link=link,
        unique_id=get_tag_uri(link, item.date),
        description=description + appendContent,
        content=item.get_content(self.site_url),
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = '%s/%s' % (self.site_url, item.url)

    description = item.get_content(self.site_url)
    try:
        if self.settings['FEED_USE_SUMMARY']:
            description = item.summary
    except (KeyError, AttributeError):
        # Setting not defined or the item has no summary; keep full content.
        pass

    feed.add_item(
        title=title,
        link=link,
        unique_id='tag:%s,%s:%s' % (urlparse(link).netloc,
                                    item.date.date(),
                                    urlparse(link).path.lstrip('/')),
        description=description,
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
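# Hedged configuration sketch for the variant above: FEED_USE_SUMMARY and
# TIMEZONE are the settings it reads; the values shown are only illustrative.
#
# pelicanconf.py
FEED_USE_SUMMARY = True       # put item.summary into the feed description
TIMEZONE = 'Europe/Paris'     # consumed by set_date_tzinfo for pubdate/updateddate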
def filetime_from_hg(content): if isinstance(content, contents.Static): return if "date" in content.metadata: # if user did explicitely set a date, do not overwrite it return repo = hglib.open(".") tz_name = content.settings.get("TIMEZONE", None) hgtime = content.metadata.get("hgtime", "yes").lower() if hgtime in ("no", "off", "false", "0"): return # 1. file is not managed by hg # date: fs time # 2. file is staged, but has no commits # date: fs time # 3. file is managed, and clean # date: first commit time, update: last commit time or None # 4. file is managed, but dirty # date: first commit time, update: fs time path = content.source_path root = repo.root() filelog = repo.log( revrange=".:0", files=[ path, ], follow=content.settings.get("HG_FILETIME_FOLLOW", False), ) if filelog: # has commited content.date = set_date_tzinfo(filelog[-1][6], tz_name) if path in [ os.path.join(root, mfile) for flag, mfile in repo.status(modified=True) ]: # file is modified in the wd content.modified = datetime_from_timestamp( os.stat(path).st_ctime, content) else: # file is not changed if len(filelog) > 1: content.modified = set_date_tzinfo(filelog[0][6], tz_name) else: # file is not managed by hg content.date = datetime_from_timestamp(os.stat(path).st_ctime, content) if not hasattr(content, "modified"): content.modified = content.date content.locale_date = strftime(content.date, content.date_format) content.locale_modified = strftime(content.modified, content.date_format) # ensure the metadata directory is synchronized. Might be used by # some other plugin (eg. series) content.metadata["modified"] = content.modified content.metadata["date"] = content.date
def filetime_from_hg(content):
    if isinstance(content, contents.Static):
        return
    if 'date' in content.metadata:
        # if the user explicitly set a date, do not overwrite it
        return

    repo = hglib.open('.')
    tz_name = content.settings.get('TIMEZONE', None)

    hgtime = content.metadata.get('hgtime', 'yes').lower()
    if hgtime in ('no', 'off', 'false', '0'):
        return

    # 1. file is not managed by hg
    #    date: fs time
    # 2. file is staged, but has no commits
    #    date: fs time
    # 3. file is managed, and clean
    #    date: first commit time, update: last commit time or None
    # 4. file is managed, but dirty
    #    date: first commit time, update: fs time
    path = content.source_path
    root = repo.root()

    filelog = repo.log(
        revrange='.:0',
        files=[path],
        follow=content.settings.get('HG_FILETIME_FOLLOW', False))
    if filelog:
        # the file has been committed
        content.date = set_date_tzinfo(filelog[-1][6], tz_name)

        if path in [os.path.join(root, mfile)
                    for flag, mfile in repo.status(modified=True)]:
            # file is modified in the working directory
            content.modified = datetime_from_timestamp(
                os.stat(path).st_ctime, content)
        else:
            # file is unchanged
            if len(filelog) > 1:
                content.modified = set_date_tzinfo(filelog[0][6], tz_name)
    else:
        # file is not managed by hg
        content.date = datetime_from_timestamp(os.stat(path).st_ctime, content)

    if not hasattr(content, 'modified'):
        content.modified = content.date

    content.locale_date = strftime(content.date, content.date_format)
    content.locale_modified = strftime(content.modified, content.date_format)

    # ensure the metadata dictionary is synchronized; it might be used by
    # some other plugin (e.g. series)
    content.metadata['modified'] = content.modified
    content.metadata['date'] = content.date
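# Minimal wiring sketch, assuming the Mercurial variants above live in a
# Pelican plugin attached to the content_object_init signal; the register()
# hook is an assumption based on how comparable filetime plugins hook in,
# not part of the snippets themselves.
from pelican import signals


def register():
    signals.content_object_init.connect(filetime_from_hg)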
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = '%s/%s' % (self.site_url, item.url)
    feed.add_item(title=title,
                  link=link,
                  unique_id=get_tag_uri(link, item.date),
                  description=item.get_content(self.site_url),
                  categories=item.tags if hasattr(item, 'tags') else None,
                  author_name=getattr(item, 'author', ''),
                  pubdate=set_date_tzinfo(
                      item.date, self.settings.get('TIMEZONE', None)),
                  updateddate=set_date_tzinfo(
                      item.modified, self.settings.get('TIMEZONE', None))
                  if hasattr(item, 'modified') else None)
def set_update_date(content):
    '''Read 'update' metadata or the filesystem's mtime.'''
    if not content._context:
        return

    updatedateSet = False
    for k, v in content.metadata.items():
        if "update" == k.lower():
            content.updatedate = get_date(v)
            updatedateSet = True
            break

    if not updatedateSet:
        if content.settings.get('UPDATEDATE_MODE', '') == 'metadata':
            content.updatedate = content.date
        else:
            try:
                content.updatedate = datetime.fromtimestamp(
                    os.path.getmtime(content.source_path))
                content.updatedate = content.updatedate.replace(microsecond=0)
            except os.error:
                logging.error("{} does not exist or is not readable".format(
                    content.source_path))

    if hasattr(content, 'date') and content.date.tzinfo is not None:
        content.updatedate = set_date_tzinfo(content.updatedate,
                                             content.date.tzinfo.zone)
def datetime_from_timestamp(timestamp, content): """ Helper function to add timezone information to datetime, so that datetime is comparable to other datetime objects in recent versions that now also have timezone information. """ return set_date_tzinfo(datetime.fromtimestamp(timestamp), tz_name=content.settings.get("TIMEZONE", None))
def datetime_from_timestamp(timestamp, content): """ Helper function to add timezone information to datetime, so that datetime is comparable to other datetime objects in recent versions that now also have timezone information. """ return set_date_tzinfo(datetime.fromtimestamp(timestamp), tz_name=content.settings.get('TIMEZONE', None))
def get_commit_date(commit, tz_name):
    '''
    Get the datetime of a commit's committed_date.
    '''
    return set_date_tzinfo(
        datetime.fromtimestamp(commit.committed_date), tz_name=tz_name)
def _add_item_to_the_feed(self, feed, item): """Performs an 'in-place' update of existing 'published' articles in ``feed`` by creating a new entry using the contents from the ``item`` being passed. This method is invoked by pelican's core. :param feed: A Feed instance. :param item: An article (pelican's Article object). """ title = Markup(item.title).striptags() link = '%s/%s' % (self.site_url, item.url) appendContent = "" appendTitle = "" if hasattr(item, "link"): appendContent = '<p><a href="%s">%s</a></p>' % ( link, self.settings.get('LINK_BLOG_PERMALINK_GLYPH', '∞')) appendTitle = self.settings.get('LINK_BLOG_APPEND_TITLE', '') link = item.link feed.add_item( title=title + appendTitle, link=link, unique_id=get_tag_uri(link, item.date), description=item.get_content(self.site_url) + appendContent, categories=item.tags if hasattr(item, 'tags') else None, author_name=getattr(item, 'author', ''), pubdate=set_date_tzinfo( item.modified if hasattr(item, 'modified') else item.date, self.settings.get('TIMEZONE', None)))
def _add_item_to_the_feed(self, feed, item):
    if 'FEED_FOOTER_INSERT_HTML' not in self.settings:
        super(FeedWriter, self)._add_item_to_the_feed(feed, item)
        return

    data_dict = {
        'title': item.title,
        'url': item.url,
        'author': item.author.name,
        'authors': ','.join([x.name for x in item.authors]),
        'slug': item.slug,
        'category': item.category,
        'summary': item.summary,
    }
    feed_foot_insert_html = self.settings['FEED_FOOTER_INSERT_HTML'] % data_dict

    title = Markup(item.title).striptags()
    description = item.get_content(self.site_url) + feed_foot_insert_html
    link = '%s/%s' % (self.site_url, item.url)
    feed.add_item(
        title=title,
        link=link,
        unique_id='tag:%s,%s:%s' % (urlparse(link).netloc,
                                    item.date.date(),
                                    urlparse(link).path.lstrip('/')),
        description=description,
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.modified if hasattr(item, 'modified') else item.date,
            self.settings.get('TIMEZONE', None)))
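# Hedged example of a FEED_FOOTER_INSERT_HTML value; the %-style placeholders
# must match the keys of data_dict built above (title, url, author, authors,
# slug, category, summary). The markup itself is purely illustrative.
FEED_FOOTER_INSERT_HTML = (
    '<hr/><p>Originally published as <a href="%(url)s">%(title)s</a> '
    'by %(author)s.</p>'
)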
def get_commit_date(commit, tz_name):
    '''Get the datetime of a commit's committed_date.'''
    return set_date_tzinfo(
        datetime.fromtimestamp(mktime(commit.committed_date)),
        tz_name=tz_name)
def set_update_date(content):
    '''Read 'update' metadata or the filesystem's mtime.'''
    if not content._context:
        return

    updatedateSet = False
    for k, v in content.metadata.items():
        if "update" == k.lower():
            content.updatedate = get_date(v)
            updatedateSet = True
            break

    if not updatedateSet:
        if content.settings.get('UPDATEDATE_MODE', '') == 'metadata':
            content.updatedate = content.date
        else:
            try:
                content.updatedate = datetime.fromtimestamp(
                    os.path.getmtime(content.source_path))
                content.updatedate = content.updatedate.replace(microsecond=0)
            except os.error:
                logging.error("{} does not exist or is not readable".format(
                    content.source_path))

    if hasattr(content, 'date') and content.date.tzinfo is not None:
        content.updatedate = set_date_tzinfo(content.updatedate,
                                             content.date.tzinfo.zone)
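# The setting the update-date helpers above consult, with an illustrative
# value: 'metadata' makes them fall back to content.date, while any other
# value (or no setting) falls back to the source file's mtime.
UPDATEDATE_MODE = 'metadata'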
def _add_item_to_the_feed(self, feed, item): if self.settings["FEED_USE_SUMMARY"]: title = Markup(item.title).striptags() link = "%s/%s" % (self.site_url, item.url) feed.add_item( title=title, link=link, unique_id="tag:%s,%s:%s" % ( urlparse(link).netloc, item.date.date(), urlparse(link).path.lstrip("/"), ), description=item.summary if hasattr(item, "summary") else item.get_content(self.site_url), categories=item.tags if hasattr(item, "tags") else None, author_name=getattr(item, "author", ""), pubdate=set_date_tzinfo( item.modified if hasattr(item, "modified") else item.date, self.settings.get("TIMEZONE", None), ), ) else: super(FeedSummaryWriter, self)._add_item_to_the_feed(feed, item)
def get_commit_date(commit, tz_name): """ Get datetime of commit comitted_date """ return set_date_tzinfo( datetime.fromtimestamp(commit.committed_date), tz_name=tz_name )
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = '%s/%s' % (self.site_url, item.url)
    feed.add_item(
        title=title,
        link=link,
        unique_id=get_tag_uri(link, item.date),
        description=item.get_content(self.site_url),
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = self.urljoiner(self.site_url, item.url)

    if isinstance(feed, Rss201rev2Feed):
        # RSS feeds use a single tag called 'description' for both the full
        # content and the summary
        content = None
        if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
            description = item.summary
        else:
            description = item.get_content(self.site_url)
    else:
        # Atom feeds have two different tags for full content (called
        # 'content' by feedgenerator) and summary (called 'description' by
        # feedgenerator).
        #
        # It does not make sense to have the summary be the exact same thing
        # as the full content. If we detect that they are, we just remove
        # the summary.
        content = item.get_content(self.site_url)
        description = item.summary
        if description == content:
            description = None

    categories = list()
    if hasattr(item, 'category'):
        categories.append(item.category)
    if hasattr(item, 'tags'):
        categories.extend(item.tags)

    feed.add_item(
        title=title,
        link=link,
        unique_id=get_tag_uri(link, item.date),
        description=description,
        content=content,
        categories=categories if categories else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
def from_article(cls, json_feed_generator, article):
    title = Markup(article.title).striptags()
    summary = article._summary if hasattr(article, '_summary') else None
    item_url = json_feed_generator.build_url(article.url)
    return cls(
        item_id=item_url,
        url=item_url,
        title=title,
        content_html=article.content,
        summary=summary,
        date_published=set_date_tzinfo(
            article.date,
            json_feed_generator.settings.get('TIMEZONE', None)).isoformat(),
        date_modified=set_date_tzinfo(
            article.modified,
            json_feed_generator.settings.get('TIMEZONE', None)
        ).isoformat() if hasattr(article, 'modified') else None,
        author=Author.from_pelican_author(json_feed_generator,
                                          getattr(article, 'author', '')),
        tags=[tag.name for tag in article.tags]
        if hasattr(article, 'tags') else None
    )
def _add_item_to_the_feed(self, feed, item):
    feed.add_item(title=item.title,
                  link='%s/%s' % (self.site_url, item.url),
                  unique_id='%s/%s' % (self.site_url, item.url),
                  description=item.content,
                  categories=item.tags if hasattr(item, 'tags') else None,
                  author_name=getattr(item, 'author', 'John Doe'),
                  pubdate=set_date_tzinfo(
                      item.date, self.settings.get('TIMEZONE', None)))
def _add_item_to_the_feed(self, feed, item):
    feed.add_item(
        title=item.title,
        link='%s/%s' % (self.site_url, item.url),
        description=item.content,
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', 'John Doe'),
        pubdate=set_date_tzinfo(item.date,
                                self.settings.get('TIMEZONE', None)))
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = self.urljoiner(self.site_url, item.url)

    is_rss = isinstance(feed, Rss201rev2Feed)
    if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):
        description = item.summary
    else:
        description = item.get_content(self.site_url)

    feed.add_item(title=title,
                  link=link,
                  unique_id=get_tag_uri(link, item.date),
                  description=description,
                  content=item.get_content(self.site_url),
                  categories=item.tags if hasattr(item, 'tags') else None,
                  author_name=getattr(item, 'author', ''),
                  pubdate=set_date_tzinfo(
                      item.date, self.settings.get('TIMEZONE', None)),
                  updateddate=set_date_tzinfo(
                      item.modified, self.settings.get('TIMEZONE', None))
                  if hasattr(item, 'modified') else None)
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = self.urljoiner(self.site_url, item.url)

    is_rss = isinstance(feed, Rss201rev2Feed)
    if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):
        description = item.summary
    else:
        description = item.get_content(self.site_url)

    feed.add_item(
        title=title,
        link=link,
        unique_id=get_tag_uri(link, item.date),
        description=description,
        content=item.get_content(self.site_url),
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    feed.add_item(
        title=title,
        link='%s/%s' % (self.site_url, item.url),
        unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),
                                    item.date.date(),
                                    item.url),
        description=item.get_content(self.site_url, content_type='FEED'),
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(item.date,
                                self.settings.get('TIMEZONE', None)))
def _add_item_to_the_feed(self, feed, item): title = Markup(item.title).striptags() feed.add_item( title=title, link="%s/%s" % (self.site_url, item.url), unique_id="tag:%s,%s:%s" % (self.site_url.replace("http://", ""), item.date.date(), item.url), description=item.get_content(self.site_url), categories=item.tags if hasattr(item, "tags") else None, author_name=getattr(item, "author", ""), pubdate=set_date_tzinfo(item.date, self.settings.get("TIMEZONE", None)), )
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = self.urljoiner(self.site_url, item.url)

    if isinstance(feed, Rss201rev2Feed):
        # RSS feeds use a single tag called 'description' for both the full
        # content and the summary
        content = None
        if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
            description = item.summary
        else:
            description = item.get_content(self.site_url)
    else:
        # Atom feeds have two different tags for full content (called
        # 'content' by feedgenerator) and summary (called 'description' by
        # feedgenerator).
        #
        # It does not make sense to have the summary be the exact same thing
        # as the full content. If we detect that they are, we just remove
        # the summary.
        content = item.get_content(self.site_url)
        description = item.summary
        if description == content:
            description = None

    feed.add_item(
        title=title,
        link=link,
        unique_id=get_tag_uri(link, item.date),
        description=description,
        content=content,
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(
            item.date, self.settings.get('TIMEZONE', None)),
        updateddate=set_date_tzinfo(
            item.modified, self.settings.get('TIMEZONE', None)
        ) if hasattr(item, 'modified') else None)
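# The RSS/Atom split above is driven by a single setting; an illustrative
# value is shown here. It only affects RSS output, since Atom already carries
# separate summary and content elements.
RSS_FEED_SUMMARY_ONLY = True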
def from_article(cls, json_feed_generator, article):
    title = Markup(article.title).striptags()
    summary = article._summary if hasattr(article, '_summary') else None
    item_url = json_feed_generator.build_url(article.url)
    return cls(item_id=item_url,
               url=item_url,
               title=title,
               content_html=article.content,
               summary=summary,
               date_published=set_date_tzinfo(
                   article.date,
                   json_feed_generator.settings.get(
                       'TIMEZONE', None)).isoformat(),
               date_modified=set_date_tzinfo(
                   article.modified,
                   json_feed_generator.settings.get(
                       'TIMEZONE', None)).isoformat()
               if hasattr(article, 'modified') else None,
               author=Author.from_pelican_author(
                   json_feed_generator, getattr(article, 'author', '')),
               tags=[tag.name for tag in article.tags]
               if hasattr(article, 'tags') else None)
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    feed.add_item(
        title=title,
        link='%s/%s' % (self.site_url, item.url),
        unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),
                                    item.date.date(),
                                    item.url),
        description=item.get_content(self.site_url),
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(item.date,
                                self.settings.get('TIMEZONE', None)))
def _add_item_to_the_feed(self, feed, item):
    title = Markup(item.title).striptags()
    link = '%s/%s' % (self.site_url, item.url)

    description = item.get_content(self.site_url)
    try:
        if self.settings['FEED_USE_SUMMARY']:
            description = item.summary
    except (KeyError, AttributeError):
        # Setting not defined or the item has no summary; keep full content.
        pass

    feed.add_item(title=title,
                  link=link,
                  unique_id='tag:%s,%s:%s' % (urlparse(link).netloc,
                                              item.date.date(),
                                              urlparse(link).path.lstrip('/')),
                  description=description,
                  categories=item.tags if hasattr(item, 'tags') else None,
                  author_name=getattr(item, 'author', ''),
                  pubdate=set_date_tzinfo(
                      item.date, self.settings.get('TIMEZONE', None)),
                  updateddate=set_date_tzinfo(
                      item.modified, self.settings.get('TIMEZONE', None))
                  if hasattr(item, 'modified') else None)
def _add_item_to_the_feed(self, feed, item): title = Markup(item.title).striptags() link = "%s/%s" % (self.site_url, item.url) feed.add_item( title=title, link=link, unique_id="tag:%s,%s:%s" % (urlparse(link).netloc, item.date.date(), urlparse(link).path.lstrip("/")), description=item.get_content(self.site_url), categories=item.tags if hasattr(item, "tags") else None, author_name=getattr(item, "author", ""), pubdate=set_date_tzinfo( item.modified if hasattr(item, "modified") else item.date, self.settings.get("TIMEZONE", None) ), )
def _add_item_to_the_feed(self, feed, item):
    # @jb: The code assumed `item.url` was relative.
    link = urlparse.urljoin(self.site_url, item.url)
    title = Markup(item.title).striptags()
    # Try to get summary. If no summary's present, use the entire contents.
    summary = item.summary or item.get_content(self.site_url)
    feed.add_item(
        title=title,
        link=link,
        unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),
                                    item.date.date(),
                                    item.url),
        description=summary,
        categories=item.tags if hasattr(item, 'tags') else None,
        author_name=getattr(item, 'author', ''),
        pubdate=set_date_tzinfo(item.date,
                                self.settings.get('TIMEZONE', None)))
def duplicate_on_dates(generator): """ Articles (events) with `dates` property are recurring. Create a copy of the article for each date in `dates`. """ articles = [] for article in generator.articles: if not hasattr(article, 'dates'): articles.append(article) continue log.debug('Event {} has {} occurrences.'.format(article.get_relative_source_path(), len(article.dates))) for i, date in enumerate(article.dates, 2): event = copy(article) articles.append(event) event.slug += '--' + str(i) # Create hopefully unique slug # The comment following '#' can be anything (e.g. visitor count) date, _, event.dates_comment = date.partition('#') # From pelican.contents.Content.__init__ timezone = getattr(event, 'timezone', event.settings.get('TIMEZONE', 'UTC')) event.date = set_date_tzinfo(get_date(date), timezone) event.locale_date = strftime(event.date, event.date_format) articles.sort(key=attrgetter(generator.settings['ARTICLE_ORDER_BY']), reverse=True) generator.articles = articles
def _add_item_to_the_feed(self, feed, item): """Performs an 'in-place' update of existing 'published' articles in ``feed`` by creating a new entry using the contents from the ``item`` being passed. This method is invoked by pelican's core. :param feed: A PodcastFeed instance. :param item: An article (pelican's Article object). """ # Local copy of iTunes attributes to add to the feed. article = DEFAULT_ITEM_ELEMENTS.copy() article_values_map = [ [ "link", lambda calee, item, article: "{0}/{1}".format( calee.site_url, item.url), ], [ "title", lambda calee, item, article: Markup(item.title).striptags(), ], # NOQA E231 [ "itunes:summary", lambda calee, item, article: item.description if hasattr( item, "description") else Markup(item.summary).striptags(), ], [ "description", lambda calee, item, article: "<![CDATA[{}]]>".format( Markup(item.summary)), ], [ "pubDate", lambda calee, item, article: rfc2822_date( set_date_tzinfo( item.modified if hasattr(item, "modified") else item.date, self.settings.get("TIMEZONE", None), )), ], ["itunes:author", lambda calee, item, article: item.author.name], [ "itunes:subtitle", lambda calee, item, article: Markup(item.subtitle).striptags() if hasattr(item, "subtitle") else None, ], [ "itunes:image", lambda calee, item, article: { "href": "{0}{1}".format(self.site_url, item.image) } if hasattr(item, "image") else None, ], [ "guid", lambda calee, item, article: item.guid if hasattr(item, "guid") else article["link"], ], ] def update_article(item, article, *args): if len(args) == 2: args = args + (None, ) _key, value, default = args try: val = value(self, item, article) if val: article[_key] = val except Exception as e: logger.warning("Exception %s", e) for article_value_map in article_values_map: update_article(item, article, *article_value_map) # get file path to podcast attachment file. def get_attachment_filepath(settings): siteurl = self.settings.get("SITEURL", None) content_path = self.settings.get("PATH", None) if item.podcast.startswith(siteurl): return f"{content_path}/{item.podcast[len(siteurl):]}" def get_attachment_url(settings): return item.podcast enclosure = {"url": get_attachment_url(self.settings)} if hasattr(item, "length"): enclosure["length"] = item.length if hasattr(item, "duration"): article["itunes:duration"] = item.duration filepath = get_attachment_filepath(self.settings) if filepath and os.path.exists(filepath): enclosure["length"] = str(os.path.getsize(filepath)) audiofile = mutagen.File(filepath) (enclosure["type"], ) = set( audiofile.mime) & set(SUPPORTED_MIME_TYPES) article["itunes:duration"] = str(int(audiofile.info.length)) article["enclosure"] = enclosure # Add the new article to the feed. feed.add_item(**article)
def _add_item_to_the_feed(self, feed, item): """Performs an 'in-place' update of existing 'published' articles in ``feed`` by creating a new entry using the contents from the ``item`` being passed. This method is invoked by pelican's core. :param feed: A PodcastFeed instance. :param item: An article (pelican's Article object). """ # Local copy of iTunes attributes to add to the feed. items = DEFAULT_ITEM_ELEMENTS.copy() # Link to the new article. # http://example.com/episode-01 items['link'] = '{0}/{1}'.format(self.site_url, item.url) # Title for the article. # ex: <title>Episode Title</title> items['title'] = Markup(item.title).striptags() # Summary for the article. This can be obtained either from # a ``:description:`` or a ``:summary:`` directive. # ex: <itunes:summary>In this episode... </itunes:summary> if hasattr(item, 'description'): items['itunes:summary'] = item.description else: items['itunes:summary'] = Markup(item.summary).striptags() items['description'] = Markup(item.summary).striptags() # Hack: Spotify treats line feeds (LF) as HTML line breaks (<br>) # Minify rendered content to avoid this. content = Markup( htmlmin.minify(item.content, remove_optional_attribute_quotes=False)) items['content:encoded'] = Markup("<![CDATA[{}]]>").format(content) # Date the article was last modified. # ex: <pubDate>Fri, 13 Jun 2014 04:59:00 -0300</pubDate> items['pubDate'] = rfc2822_date( set_date_tzinfo( item.modified if hasattr(item, 'modified') else item.date, self.settings.get('TIMEZONE', None))) # Name(s) for the article's author(s). # ex: <itunes:author>John Doe</itunes:author> if hasattr(item, 'author'): items['itunes:author'] = item.author.name # Subtitle for the article. # ex: <itunes:subtitle>My episode subtitle</itunes:subtitle> if hasattr(item, 'subtitle'): items['itunes:subtitle'] = Markup(item.subtitle).striptags() # Ex: # <itunes:image href="http://example.com/Episodio1.jpg" /> if hasattr(item, 'image'): items['itunes:image'] = { 'href': '{0}{1}'.format(self.site_url, item.image) } # Information about the episode audio. # ex: <enclosure url="http://example.com/episode.m4a" # length="872731" type="audio/x-m4a" /> if hasattr(item, 'podcast'): enclosure = {'url': item.podcast} # Include the file size if available. if hasattr(item, 'length'): enclosure['length'] = item.length # Include the audio mime type if available... if hasattr(item, 'mimetype'): enclosure['type'] = item.mimetype else: # ... or default to 'audio/mpeg'. enclosure['type'] = 'audio/mpeg' items['enclosure'] = enclosure # Duration for the audio file. # <itunes:duration>7:04</itunes:duration> if hasattr(item, 'duration'): items['itunes:duration'] = item.duration # Unique identifier for the episode. # Use a 'guid' if available... # ex: <guid>http://example.com/aae20050615.m4a</guid> if hasattr(item, 'guid'): items['guid'] = item.guid # ... else, use the article's link instead. # ex: <guid>http://example.com/episode-01</guid> else: items['guid'] = items['link'] # Add the new article to the feed. feed.add_item(**items)
def __init__(self, content, metadata=None, settings=None,
             source_path=None, context=None):
    if metadata is None:
        metadata = {}
    if settings is None:
        settings = copy.deepcopy(DEFAULT_CONFIG)

    self.settings = settings
    self._content = content
    if context is None:
        context = {}
    self._context = context
    self.translations = []

    local_metadata = dict()
    local_metadata.update(metadata)

    # set metadata as attributes
    for key, value in local_metadata.items():
        if key in ('save_as', 'url'):
            key = 'override_' + key
        setattr(self, key.lower(), value)

    # also keep track of the metadata attributes available
    self.metadata = local_metadata

    # default template if it's not defined in page
    self.template = self._get_template()

    # First, read the authors from "authors", if not, fallback to "author"
    # and if not use the settings defined one, if any.
    if not hasattr(self, 'author'):
        if hasattr(self, 'authors'):
            self.author = self.authors[0]
        elif 'AUTHOR' in settings:
            self.author = Author(settings['AUTHOR'], settings)

    if not hasattr(self, 'authors') and hasattr(self, 'author'):
        self.authors = [self.author]

    # XXX Split all the following code into pieces, there is too much here.

    # manage languages
    self.in_default_lang = True
    if 'DEFAULT_LANG' in settings:
        default_lang = settings['DEFAULT_LANG'].lower()
        if not hasattr(self, 'lang'):
            self.lang = default_lang

        self.in_default_lang = (self.lang == default_lang)

    # create the slug if not existing, generate slug according to
    # setting of SLUG_ATTRIBUTE
    if not hasattr(self, 'slug'):
        if (settings['SLUGIFY_SOURCE'] == 'title'
                and hasattr(self, 'title')):
            value = self.title
        elif (settings['SLUGIFY_SOURCE'] == 'basename'
                and source_path is not None):
            value = os.path.basename(os.path.splitext(source_path)[0])
        else:
            value = None
        if value is not None:
            self.slug = slugify(
                value,
                regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', []),
                preserve_case=settings.get('SLUGIFY_PRESERVE_CASE', False),
                use_unicode=settings.get('SLUGIFY_USE_UNICODE', False))

    self.source_path = source_path
    self.relative_source_path = self.get_relative_source_path()

    # manage the date format
    if not hasattr(self, 'date_format'):
        if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
            self.date_format = settings['DATE_FORMATS'][self.lang]
        else:
            self.date_format = settings['DEFAULT_DATE_FORMAT']

    if isinstance(self.date_format, tuple):
        locale_string = self.date_format[0]
        locale.setlocale(locale.LC_ALL, locale_string)
        self.date_format = self.date_format[1]

    # manage timezone
    default_timezone = settings.get('TIMEZONE', 'UTC')
    timezone = getattr(self, 'timezone', default_timezone)
    self.timezone = pytz.timezone(timezone)

    if hasattr(self, 'date'):
        self.date = set_date_tzinfo(self.date, timezone)
        self.locale_date = self.date.strftime(self.date_format)

    if hasattr(self, 'modified'):
        self.modified = set_date_tzinfo(self.modified, timezone)
        self.locale_modified = self.modified.strftime(self.date_format)

    # manage status
    if not hasattr(self, 'status'):
        # Previous default of None broke comment plugins and perhaps others
        self.status = getattr(self, 'default_status', '')

    # store the summary metadata if it is set
    if 'summary' in metadata:
        self._summary = metadata['summary']

    signals.content_object_init.send(self)
def __init__(self, content, metadata=None, settings=None, source_path=None, context=None): if metadata is None: metadata = {} if settings is None: settings = copy.deepcopy(DEFAULT_CONFIG) self.settings = settings self._content = content if context is None: context = {} self._context = context self.translations = [] local_metadata = dict() local_metadata.update(metadata) # set metadata as attributes for key, value in local_metadata.items(): if key in ('save_as', 'url'): key = 'override_' + key setattr(self, key.lower(), value) # also keep track of the metadata attributes available self.metadata = local_metadata # default template if it's not defined in page self.template = self._get_template() # First, read the authors from "authors", if not, fallback to "author" # and if not use the settings defined one, if any. if not hasattr(self, 'author'): if hasattr(self, 'authors'): self.author = self.authors[0] elif 'AUTHOR' in settings: self.author = Author(settings['AUTHOR'], settings) if not hasattr(self, 'authors') and hasattr(self, 'author'): self.authors = [self.author] # XXX Split all the following code into pieces, there is too much here. # manage languages self.in_default_lang = True if 'DEFAULT_LANG' in settings: default_lang = settings['DEFAULT_LANG'].lower() if not hasattr(self, 'lang'): self.lang = default_lang self.in_default_lang = (self.lang == default_lang) # create the slug if not existing, generate slug according to # setting of SLUG_ATTRIBUTE if not hasattr(self, 'slug'): if (settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title')): self.slug = slugify(self.title, settings.get('SLUG_SUBSTITUTIONS', ())) elif (settings['SLUGIFY_SOURCE'] == 'basename' and source_path is not None): basename = os.path.basename(os.path.splitext(source_path)[0]) self.slug = slugify(basename, settings.get('SLUG_SUBSTITUTIONS', ())) self.source_path = source_path # manage the date format if not hasattr(self, 'date_format'): if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']: self.date_format = settings['DATE_FORMATS'][self.lang] else: self.date_format = settings['DEFAULT_DATE_FORMAT'] if isinstance(self.date_format, tuple): locale_string = self.date_format[0] if sys.version_info < (3, ) and isinstance(locale_string, six.text_type): locale_string = locale_string.encode('ascii') locale.setlocale(locale.LC_ALL, locale_string) self.date_format = self.date_format[1] # manage timezone default_timezone = settings.get('TIMEZONE', 'UTC') timezone = getattr(self, 'timezone', default_timezone) if hasattr(self, 'date'): self.date = set_date_tzinfo(self.date, timezone) self.locale_date = strftime(self.date, self.date_format) if hasattr(self, 'modified'): self.modified = set_date_tzinfo(self.modified, timezone) self.locale_modified = strftime(self.modified, self.date_format) # manage status if not hasattr(self, 'status'): self.status = settings['DEFAULT_STATUS'] if not settings['WITH_FUTURE_DATES'] and hasattr(self, 'date'): if self.date.tzinfo is None: now = SafeDatetime.now() else: now = SafeDatetime.utcnow().replace(tzinfo=pytz.utc) if self.date > now: self.status = 'draft' # store the summary metadata if it is set if 'summary' in metadata: self._summary = metadata['summary'] signals.content_object_init.send(self)
def datetime_from_timestamp(timestamp, content):
    return utils.set_date_tzinfo(
        datetime.fromtimestamp(timestamp),
        tz_name=content.settings.get('TIMEZONE', None))
def get_commit_date(commit, tz_name): """ Get datetime of commit comitted_date """ return set_date_tzinfo(datetime.fromtimestamp(mktime(commit.committed_date) - altzone), tz_name=tz_name)
def __init__(self, content, metadata=None, settings=None, source_path=None, context=None): if metadata is None: metadata = {} if settings is None: settings = copy.deepcopy(DEFAULT_CONFIG) self.settings = settings self._content = content if context is None: context = {} self._context = context self.translations = [] local_metadata = dict() local_metadata.update(metadata) # set metadata as attributes for key, value in local_metadata.items(): if key in ('save_as', 'url'): key = 'override_' + key setattr(self, key.lower(), value) # also keep track of the metadata attributes available self.metadata = local_metadata # default template if it's not defined in page self.template = self._get_template() # First, read the authors from "authors", if not, fallback to "author" # and if not use the settings defined one, if any. if not hasattr(self, 'author'): if hasattr(self, 'authors'): self.author = self.authors[0] elif 'AUTHOR' in settings: self.author = Author(settings['AUTHOR'], settings) if not hasattr(self, 'authors') and hasattr(self, 'author'): self.authors = [self.author] # XXX Split all the following code into pieces, there is too much here. # manage languages self.in_default_lang = True if 'DEFAULT_LANG' in settings: default_lang = settings['DEFAULT_LANG'].lower() if not hasattr(self, 'lang'): self.lang = default_lang self.in_default_lang = (self.lang == default_lang) # create the slug if not existing, generate slug according to # setting of SLUG_ATTRIBUTE if not hasattr(self, 'slug'): if (settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title')): self.slug = slugify(self.title, settings.get('SLUG_SUBSTITUTIONS', ())) elif (settings['SLUGIFY_SOURCE'] == 'basename' and source_path is not None): basename = os.path.basename( os.path.splitext(source_path)[0]) self.slug = slugify( basename, settings.get('SLUG_SUBSTITUTIONS', ())) self.source_path = source_path # manage the date format if not hasattr(self, 'date_format'): if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']: self.date_format = settings['DATE_FORMATS'][self.lang] else: self.date_format = settings['DEFAULT_DATE_FORMAT'] if isinstance(self.date_format, tuple): locale_string = self.date_format[0] if sys.version_info < (3, ) and isinstance(locale_string, six.text_type): locale_string = locale_string.encode('ascii') locale.setlocale(locale.LC_ALL, locale_string) self.date_format = self.date_format[1] # manage timezone default_timezone = settings.get('TIMEZONE', 'UTC') timezone = getattr(self, 'timezone', default_timezone) if hasattr(self, 'date'): self.date = set_date_tzinfo(self.date, timezone) self.locale_date = strftime(self.date, self.date_format) if hasattr(self, 'modified'): self.modified = set_date_tzinfo(self.modified, timezone) self.locale_modified = strftime(self.modified, self.date_format) # manage status if not hasattr(self, 'status'): self.status = getattr(self, 'default_status', None) if len(self._context.get('filenames', [])) > 0: self.refresh_metadata_intersite_links() signals.content_object_init.send(self)
def _add_item_to_the_feed(self, feed, item): """Performs an 'in-place' update of existing 'published' articles in ``feed`` by creating a new entry using the contents from the ``item`` being passed. This method is invoked by pelican's core. :param feed: A PodcastFeed instance. :param item: An article (pelican's Article object). """ siteurl = self.settings.get('PODCAST_FEED_SITE_URL', item.settings['SITEURL']) if siteurl.startswith('//'): siteurl = 'http:' + siteurl # Local copy of iTunes attributes to add to the feed. items = DEFAULT_ITEM_ELEMENTS.copy() # Link to the new article. # http://example.com/episode-01 items['link'] = '{0}/{1}'.format(self.site_url, item.url) # Title for the article. # ex: <title>Episode Title</title> items['title'] = Markup(item.title).striptags() # Summary for the article. This can be obtained either from # a ``:description:`` or a ``:summary:`` directive. # ex: <itunes:summary>In this episode... </itunes:summary> if hasattr(item, 'description'): items['itunes:summary'] = item.description else: items['itunes:summary'] = Markup(item.summary).striptags() items['description'] = "<![CDATA[{}]]>".format( '<img src="{}{}" title="{}" alt="{}" />'.format( siteurl, item.image_wide, item.title, item.title ) ) description = unicode(Markup(item.summary)).replace("<html><body>", "") description = description.replace("</body></html>", "") description += 'Leia o restante do show notes no <a href="{}">site</a>.'.format( siteurl + '/' + item.url ) items['description'] += "<![CDATA[{}]]>".format(description) # Date the article was last modified. # ex: <pubDate>Fri, 13 Jun 2014 04:59:00 -0300</pubDate> items['pubDate'] = rfc2822_date( set_date_tzinfo( item.modified if hasattr(item, 'modified') else item.date, self.settings.get('TIMEZONE', None)) ) # Name(s) for the article's author(s). # ex: <itunes:author>John Doe</itunes:author> if hasattr(item, 'author'): items['itunes:author'] = item.author.name # Subtitle for the article. # ex: <itunes:subtitle>My episode subtitle</itunes:subtitle> if hasattr(item, 'subtitle'): items['itunes:subtitle'] = Markup(item.subtitle).striptags() # Ex: # <itunes:image href="http://example.com/Episodio1.jpg" /> if hasattr(item, 'image'): items['itunes:image'] = { 'href': '{0}{1}'.format(self.site_url, item.image)} # Information about the episode audio. # ex: <enclosure url="http://example.com/episode.m4a" # length="872731" type="audio/x-m4a" /> if hasattr(item, 'podcast'): url = "http://archive.org/download/{}/{}.mp3".format(item.podcast, item.podcast) enclosure = {'url': url} # Include the file size if available. if hasattr(item, 'length'): enclosure['length'] = item.length # Include the audio mime type if available... if hasattr(item, 'mimetype'): enclosure['type'] = item.mimetype else: # ... or default to 'audio/mpeg'. enclosure['type'] = 'audio/mpeg' items['enclosure'] = enclosure # Duration for the audio file. # <itunes:duration>7:04</itunes:duration> if hasattr(item, 'duration'): items['itunes:duration'] = item.duration # Unique identifier for the episode. # Use a 'guid' if available... # ex: <guid>http://example.com/aae20050615.m4a</guid> if hasattr(item, 'guid'): items['guid'] = item.guid # ... else, use the article's link instead. # ex: <guid>http://example.com/episode-01</guid> else: items['guid'] = items['link'] # Add the new article to the feed. feed.add_item(**items)
def __init__(self, content, metadata=None, settings=None, source_path=None, context=None): if metadata is None: metadata = {} if settings is None: settings = copy.deepcopy(DEFAULT_CONFIG) self.settings = settings self._content = content if context is None: context = {} self._context = context self.translations = [] local_metadata = dict() local_metadata.update(metadata) # set metadata as attributes for key, value in local_metadata.items(): if key in ("save_as", "url"): key = "override_" + key setattr(self, key.lower(), value) # also keep track of the metadata attributes available self.metadata = local_metadata # default template if it's not defined in page self.template = self._get_template() # First, read the authors from "authors", if not, fallback to "author" # and if not use the settings defined one, if any. if not hasattr(self, "author"): if hasattr(self, "authors"): self.author = self.authors[0] elif "AUTHOR" in settings: self.author = Author(settings["AUTHOR"], settings) if not hasattr(self, "authors") and hasattr(self, "author"): self.authors = [self.author] # XXX Split all the following code into pieces, there is too much here. # manage languages self.in_default_lang = True if "DEFAULT_LANG" in settings: default_lang = settings["DEFAULT_LANG"].lower() if not hasattr(self, "lang"): self.lang = default_lang self.in_default_lang = self.lang == default_lang # create the slug if not existing, generate slug according to # setting of SLUG_ATTRIBUTE if not hasattr(self, "slug"): if settings["SLUGIFY_SOURCE"] == "title" and hasattr(self, "title"): self.slug = slugify(self.title, settings.get("SLUG_SUBSTITUTIONS", ())) elif settings["SLUGIFY_SOURCE"] == "basename" and source_path is not None: basename = os.path.basename(os.path.splitext(source_path)[0]) self.slug = slugify(basename, settings.get("SLUG_SUBSTITUTIONS", ())) self.source_path = source_path # manage the date format if not hasattr(self, "date_format"): if hasattr(self, "lang") and self.lang in settings["DATE_FORMATS"]: self.date_format = settings["DATE_FORMATS"][self.lang] else: self.date_format = settings["DEFAULT_DATE_FORMAT"] if isinstance(self.date_format, tuple): locale_string = self.date_format[0] if sys.version_info < (3,) and isinstance(locale_string, six.text_type): locale_string = locale_string.encode("ascii") locale.setlocale(locale.LC_ALL, locale_string) self.date_format = self.date_format[1] # manage timezone default_timezone = settings.get("TIMEZONE", "UTC") timezone = getattr(self, "timezone", default_timezone) if hasattr(self, "date"): self.date = set_date_tzinfo(self.date, timezone) self.locale_date = strftime(self.date, self.date_format) if hasattr(self, "modified"): self.modified = set_date_tzinfo(self.modified, timezone) self.locale_modified = strftime(self.modified, self.date_format) # manage status if not hasattr(self, "status"): self.status = settings["DEFAULT_STATUS"] if not settings["WITH_FUTURE_DATES"] and hasattr(self, "date"): if self.date.tzinfo is None: now = SafeDatetime.now() else: now = SafeDatetime.utcnow().replace(tzinfo=pytz.utc) if self.date > now: self.status = "draft" # store the summary metadata if it is set if "summary" in metadata: self._summary = metadata["summary"] signals.content_object_init.send(self)