def out_rss(self, filename):
    """Serialize this albo as an RSS 2.0 file with the alboPOP extension.

    Channel metadata comes from the instance attributes; one RSS item is
    emitted per element of ``self.items``.
    """
    feed = FeedGenerator()
    feed.register_extension('albopop', AlbopopExtension, AlbopopEntryExtension)

    # Channel-level metadata.
    feed.id(self.url)
    feed.title(self.title)
    feed.description(self.title)
    feed.author({'name': 'alboPOP', 'email': ''})
    feed.link(href=self.url)
    feed.pubDate(formatdate())
    feed.webMaster(self.webMaster)
    feed.docs('https://github.com/mfortini/alboPOP_saga')
    feed.language('it')
    feed.albopop.categoryName(self.categoryName)
    feed.albopop.categoryType(self.categoryType)

    # One <item> per scraped record.
    for record in self.items:
        entry = feed.add_entry()
        entry.id(record['link'])
        entry.category(term=record['tipo'])
        entry.pubdate(record['pubDate'])
        entry.link(href=record['link'])
        entry.title(record['title'])
        entry.description(record['description'])
        entry.albopop.categoryUID(
            '{}/{}'.format(record['numero'], record['anno']))

    feed.rss_file(filename)
def main():
    """Build ``rss.xml`` from a cached dump of a VK group's wall.

    Reads a previously pickled ``api.wall.get`` response from the local
    file ``out`` and emits an RSS feed with one entry per wall post.
    """
    session = vk.Session()
    api = vk.API(session)
    group_id = '96469126'
    group_info = api.groups.getById(
        group_ids=group_id,
        fields=['description', 'site', 'name', 'photo', 'gid'])
    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])

    # NOTE(review): 'out' is a local pickle cache of api.wall.get(...).
    # pickle.loads is unsafe on untrusted data -- only load trusted files.
    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())
    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    # Prefer the group's own site when present and non-empty.
    site_url = group_info.get('site') or url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")
    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        text = x.get('text', '')
        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        # Hashtags become categories and are stripped from the title.
        tags = pat.findall(text)
        title = text
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')
        title = re.sub('<[^<]+?>', ' ', title)
        # BUG FIX: textwrap.wrap() returns [] for empty/whitespace-only
        # text, so indexing [0] unconditionally raised IndexError on
        # posts with no text (e.g. image-only posts).
        wrapped = textwrap.wrap(title, width=80)
        title = wrapped[0] if wrapped else ''
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')
def generate_feed(input_file, output_file):
    """Generate a podcast RSS file at ``output_file`` from ``input_file``.

    Channel metadata comes from the module-level TITLE/LINK/DESCRIPTION/
    IMAGE_URL/CONTACT constants; items are read via ``read_items``.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)

    ## RSS tags
    # Required
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    # BUG FIX: the RSS <webMaster> element must be an email address;
    # the original passed CONTACT['name'].
    fg.webMaster(CONTACT['email'])
    now = datetime.datetime.now()
    tz = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(tz.localize(now))

    # iTunes
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation',
                               itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])

    # Add items
    items = read_items(input_file)
    for item in items:
        fe = fg.add_entry()
        ## RSS tags
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        fe.enclosure(item['link'], 0, 'audio/mpeg')
        fe.pubdate(item['pubDate'])

    # Finish off the file.  (The original also called fg.rss_str(pretty=True)
    # and discarded the result -- a dead call, removed.)
    fg.rss_file(output_file)
class Feed:
    """Thin wrapper around :class:`FeedGenerator` for a blog's Atom/RSS feed."""

    def __init__(self, url: str, name: str, email: str, title: str = None,
                 generator: str = None, generator_version: str = None,
                 logo: str = None, icon: str = None, description: str = None,
                 language: str = None) -> None:
        """Initialize the feed's channel-level metadata.

        ``url`` is the site base URL; ``name``/``email`` identify the
        author and are reused for every entry added via :meth:`add`.
        """
        self.name = name
        self.email = email
        self.fg = FeedGenerator()
        self.fg.id(url + "feed.atom")
        self.fg.link(href=url + "feed.xml", rel="self")
        self.fg.link(href=url, rel="alternate")
        self.fg.author(name=name, email=email)
        self.fg.contributor(name=name, email=email)
        self.fg.managingEditor(email)
        self.fg.webMaster(email)
        self.fg.title(title)
        self.fg.generator(generator=generator, version=generator_version)
        self.fg.logo(logo)
        self.fg.icon(icon)
        self.fg.description(description)
        self.fg.language(language)

    def add(self, article: Article) -> None:
        """Append one article to the feed as a new entry."""
        feed_entry = self.fg.add_entry()
        feed_entry.id(article.url)
        feed_entry.title(article.title)
        feed_entry.link(href=article.url)
        feed_entry.guid(guid=article.url, permalink=True)
        feed_entry.author(name=self.name, email=self.email)
        feed_entry.summary(article.description or article.snippet)
        feed_entry.content(content=article.content, type="CDATA")
        # BUG FIX: the original called feed_entry.published(article.date)
        # unconditionally *before* this branch, which duplicated the call
        # when a date was present and was a confusing no-op otherwise.
        if article.date:
            feed_entry.published(article.date)
            feed_entry.updated(article.date)
        else:
            # Feeds require a date; fall back to the Unix epoch (UTC).
            epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
            feed_entry.published(epoch)
            feed_entry.updated(epoch)

    def add_from_blog(self, url: str) -> None:
        """Add every article from the blog at ``url``; adopt its title if unset."""
        blog = Blog(url)
        if not self.fg.title():
            self.fg.title(blog.title)
        for article in blog.articles:
            self.add(article)

    def atom(self) -> bytes:
        """Return the feed serialized as pretty-printed Atom."""
        return self.fg.atom_str(pretty=True)

    def rss(self) -> bytes:
        """Return the feed serialized as pretty-printed RSS."""
        return self.fg.rss_str(pretty=True)

    def atom_file(self, filename: str = "feed.atom") -> None:
        """Write the Atom serialization to ``filename``."""
        self.fg.atom_file(filename, pretty=True)

    def rss_file(self, filename: str = "feed.xml") -> None:
        """Write the RSS serialization to ``filename``."""
        self.fg.rss_file(filename, pretty=True)
def write_rss(self, audio=False):
    """Write podcast feeds to files.

    Builds one RSS file (``podcast.xml`` or ``audio-podcast.xml`` when
    ``audio`` is true) for this playlist under the controller's output dir.
    """
    print("playlist self.info", flush=True)
    pp.pprint(self.info)
    prefix = "audio-" if audio else ""
    feed_url = (self.controller.base_url + self.folder + '/' +
                prefix + 'podcast.xml')
    feedgen = FeedGenerator()
    feedgen.load_extension('podcast')
    feedgen.generator('Adafruit-Podcast')
    feedgen.id(feed_url)
    feedgen.title(self.info['title'])
    feedgen.subtitle(self.info['itunesSubtitle'])
    feedgen.author({'name': self.info['author']})
    for category in self.info['categories']:
        feedgen.category(term=category)
    feedgen.webMaster(self.info['webMaster'])
    feedgen.managingEditor(self.info['managingEditor'])
    feedgen.link(href=feed_url, rel='self')

    # Link to a chosen URL as an alternate, if set.
    if 'htmlUrl' in self.info:
        feedgen.link(href=self.info['htmlUrl'], rel='alternate')
    else:
        # Otherwise link to the original YouTube playlist as an alternate:
        if isinstance(self.url, list):
            for url in self.url:
                feedgen.link(href=url, rel='alternate')
        else:
            feedgen.link(href=self.url, rel='alternate')

    feedgen.language('en')

    # pylint: disable=no-member
    feedgen.podcast.itunes_category(self.info['itunesCategory']['text'])
    feedgen.podcast.itunes_subtitle(self.info['itunesSubtitle'])
    feedgen.podcast.itunes_summary(self.info['description'])
    feedgen.podcast.itunes_owner(email=self.info['itunesOwner']['email'],
                                 name=self.info['itunesOwner']['name'])
    feedgen.podcast.itunes_author(self.info['itunesOwner']['name'])
    feedgen.podcast.itunes_image(self.controller.base_url + self.folder +
                                 '/image.jpg')
    feedgen.podcast.itunes_explicit('clean')

    for vid in self.videos:
        print("vid:\n", flush=True)
        pp.pprint(vid)
        print("\n", flush=True)

        # BUG FIX: split('.')[0] truncated filenames containing extra
        # dots (e.g. "Ep 1.2.mp4" -> "Ep 1"); use splitext instead.
        vid_filename = (os.path.splitext(vid['_filename'])[0] +
                        (".mp3" if audio else ".mp4"))
        vid_url = self.video_url(vid_filename)

        # Size of enclosed file in bytes:
        vid_size = os.path.getsize(vid_filename)

        # Date of upload (from the youtube-dl JSON data)
        eastern = pytz.timezone('US/Eastern')
        vid_date = eastern.localize(
            datetime.datetime.strptime(vid['upload_date'], '%Y%m%d'))

        entry = feedgen.add_entry()
        entry.id(vid_url)
        entry.title(vid['fulltitle'])
        entry.published(vid_date)
        for category in vid['categories']:
            entry.category(term=category)
        entry.description(vid['description'])
        # BUG FIX: 'audio/mp3' is not a registered MIME type; MP3
        # enclosures must be declared as 'audio/mpeg'.
        entry.enclosure(vid_url, str(vid_size),
                        ('audio/mpeg' if audio else 'video/mp4'))
        entry.podcast.itunes_image(self.controller.base_url + self.folder +
                                   '/image.jpg')
        entry.podcast.itunes_author(self.info['author'])
        entry.podcast.itunes_summary(vid['description'])
        entry.podcast.itunes_duration(vid['duration'])

    # (The original called feedgen.rss_str(pretty=True) here and discarded
    # the result -- a dead call, removed.)

    # Ensure output folder for this podcast exists:
    os.makedirs(os.path.join(self.controller.output_dir, self.folder),
                exist_ok=True)

    # Generate RSS file in output folder:
    feedgen.rss_file(
        os.path.join(self.controller.output_dir, self.folder,
                     prefix + 'podcast.xml'))
def setUp(self):
    """Create a fully-populated FeedGenerator and expose every value set
    on it as an attribute, so individual tests can compare output
    against the original inputs."""
    fg = FeedGenerator()

    # XML namespaces used when inspecting serialized output.
    self.nsAtom = "http://www.w3.org/2005/Atom"
    self.nsRss = "http://purl.org/rss/1.0/modules/content/"

    # Core channel fields.
    self.feedId = 'http://lernfunk.de/media/654321'
    self.title = 'Some Testfeed'
    self.subtitle = 'This is a cool feed!'
    self.language = 'en'
    self.logo = 'http://ex.com/logo.jpg'
    self.icon = "http://example.com/icon.png"

    # People.
    self.authorName = 'John Doe'
    self.authorMail = '*****@*****.**'
    self.author = {'name': self.authorName, 'email': self.authorMail}
    self.contributor = {
        'name': "Contributor Name",
        'uri': "Contributor Uri",
        'email': 'Contributor email'
    }
    self.managingEditor = '*****@*****.**'
    self.webMaster = '*****@*****.**'

    # Links.
    self.linkHref = 'http://example.com'
    self.linkRel = 'alternate'
    self.link2Href = 'http://larskiesow.de/test.atom'
    self.link2Rel = 'self'

    # Category.
    self.categoryTerm = 'This category term'
    self.categoryScheme = 'This category scheme'
    self.categoryLabel = 'This category label'

    # Cloud element.
    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'

    # Misc RSS channel elements.
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
        '1 r (SS~~000 1))'
    self.skipDays = 'Tuesday'
    self.skipHours = 23
    self.ttl = 900

    # Text-input element.
    self.textInputTitle = "Text input title"
    self.textInputDescription = "Text input description"
    self.textInputName = "Text input name"
    self.textInputLink = "Text input link"

    # Apply everything to the generator.
    fg.id(self.feedId)
    fg.title(self.title)
    fg.author(self.author)
    fg.link(href=self.linkHref, rel=self.linkRel)
    fg.logo(self.logo)
    fg.subtitle(self.subtitle)
    fg.link(href=self.link2Href, rel=self.link2Rel)
    fg.language(self.language)
    fg.cloud(domain=self.cloudDomain, port=self.cloudPort,
             path=self.cloudPath,
             registerProcedure=self.cloudRegisterProcedure,
             protocol=self.cloudProtocol)
    fg.icon(self.icon)
    fg.category(term=self.categoryTerm, scheme=self.categoryScheme,
                label=self.categoryLabel)
    fg.contributor(self.contributor)
    fg.copyright(self.copyright)
    fg.docs(docs=self.docs)
    fg.managingEditor(self.managingEditor)
    fg.rating(self.rating)
    fg.skipDays(self.skipDays)
    fg.skipHours(self.skipHours)
    fg.textInput(title=self.textInputTitle,
                 description=self.textInputDescription,
                 name=self.textInputName, link=self.textInputLink)
    fg.ttl(self.ttl)
    fg.webMaster(self.webMaster)
    fg.updated('2017-02-05 13:26:58+01:00')
    fg.pubDate('2017-02-05 13:26:58+01:00')
    fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
    fg.image(url=self.logo, title=self.title, link=self.link2Href,
             width='123', height='123', description='Example Inage')

    self.fg = fg
def setUp(self):
    """Prepare a FeedGenerator populated with known values; each value is
    also stored on ``self`` so tests can assert against it."""
    generator = FeedGenerator()

    # Namespaces used by assertions on the serialized XML.
    self.nsAtom = "http://www.w3.org/2005/Atom"
    self.nsRss = "http://purl.org/rss/1.0/modules/content/"

    self.feedId = 'http://lernfunk.de/media/654321'
    self.title = 'Some Testfeed'

    self.authorName = 'John Doe'
    self.authorMail = '*****@*****.**'
    self.author = {'name': self.authorName, 'email': self.authorMail}

    self.linkHref = 'http://example.com'
    self.linkRel = 'alternate'
    self.logo = 'http://ex.com/logo.jpg'
    self.subtitle = 'This is a cool feed!'
    self.link2Href = 'http://larskiesow.de/test.atom'
    self.link2Rel = 'self'
    self.language = 'en'

    self.categoryTerm = 'This category term'
    self.categoryScheme = 'This category scheme'
    self.categoryLabel = 'This category label'

    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'

    self.icon = "http://example.com/icon.png"
    self.contributor = {'name': "Contributor Name",
                        'uri': "Contributor Uri",
                        'email': 'Contributor email'}
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.managingEditor = '*****@*****.**'
    self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
        '1 r (SS~~000 1))'
    self.skipDays = 'Tuesday'
    self.skipHours = 23
    self.textInputTitle = "Text input title"
    self.textInputDescription = "Text input description"
    self.textInputName = "Text input name"
    self.textInputLink = "Text input link"
    self.ttl = 900
    self.webMaster = '*****@*****.**'

    # Feed every stored value into the generator.
    generator.id(self.feedId)
    generator.title(self.title)
    generator.author(self.author)
    generator.link(href=self.linkHref, rel=self.linkRel)
    generator.logo(self.logo)
    generator.subtitle(self.subtitle)
    generator.link(href=self.link2Href, rel=self.link2Rel)
    generator.language(self.language)
    generator.cloud(domain=self.cloudDomain, port=self.cloudPort,
                    path=self.cloudPath,
                    registerProcedure=self.cloudRegisterProcedure,
                    protocol=self.cloudProtocol)
    generator.icon(self.icon)
    generator.category(term=self.categoryTerm,
                       scheme=self.categoryScheme,
                       label=self.categoryLabel)
    generator.contributor(self.contributor)
    generator.copyright(self.copyright)
    generator.docs(docs=self.docs)
    generator.managingEditor(self.managingEditor)
    generator.rating(self.rating)
    generator.skipDays(self.skipDays)
    generator.skipHours(self.skipHours)
    generator.textInput(title=self.textInputTitle,
                        description=self.textInputDescription,
                        name=self.textInputName,
                        link=self.textInputLink)
    generator.ttl(self.ttl)
    generator.webMaster(self.webMaster)
    generator.updated('2017-02-05 13:26:58+01:00')
    generator.pubDate('2017-02-05 13:26:58+01:00')
    generator.generator('python-feedgen', 'x',
                        uri='http://github.com/lkie...')
    generator.image(url=self.logo, title=self.title,
                    link=self.link2Href, width='123', height='123',
                    description='Example Inage')

    self.fg = generator
def main():
    """Build ``rss.xml`` from a cached dump of a VK group's wall.

    Reads a previously pickled ``api.wall.get`` response from the local
    file ``out`` and emits an RSS feed with one entry per wall post.
    """
    session = vk.Session()
    api = vk.API(session)
    group_id = '96469126'
    group_info = api.groups.getById(
        group_ids=group_id,
        fields=['description', 'site', 'name', 'photo', 'gid'])
    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])

    # NOTE(review): 'out' is a local pickle cache of api.wall.get(...).
    # pickle.loads is unsafe on untrusted data -- only load trusted files.
    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())
    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    # Prefer the group's own site when present and non-empty.
    site_url = group_info.get('site') or url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")
    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        text = x.get('text', '')
        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        # Hashtags become categories and are stripped from the title.
        tags = pat.findall(text)
        title = text
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')
        title = re.sub('<[^<]+?>', ' ', title)
        # BUG FIX: textwrap.wrap() returns [] for empty/whitespace-only
        # text, so indexing [0] unconditionally raised IndexError on
        # posts with no text (e.g. image-only posts).
        wrapped = textwrap.wrap(title, width=80)
        title = wrapped[0] if wrapped else ''
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')