def test_005_add_entry_to_new_feed(self):
    """add_entry_to_new_feed should yield a feed identical to one built by hand."""
    # Build the feed we expect the helper to produce.
    expected_fg = FeedGenerator()
    expected_fg.title('Test title')
    expected_fg.link(href='https://example.com/', rel='alternate')
    expected_fg.description('Test description')
    expected_fe = expected_fg.add_entry()
    expected_fe.title('Entry title')
    expected_fe.author(name='Entry author')
    expected_fe.link(href='https://example.com/', rel='alternate')
    expected_fe.content('...')

    # Create a simple FeedGenerator to pass it to the method
    # These 3 elements are required
    fg = FeedGenerator()
    fg.title('Test title')
    fg.link(href='https://example.com/', rel='alternate')
    fg.description('Test description')

    entry = {
        'entry_title': 'Entry title',
        'entry_author': 'Entry author',
        'entry_link': 'https://example.com/'
    }
    content = '...'
    result_fg = add_entry_to_new_feed(self.logger, fg, entry, content)

    # BUG FIX: assertItemsEqual no longer exists on Python 3, and it compared
    # the two XML documents as unordered element sequences, which is
    # meaningless for serialized feeds.  The documents must match exactly.
    self.assertEqual(result_fg.rss_str(), expected_fg.rss_str())
def generate(self):
    """Build the main RSS feed plus one per-device feed and write them to disk.

    Updates are grouped by short codename (the part of update.codename before
    the first underscore).  An existing per-device FeedGenerator from
    self.feeds is reused and extended; otherwise a fresh one is created.
    Output files go under {CONF_DIR}/rss/.
    """
    main_feed_generator = FeedGenerator()
    main_feed_generator.title('MIUI Updates Tracker by XiaomiFirmwareUpdater')
    main_feed_generator.link(href=website, rel='alternate')
    main_feed_generator.description('Your best website to track MIUI ROM releases!')
    main_feed_generator.language('en')
    main_feed_generator.logo(f'{website}/images/xfu.png')
    main_feed_generator.lastBuildDate(None)
    for update in self.updates:
        short_codename = update.codename.split('_')[0]
        # Idiom fix: membership test directly on the dict, no .keys() needed.
        if short_codename not in self.feeds:
            feed_generator = FeedGenerator()
            feed_generator.title(f'{update.name} MIUI Updates Tracker by XiaomiFirmwareUpdater')
            feed_generator.link(href=f'{website}/miui/{short_codename}', rel='alternate')
            feed_generator.description('Your best website to track MIUI ROM releases!')
            feed_generator.language('en')
            feed_generator.logo(f'{website}/images/xfu.png')
            feed_generator.lastBuildDate(None)
        else:
            feed_generator = self.feeds[short_codename]
        feed_generator = self.add_feed_entry(feed_generator, update)
        # Idiom fix: plain subscript assignment instead of dict.update({...}).
        self.feeds[short_codename] = feed_generator
        main_feed_generator = self.add_feed_entry(main_feed_generator, update)
    main_feed_generator.rss_file(f"{CONF_DIR}/rss/latest.xml")
    for codename, feed in self.feeds.items():
        feed.rss_file(f"{CONF_DIR}/rss/{codename}.xml")
def init_feed_generator(feed):
    """Create a podcast-enabled FeedGenerator seeded from a parsed source feed."""
    generator = FeedGenerator()
    generator.load_extension('podcast')
    generator.title("PocketCast")
    generator.link(href=feed.feed.link, rel='alternate')
    # Fall back to a fixed subtitle when the source feed has no description.
    subtitle = feed.feed.description or 'PocketCast'
    generator.subtitle(subtitle)
    return generator
def gen_feed(title, author, feed_url, url, subtitle, logo, categories=None, album=False, licenses=False):
    """Build a podcast-enabled FeedGenerator populated with the given metadata.

    Category and rights information is only emitted for albums with
    categories and for non-empty license lists respectively.
    """
    feed = FeedGenerator()
    feed.load_extension("podcast")

    feed.id(feed_url)
    feed.title(title)
    feed.author(author)
    feed.link(href=url)
    feed.link(href=feed_url, rel="self")
    feed.logo(logo)
    feed.subtitle(subtitle)
    feed.language("en")
    feed.generator(
        generator="reel2bits",
        uri=f"https://{current_app.config['AP_DOMAIN']}",
        version=g.cfg["REEL2BITS_VERSION"],
    )

    # Albums carry an iTunes category plus generic category terms.
    if album and categories:
        feed.podcast.itunes_category(categories[0])
        feed.category([{"term": c, "label": c} for c in categories])

    # Aggregate rights statement pointing at the per-track licenses.
    if licenses:
        feed.rights("See individual tracks: " + ", ".join(licenses))

    return feed
def route(user):
    """Render the RSS feed for all posts, or for a single user's posts."""
    init_session()

    feed = FeedGenerator()
    feed.title(session['site_name'])
    feed.subtitle('Trash bag of links')
    feed.generator(session['site_name'])
    feed.link(href=get_config_value('address'))
    feed.logo(f'{get_config_value("address")}/static/img/logo.png')
    feed.language('en')

    if user is None:
        rss_posts = get_all_posts_for_rss()
    else:
        rss_posts = get_all_user_posts_for_rss(user)

    for post in reversed(rss_posts):
        # Truncate long titles to 60 characters with an ellipsis.
        if len(post.title) > 60:
            post_title = Markup(post.title[:60] + '...')
        else:
            post_title = Markup(post.title)
        item = feed.add_entry()
        item.title(f'[{post.tags}] {post_title}')
        item.link(href=html.unescape(Markup(post.url)))
        item.author(name=post.username)
        item.pubDate(
            datetime.datetime.fromtimestamp(post.datetime).strftime('%c +10'))
        item.updated(
            datetime.datetime.fromtimestamp(post.updated).strftime('%c +10'))

    response = make_response(feed.rss_str(pretty=True))
    response.headers.set('Content-Type', 'application/rss+xml')
    return response
def RSSClient():
    """Serve an RSS feed built from the mailbox configured under rss_name."""
    rss_name = request.args.get('rss_name', 'None')
    if rss_name == 'None':
        abort(404)

    generator = FeedGenerator()
    #TODO create icon
    # fg.icon('http://www.det.ua.pt')
    generator.title(config['rss'][rss_name]['title'])
    generator.description(config['rss'][rss_name]['description'])
    if 'language' in config['rss'][rss_name]:
        generator.language(config['rss'][rss_name]['language'])
    generator.link(href=config['rss'][rss_name]['href'], rel='related')

    client = EmailClient(rss_name)
    # Newest messages first, capped at the configured number of items.
    for msgn in reversed(client.listBox()[:config['rss'].get('maxitems', 10)]):
        app.logger.info("RSS Entry: " + msgn.decode('utf-8'))
        em = client.getEMail(msgn)
        entry = generator.add_entry()
        entry.title(em['subject'])
        item_url = (config["main"]["baseurl"] + 'mail?rss_name=' + rss_name +
                    '&uid=' + msgn.decode('utf-8'))
        entry.guid(item_url)
        entry.link({'href': item_url, 'rel': 'alternate'})
        entry.pubDate(em['date'])
        entry.content(em['body'])

    response = make_response(generator.rss_str(pretty=True))
    response.headers["Access-Control-Allow-Origin"] = "*"
    response.headers['Content-Type'] = 'application/rss+xml'
    return response
def generate_feed(page=1):
    """Build an Atom feed of the most recently updated pub packages.

    :param page: 1-based page number into the package list (10 per page).
    :return: a populated FeedGenerator.
    """
    feed = FeedGenerator()
    feed.id("https://pub.dartlang.org/feed.atom")
    feed.title("Pub Packages for Dart")
    feed.link(href="https://pub.dartlang.org/", rel="alternate")
    feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
    feed.description("Last Updated Packages")
    feed.author({"name": "Dart Team"})
    # BUG FIX: removed the counter `i` that was incremented but never used.
    pager = QueryPager(int(page), "/feed.atom?page=%d",
                       Package.all().order('-updated'),
                       per_page=10)
    for item in pager.get_items():
        entry = feed.add_entry()
        for author in item.latest_version.pubspec.authors:
            entry.author({"name": author[0]})
        entry.title("v" + item.latest_version.pubspec.get("version") +
                    " of " + item.name)
        entry.link(link={"href": "https://pub.dartlang.org/packages/" +
                         item.name, "rel": "alternate", "title": item.name})
        # NOTE(review): uuid.uuid5 takes a *text* name on Python 3; passing
        # the encoded bytes here only works on Python 2 — confirm the
        # runtime before porting.
        entry.id(uuid.uuid5(
            uuid.NAMESPACE_URL,
            ("https://pub.dartlang.org/packages/" + item.name + "#" +
             item.latest_version.pubspec.get("version")).encode('utf-8')).urn)
        entry.description(
            item.latest_version.pubspec.get("description", "Not Available"))
        readme = item.latest_version.readme_obj
        if readme is not None:
            entry.content(readme.render(), type='html')
        else:
            entry.content("<p>No README Found</p>", type='html')
    return feed
def feed(request):
    """Serve the Atom feed for the blog resolved from the request's host."""
    http_host = request.META['HTTP_HOST']

    # The bare platform hosts have no blog of their own.
    if http_host == 'bearblog.dev' or http_host == 'www.bearblog.dev' or http_host == 'localhost:8000':
        return redirect('/')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        # Platform subdomain: look the blog up by its subdomain.
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(blog.subdomain)
    else:
        # Custom domain.
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host

    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        # BUG FIX: the entry link pointed back at the feed itself; each entry
        # should link to its own post (matching the entry id above).
        fe.link(href=f"{root}/{post.slug}")
        fe.content(unmark(post.content))

    atomfeed = fg.atom_str(pretty=True)
    return HttpResponse(atomfeed, content_type='application/atom+xml')
def cache_rss_latest(user_slug):
    """Build the RSS XML for a user's most recently changed documents.

    :param user_slug: slug identifying the user whose documents to list.
    :return: the serialized RSS document from feedgen's rss_str(pretty=True).
    """
    articles = data.userDocumentLastChanged_list(user_slug)
    netloc = bottle.request.urlparts.netloc
    fg = FeedGenerator()
    fg.id(abs_url(bottle.request, '/user/%s' % user_slug))
    fg.title('Nigel Chapman (%s)' % netloc)
    fg.subtitle('Long reads on Christian thought')  # <-- Set METADATA for this
    # fg.author( {'name':'Nigel Chapman','email':'*****@*****.**'} )
    fg.logo('https://%s/static/site-image.png' % (netloc))
    fg.link(href='https://%s' % netloc, rel='self')
    # fg.link(href='https://%s/rss/%s.xml' % (netloc, user_slug), rel='self')
    fg.language('en')
    # NOTE(review): the RSS <ttl> element is expressed in minutes, so
    # 24 * 3600 is 60 days rather than 24 hours — confirm whether 1440
    # (minutes in a day) was intended.
    fg.ttl(24 * 3600)
    for a in articles:
        fe = fg.add_entry()
        article_uri = 'read/%s/%s' % (a['user'], a['slug'])
        fe.id(abs_url(bottle.request, article_uri))
        fe.title(a['title'])
        fe.description(a['summary'])
        fe.link(href=abs_url(bottle.request, article_uri))
        # NOTE(review): name/email look swapped here (the original author
        # even wrote "Wierdly backwards") — either this call or the upstream
        # dict keys are reversed; confirm against the data layer before
        # changing, since "fixing" only one side would break output.
        fe.author(name=a['email'], email=a['author'])  # <-- Wierdly backwards
        fe.published(a['published_time'])
    feed_xml = fg.rss_str(pretty=True)
    return feed_xml
def setUp(self):
    """Create a test feed with three episodes that share one entry id."""
    fg = FeedGenerator()
    self.feedId = 'http://example.com'
    self.title = 'Some Testfeed'

    fg.id(self.feedId)
    fg.title(self.title)
    # The [0] index on the returned link list is kept from the original code.
    fg.link(href='http://lkiesow.de', rel='alternate')[0]
    fg.description('...')

    episode = fg.add_entry()
    episode.id('http://lernfunk.de/media/654321/1')
    episode.title('The First Episode')
    episode.content(u'…')

    # Use also the different name add_item
    episode = fg.add_item()
    episode.id('http://lernfunk.de/media/654321/1')
    episode.title('The Second Episode')
    episode.content(u'…')

    episode = fg.add_entry()
    episode.id('http://lernfunk.de/media/654321/1')
    episode.title('The Third Episode')
    episode.content(u'…')

    self.fg = fg
def build_rss_feeds(blog_posts):
    """Write the blog RSS feed (with media thumbnails) to _site/blog/feed.xml.

    Only posts flagged ``publish`` are included.  Post dates are parsed from
    "%Y-%m-%d" and published in the CET timezone.
    """
    feed = FeedGenerator()
    feed.load_extension("media", rss=True, atom=True)
    base = "https://whotracks.me"
    for post in blog_posts:
        if post["publish"]:
            entry = feed.add_entry()
            entry.id(f'{base}/blog/{post["filename"]}.html')
            entry.title(post["title"])
            entry.link(link={"href": f"{base}/blog/{post['filename']}.html"})
            entry.author({"name": post["author"]})
            # BUG FIX: datetime.replace(tzinfo=<pytz zone>) attaches the
            # zone's raw LMT offset; pytz zones must be applied with
            # localize() to produce the correct UTC offset.
            entry.pubDate(
                timezone("CET").localize(
                    datetime.strptime(post["date"], "%Y-%m-%d")))
            entry.description(post["subtitle"])
            entry.media.thumbnail(
                url=f'https://whotracks.me/static/img/{post["header_img"]}')
    feed.title("WhoTracksMe blog")
    feed.description("By the Ghostery tech team")
    feed.link(link={"href": f"{base}/blog.html"})
    feed.id("wtm")
    feed.language("en")
    feed.logo(f"{base}/static/img/who-tracksme-logo.png")
    feed.rss_file("_site/blog/feed.xml")
def rss(_): """Returns the XML content of my RSS feed for the music part of the website. NOTE: We are doing no caching here at all right now, because this function is very fast and the website has no traffic. If this situation changes, then I should cache it so that I don't build this object from scratch every time.""" generator = FeedGenerator() # Add basic metadata. generator.title("Paul's Music Feed") generator.author(name=MY_NAME, email=MY_EMAIL) generator.contributor(name=MY_NAME, email=MY_EMAIL) # RSS requires that we point to our own feed here. Not sure why. generator.link(href=(URL_ROOT + "rss"), rel="self") favicon_path = URL_ROOT + "static/favicon.png" generator.icon(favicon_path) generator.logo(favicon_path) generator.subtitle( "A feed for anyone who wants to know what albums I'm liking.") generator.language("en") albums = get_recent_music(quantity=30) for album in albums: entry = generator.add_entry() entry.title(album.name) path_to_album = URL_ROOT + "music/music/{}".format(album.id) entry.guid(path_to_album, permalink=True) entry.description(album.description()) entry.updated(album.reviewed_at) entry.published(album.reviewed_at) entry.author(name=MY_NAME, email=MY_EMAIL) entry.link(href=path_to_album, rel="alternate") entry.category(term="score__{}".format(album.rating)) return HttpResponse(generator.rss_str())
def out_rss(self, filename):
    """Serialize this albo as an RSS file using the albopop extension."""
    feed = FeedGenerator()
    feed.register_extension('albopop', AlbopopExtension, AlbopopEntryExtension)

    # Channel-level metadata.
    feed.id(self.url)
    feed.title(self.title)
    feed.description(self.title)
    feed.author({'name': 'alboPOP', 'email': ''})
    feed.link(href=self.url)
    feed.pubDate(formatdate())
    feed.webMaster(self.webMaster)
    feed.docs('https://github.com/mfortini/alboPOP_saga')
    feed.language('it')
    feed.albopop.categoryName(self.categoryName)
    feed.albopop.categoryType(self.categoryType)

    # One RSS item per albo entry.
    for item in self.items:
        entry = feed.add_entry()
        entry.id(item['link'])
        entry.category(term=item['tipo'])
        entry.pubdate(item['pubDate'])
        entry.link(href=item['link'])
        entry.title(item['title'])
        entry.description(item['description'])
        entry.albopop.categoryUID(str(item['numero']) + '/' + str(item['anno']))

    feed.rss_file(filename)
def generate_feed():
    """Render the site's pages as an RSS document and return it as a string."""
    from feedgen.feed import FeedGenerator
    feed_name = 'feed_name'
    fg = FeedGenerator()
    fg.id('xxxurl/' + feed_name)
    fg.title(feed_name)
    fg.link(href='xxxurl/' + feed_name, rel='alternate')
    # fg.logo('http://ex.com/logo.jpg')
    fg.subtitle('by FeedGenerator')
    fg.link(href='xxxurl/' + feed_name + 'atom', rel='self')
    fg.language('zh-cn')
    for page in sorted(pages):
        fe = fg.add_entry()
        fe.id(page.metadata['url'])
        fe.title(page.metadata['title'])
        fe.link(href=page.metadata['url'])
        fe.description('\n\n' + page.to_html() + '\n')
    return fg.rss_str(pretty=True)
    # BUG FIX: removed unreachable code that followed this return — an
    # alternative branch serving a cached feed file (Response(open(...)) /
    # abort(404)) could never execute.
def generate_feeds(config_data, output_path, drafts=False):
    """Write atom.xml and rss.xml for the configured blog posts.

    NOTE: the ``drafts`` flag is accepted for interface compatibility but is
    not used by this function.
    """
    fg = FeedGenerator()
    fg.id("1234")
    fg.title(config_data["RSS_title"])
    fg.author({
        "name": config_data["RSS_author_name"],
        "email": config_data["RSS_author_email"],
    })
    fg.link(href=config_data["RSS_link"], rel="alternate")
    fg.logo(config_data["RSS_logo"])
    fg.description(config_data["RSS_subtitle"])
    fg.link(href=config_data["RSS_link"] + "/test.atom", rel="self")
    fg.language(config_data["RSS_language"])

    for post in config_data["blog_posts"]:
        post_url = config_data["RSS_link"] + post["url"] + "/"
        entry = fg.add_entry()
        entry.id(post_url)
        entry.title(post["post_title"])
        entry.summary(post["abstract"])
        # Posts are dated YYYY-MM-DD; publish them as midnight UTC.
        entry.published(
            datetime.strptime(post["date"], "%Y-%m-%d").isoformat() + "+00:00")
        entry.link(href=post_url)

    fg.atom_file(os.path.join(output_path, "atom.xml"))
    fg.rss_file(os.path.join(output_path, "rss.xml"))
def feed(request):
    """Serve the Atom feed for the blog resolved from the request address."""
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        # BUG FIX: the entry link pointed at the feed itself rather than the
        # post; link each entry to its own post URL (same as the entry id).
        fe.link(href=f"{root}/{post.slug}")
        fe.content(unmark(post.content))

    atomfeed = fg.atom_str(pretty=True)
    return HttpResponse(atomfeed, content_type='application/atom+xml')
def feed(request):
    """Serve the blog feed as RSS (?type=rss) or Atom (default)."""
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'http://{root}/')
    fg.author({'name': blog.subdomain, 'email': blog.user.email})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"http://{root}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"http://{root}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': blog.user.email})
        # BUG FIX: entries linked to the feed URL instead of the post; link
        # each entry to its own post URL (matching the entry id).
        fe.link(href=f"http://{root}/{post.slug}/")
        fe.content(unmark(post.content))
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"http://{root}/feed/?type=rss", rel='self')
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type='application/rss+xml')
    else:
        fg.link(href=f"http://{root}/feed/", rel='self')
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type='application/atom+xml')
def _build_feed(changelog_entries, format):
    """Render the changelog entries as a feed string.

    :param changelog_entries: dicts with 'version', 'date' (%Y-%m-%d), 'text'.
    :param format: output format; currently only 'atom' is supported.
    :raises Exception: for any unsupported format.
    """
    from feedgen.feed import FeedGenerator
    from datetime import datetime
    import pytz

    tz = pytz.timezone('Europe/Berlin')

    BASE_URL = 'https://manual.uberspace.de/en/changelog.'
    HTML_URL = BASE_URL + 'html'

    fg = FeedGenerator()
    fg.id(HTML_URL)
    fg.title('Uberspace 7 Updates')
    fg.link(href=HTML_URL, rel='alternate')
    fg.link(href=BASE_URL + format, rel='self')
    fg.language('en')

    for entry in changelog_entries:
        # Anchor like "#v7-1-2", derived from the version number.
        deeplink = '{}#v{}'.format(HTML_URL, entry['version'].replace('.', '-'))
        date = tz.localize(datetime.strptime(entry['date'], '%Y-%m-%d'))
        fe = fg.add_entry()
        fe.id(deeplink)
        fe.title('[{}] - {}'.format(entry['version'], entry['date']))
        fe.link(href=deeplink)
        fe.updated(date)
        fe.content(entry['text'].replace('\n', '<br>'))

    if format == 'atom':
        return fg.atom_str(pretty=True)
    else:
        # BUG FIX: corrected the "unkown" typo in the error message.
        raise Exception('unknown feed format: ' + str(format))
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom.

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which events
                         will be returned.  Usually something involving
                         the start/end date of the event.
    :return: a BytesIO holding the pretty-printed Atom XML.
    """
    # Load only the columns needed for serialization; acl_entries are
    # subquery-loaded for the per-event access check below.
    query = (Event.query
             .filter(Event.category_chain_overlaps(category.id),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'category_id', 'start_dt', 'title', 'description',
                                'protection_mode', 'access_key'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    # Drop events the requesting user cannot access.
    events = [e for e in query if e.can_access(user)]

    feed = FeedGenerator()
    feed.id(url)
    feed.title(f'Indico Feed [{category.title}]')
    feed.link(href=url, rel='self')

    for event in events:
        entry = feed.add_entry(order='append')
        entry.id(event.external_url)
        entry.title(event.title)
        # Descriptions may contain HTML; sanitize and emit as type='html'.
        entry.summary(sanitize_html(str(event.description)) or None, type='html')
        entry.link(href=event.external_url)
        entry.updated(event.start_dt)
    return BytesIO(feed.atom_str(pretty=True))
def generate_rss(show_info, show_uri, country_code):
    """Build an RSS podcast feed string for a Spotify show.

    :param show_info: show metadata dict from the Spotify API.
    :param show_uri: Spotify show URI/ID used when paging episodes.
    :param country_code: market passed to the episodes endpoint.
    :return: the RSS document as a str.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.description(show_info['description'])
    fg.author({'name': show_info['publisher']})
    fg.title(show_info['name'])
    fg.link({'href': show_info['external_urls']['spotify']})
    fg.id(show_uri)
    fg.image(show_info.get('images')[0]['url'])

    total_episodes = show_info['episodes']['total']
    added_episodes = 0
    # Page through the episodes 50 at a time.
    # BUG FIX: use < instead of != and bail out on an empty page, so a stale
    # 'total' from the API cannot make this loop run forever.
    while added_episodes < total_episodes:
        episodes = sp.show_episodes(show_id=show_uri, limit=50,
                                    offset=added_episodes,
                                    market=country_code)
        if not episodes['items']:
            break
        for episode in episodes['items']:
            ent = fg.add_entry()
            ent.podcast.itunes_duration(int(episode['duration_ms'] / 1000))
            ent.title(episode.get('name'))
            ent.guid(episode['uri'])
            ent.published(
                datetime.strptime(episode['release_date'],
                                  '%Y-%m-%d').replace(tzinfo=timezone.utc))
            ent.description(episode['description'])
            ent.id(episode['uri'])
            ent.enclosure(
                url=
                f"https://anon-podcast.scdn.co/{episode['audio_preview_url'].split('/')[-1]}",
                length=0,
                type='audio/mpeg')
            added_episodes += 1
    return fg.rss_str().decode('utf-8')
def create_feed(reports, title, fn):
    """Write an RSS file announcing the 75 most recent report versions."""
    # The feed is a notice of new (versions of) reports, so collect the
    # most recent report-versions.
    items = []
    for report_index, report in enumerate(reports):
        for version_index, version in enumerate(report['versions']):
            items.append((version['date'], report_index, version_index))
    items.sort(reverse=True)
    items = items[0:75]

    # Create a feed.
    from feedgen.feed import FeedGenerator
    feed = FeedGenerator()
    feed.id(SITE_URL)
    feed.title(SITE_NAME + ' - ' + title)
    feed.link(href=SITE_URL, rel='alternate')
    feed.language('en')
    feed.description(
        description="New Congressional Research Service reports tracked by " + SITE_NAME + ".")

    for _, report_index, version_index in items:
        report = reports[report_index]
        version = report["versions"][version_index]
        fe = feed.add_entry()
        fe.id(SITE_URL + "/" + get_report_url_path(report, '.html'))
        fe.title(version["title"])
        fe.description(description=version["summary"])
        fe.link(href=SITE_URL + "/" + get_report_url_path(report, '.html'))
        fe.pubdate(version["date"])

    feed.rss_file(os.path.join(BUILD_DIR, fn))
def create_rss_feed(poemlink):
    """
    Takes the link that we fetched and then writes it to an xml file for a
    feed reader to fetch

    :param poemlink: url
    :type poemlink: string
    """
    kolkata = pytz.timezone('Asia/Kolkata')

    # Create a feedgen feed instance and populate it with my details
    poemfeed = FeedGenerator()
    poemfeed.title("Jason's PF feed")
    poemfeed.link(href=poemlink)
    poemfeed.description("Poem of the day")
    poemfeed.lastBuildDate(datetime.now(kolkata))

    # Create an rss entry with the url we scraped and parsed
    entry = poemfeed.add_entry()
    entry.title(f"Poem for {date.today()}")
    entry.link(href=poemlink)
    entry.guid(f"Poem for {date.today()}")
    entry.pubDate(datetime.now(kolkata))

    # Write the feed
    poemfeed.rss_file('poem.xml')
def get_weekly_jobs_rss():
    """Serve the cached weekly HEP jobs digest as a single-entry RSS feed."""
    redis_url = current_app.config.get("CACHE_REDIS_URL")
    jobs_weekly_email_key = current_app.config.get(
        "WEEKLY_JOBS_EMAIL_REDIS_KEY")
    redis = StrictRedis.from_url(redis_url)

    # The digest is stored as a redis hash with bytes keys and values.
    raw_email_entry = redis.hgetall(jobs_weekly_email_key)
    title = raw_email_entry[b"title"].decode("UTF-8")
    content = raw_email_entry[b"html"].decode("UTF-8")
    timestamp = float(raw_email_entry[b"timestamp"])
    date = datetime.fromtimestamp(timestamp, tz=pytz.UTC)

    feed = FeedGenerator()
    feed.link(href=request.url_root)
    feed.title("INSPIRE Weekly HEP Jobs")
    feed.author({"name": "inspirehep.net"})
    feed.description("Feed for weekly HEP jobs from INSPIRE")
    feed.pubDate(date)
    feed.lastBuildDate(date)

    # Single entry: the latest weekly digest.
    entry = feed.add_entry()
    entry.id(str(timestamp))
    entry.title(title)
    entry.content(content)
    entry.published(date)

    return Response(response=feed.rss_str(), mimetype="application/rss+xml")
def create_rss_feed(filename='rss-new.xml'):
    """Build the RSS feed from the configured feed list and write it to disk.

    :param filename: path of the RSS file to write.
    :return: the populated FeedGenerator.
    """
    fg = FeedGenerator()
    fg.id(config.get("DEFAULT", "WEBSITE"))
    fg.title(config.get("DEFAULT", "RSSTITLE"))
    fg.description(config.get("DEFAULT", "RSSDESCRIPTION"))
    fg.author({
        "name": config.get("DEFAULT", "AUTHOR"),
        "email": config.get("DEFAULT", "EMAIL"),
    })
    fg.link(href=config.get("DEFAULT", "RSSLINK"), rel='self')
    fg.subtitle(config.get("DEFAULT", "RSSSUBTITLE"))
    fg.language(config.get("DEFAULT", "LANGUAGE"))

    for entry in get_feed_list():
        fe = fg.add_entry()
        fe.id(entry.id)
        fe.title(entry.title)
        fe.description(entry.description)
        fe.published(entry.published)
        fe.pubDate(entry.published)
        fe.link(href=entry.id)

    # Uncomment the bottom if you want an Atom RSS file created instead of an rss.xml
    # atomfeed = fg.atom_str(pretty=True)  # Get the ATOM feed as string
    # fg.atom_file('atom-new.xml')  # Write the ATOM feed to a file

    rssfeed = fg.rss_str(pretty=True)  # Get the RSS feed as string
    fg.rss_file(filename)  # Write the RSS feed to a file
    return fg
def create_feed(): """RSS 피드 생성하기""" # 피드 데이터 저장 전용 객체 fg = FeedGenerator() # 사용자 정의 네임 스페이스를 등록하고 # 이전에 만들었던 클래스 적용하기 fg.register_extension( 'book', extension_class_feed=BookFeedExtension, extension_class_entry=BookEntryExtension, ) # <channel><title> 요소 fg.title("위키북스의 도서 목록") # <channel><link> 요소: <link> 태그의 내용은 href 속성으로 지정 fg.link(href="http://example.com") # <channel><description> 요소 fg.description("설명을 입력했다고 가정합니다.") # <channel><item> 요소 fe = fg.add_entry() # <channel><item><title> 요소 fe.title("파이썬을 이용한 머신러닝, 딥러닝 실전 앱 개발") # <channel><item><link> 요소 fe.link(href="http://example.com") # <channel><item><description> 요소 fe.description('<a href="http://example.com">이스케이프 처리 확인 전용 링크</a>' "설명을 입력했다고 가정합니다.") # <channel><item><book:writer> 요소(사용자 정의 네임 스페이스를 사용하는 요소) fe.book.publisher({'name': "위키북스", 'id': "1"}) # 값은 딕셔너리 자료형으로 전달합니다. # 피드를 RSS 형식으로 변환(pretty=True로 들여쓰기 적용) return fg.rss_str(pretty=True)
def setUp(self):
    """Prepare a media-extension feed with basic channel metadata."""
    generator = FeedGenerator()
    generator.load_extension('media')
    generator.id('id')
    generator.title('title')
    generator.link(href='http://example.com', rel='self')
    generator.description('description')
    self.fg = generator
def __init__(self, url: str, name: str, email: str, title: str = None,
             generator: str = None, generator_version: str = None,
             logo: str = None, icon: str = None, description: str = None,
             language: str = None) -> None:
    """Create a feed wrapper and populate its channel-level metadata.

    Optional arguments default to None and are passed straight through to
    the underlying FeedGenerator setters.
    """
    self.name = name
    self.email = email

    feed = FeedGenerator()
    self.fg = feed

    # Feed identity and links.
    feed.id(url + "feed.atom")
    feed.link(href=url + "feed.xml", rel="self")
    feed.link(href=url, rel="alternate")

    # People.
    feed.author(name=name, email=email)
    feed.contributor(name=name, email=email)
    feed.managingEditor(email)
    feed.webMaster(email)

    # Presentation metadata.
    feed.title(title)
    feed.generator(generator=generator, version=generator_version)
    feed.logo(logo)
    feed.icon(icon)
    feed.description(description)
    feed.language(language)
def write_feed(posts):
    """Write the updates feed to both rss.xml and atom.xml under DIST_DIR."""
    fg = FeedGenerator()
    fg.id(f"{BASE_URL}/updates/")
    fg.title("St Columbas's - Updates")
    fg.author(name="St Columba's Free Church", email="*****@*****.**")
    fg.link(href=f"{BASE_URL}/updates")
    fg.logo(f"{BASE_URL}/static/images/stcolumbas-logo-small.png")
    fg.language("en")
    fg.description("St Columba's Free Church Updates")

    # One entry per post, with the rendered markdown as content.
    for post in posts:
        entry = fg.add_entry()
        entry.id(f'{BASE_URL}/{post["path"]}')
        entry.title(post["title"])
        entry.link(href=f'{BASE_URL}/{post["path"]}')
        entry.author(name=post["author"])
        entry.summary(post["intro"])
        entry.content(markdown(post["body"]))

    rss_path = os.path.join(DIST_DIR, "updates", "rss.xml")
    atom_path = os.path.join(DIST_DIR, "updates", "atom.xml")
    print(rss_path)
    fg.rss_file(rss_path)
    print(atom_path)
    fg.atom_file(atom_path)
def latestRss(userID):
    """Return an RSS document listing a user's unseen, already-aired episodes."""
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True,
                                           onlyUnseen=True)
        # Keep only episodes that have already aired.
        for episode in shows[showID]['episodes']:
            if episode['airdate'] and airdateKey(episode['airdate']) < today:
                episodes.append((showID, episode))
    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'],
                                           episode['season'],
                                           episode['episode'],
                                           episode['title']))

    return feed.rss_str(pretty=True)
def rss(lang: str):
    """Serve the blog RSS feed for the given language code.

    :param lang: short alphabetic language code, e.g. 'ar' or 'en'.
    """
    # BUG FIX: the original condition `not isinstance(lang, str) and ...`
    # was never true for a str argument, so the validation was dead code.
    # Reject anything that is not a short alphabetic language code.
    if not (isinstance(lang, str) and lang.isalpha() and len(lang) <= 3):
        abort(404)
    posts = get_blog_posts(lang)
    if not posts:
        abort(404)
    fg = FeedGenerator()
    fg.title('مدونة يوسف شعبان' if lang == 'ar' else 'yshalsager Blog')
    fg.description('آخر التدوينات على مدونتي' if lang == 'ar' else 'Latest Feeds from my blog')
    fg.link(href=current_app.config['DOMAIN'], rel='alternate')
    fg.logo(f"{current_app.config['DOMAIN']}/static/img/logo.svg")
    fg.language(lang)
    for post in posts:
        fe = fg.add_entry()
        fe.title(post.title)
        fe.link(
            href=f"{current_app.config['DOMAIN']}/{lang}/blog/{post.file.stem}"
        )
        fe.description(post.html)
        # Dates are stored as DD-MM-YYYY; publish as midnight UTC.
        fe.pubDate(
            datetime.combine(datetime.strptime(post.date, "%d-%m-%Y"),
                             datetime.min.time(), tzinfo=timezone.utc))
    response = make_response(fg.rss_str(pretty=True))
    response.headers.set('Content-Type', 'application/rss+xml')
    return response