def generate_feed():
    """Render the three newest posts as an RSS 2.0 feed and copy site assets.

    Writes rss.xml under BUILD_PATH and copies the static template files
    (index.html, CNAME) into the build directory.
    """
    # Drop any stale Jekyll posts directory before regenerating.
    if path.isdir(JEKYLL_POSTS_DIR):
        shutil.rmtree(JEKYLL_POSTS_DIR)

    # Post paths sort chronologically, so the slice keeps the 3 newest.
    latest = sorted(glob('posts/*/*/*.html'))[-3:]

    entries = []
    for post_path in latest:
        post = read_data(post_path)
        meta = post['front_matters']
        entries.append(Item(
            title=meta['title'],
            link=meta['link'],
            description=post['content'],
            author='喷嚏网',
            guid=Guid(meta['link']),
            pubDate=meta['date'],
        ))
    entries.reverse()  # newest entry first

    feed = Feed(
        title="喷嚏图卦",
        link="http://www.pentitugua.com/rss.xml",
        description="【喷嚏图卦】喷嚏网(www.dapenti.com)-阅读、发现和分享:8小时外的健康生活!",
        language="zh-CN",
        lastBuildDate=entries[0].pubDate,
        items=entries)

    # force order attributes
    xml = re.sub('<rss[^>]+>',
                 '<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">',
                 feed.rss())

    with open(path.join(BUILD_PATH, 'rss.xml'), 'w') as f:
        f.write(xml)

    # Carry the static template files over into the build output.
    for asset in ['index.html', 'CNAME']:
        shutil.copy(path.join(BASE_PATH, 'templates', asset),
                    path.join(BUILD_PATH, asset))
def generate_rss(self, rss_file=None, data=None):
    """ Generate RSS file from news

    :param rss_file: Path to file rss.xml
    :type rss_file: str
    :param data: Data to create RSS from
    :type data: dict data['news'] = { ... }
    :return: Boolean
    :rtype: bool
    :raises SystemExit: If 'news' key is not found in 'data' dict
    :raises SystemExit: If rss file cannot be opened
    """
    if rss_file is not None:
        Utils.verbose("[rss] rss_file set to %s" % rss_file)
        self.rss_file = rss_file

    if data is None:
        data = self.get_news()
    elif 'news' not in data:
        Utils.error("Could not find 'news' key in data")

    # Nothing to publish: warn but report success.
    if not data['news']:
        Utils.warn("No data to display")
        return True

    try:
        # Hoist config reads that are identical for every news entry.
        # The config file stores '%' escaped as '%%' in the date format.
        date_format = (self.config.get('RSS', 'rss.date.format')
                       .replace('%%', '%'))
        feed_author = self.config.get('RSS', 'feed.author')
        news_link = self.config.get('RSS', 'feed.news.link')

        entries = [
            Item(title=entry['title'],
                 description=entry['text'],
                 author=feed_author,
                 guid=Guid(news_link + '#' + str(entry['item'])),
                 pubDate=datetime.strptime(entry['date'], date_format))
            for entry in data['news']
        ]

        feed = Feed(title=self.config.get('RSS', 'feed.title'),
                    link=self.config.get('RSS', 'feed.link'),
                    description=self.config.get('RSS', 'feed.description'),
                    language=self.config.get('RSS', 'feed.language'),
                    lastBuildDate=datetime.now(),
                    items=entries)

        # self.fh may have been injected (e.g. for tests); only open when
        # missing, and only close when we're writing to a real file path.
        if self.fh is None:
            self.fh = open(self.rss_file, 'w')
        Utils.uprint(feed.rss(), to=self.fh)
        if self.rss_file is not None:
            self.fh.close()
    except (NoOptionError, NoSectionError) as err:
        Utils.error("Option missing in config file: %s" % str(err))
    except (OSError, IOError) as err:
        Utils.error("Can't open file %s: %s" % (self.rss_file, str(err)))
    return True
def proposals(space):
    """Return the active proposals of a Snapshot space as an RSS feed.

    On a non-2xx API response, returns a JSON error payload instead.
    """
    # NOTE(review): `space` is interpolated directly into the GraphQL query
    # string — worth sanitising/escaping if this route takes untrusted input.
    graphql_query = """
    {
      proposals(
        orderBy: "created",
        orderDirection: desc,
        where:{space:"%s", state:"active"}
      ) {
        id
        title
        body
        created
        author
        state
      }
    }
    """ % space

    resp = requests.post('%s/graphql' % SNAPSHOT_API_ENDPOINT,
                         json={'query': graphql_query})

    # Error response: surface status and body as JSON.
    if not resp.ok:
        return jsonify({'code': resp.status_code, 'text': resp.text})

    # Build list of items
    entries = [
        Item(
            title=proposal['title'],
            link='%s/#/%s/proposal/%s' % (SNAPSHOT_BASE_URL, space,
                                          proposal['id']),
            description=proposal['body'],
            author=proposal['author'],
            guid=Guid(proposal['id'], False),
            pubDate=datetime.fromtimestamp(int(proposal['created'])),
        )
        for proposal in resp.json()['data']['proposals']
    ]

    feed = Feed(title='%s Proposals' % space,
                link='%s/api/v1/spaces/%s/proposals' % (RSS_FEED_BASE_URL,
                                                        space),
                description="Proposals for %s" % space,
                language='en-US',
                lastBuildDate=datetime.now(),
                items=entries)
    return feed.rss()
def rss_view(id):
    """Render the stored Wallapop search `id` as an RSS feed."""
    # Get all searchs from user
    search = Search.query.get(id)

    # Wallapop's REST API expects empty strings (not zeros) for
    # unbounded price limits.
    min_price = '' if search.min_price == 0 else search.min_price
    max_price = '' if search.max_price == 0 else search.max_price

    # Cookies the endpoint requires to answer a geo-located search.
    cookies = {
        'searchLat': str(search.lat),
        'searchLng': str(search.lng),
        'content': str(search.name),
        'hideCookieMessage': 'true',
        'userHasLogged': '%7B%22hasLogged%22%3Afalse%2C%22times%22%3A1%7D '
    }
    urlSearch = (f'https://es.wallapop.com/rest/items?dist={search.distance}'
                 f'&kws={search.name}&lat={search.lat}&lng={search.lng}'
                 f'&maxPrice={max_price}&minPrice={min_price}')
    results = requests.get(urlSearch, cookies=cookies).json()['items']

    # Generate RSS — publishDate is epoch milliseconds, so the last three
    # digits are stripped before conversion.
    items = [
        Item(
            title=
            f"{result['title']} - {result['salePrice']}{result['currency']['symbol']}",
            link=f"https://es.wallapop.com/item/{result['url']}",
            description=result['description'],
            author=result['sellerUser']['microName'],
            guid=Guid(result['itemId']),
            pubDate=datetime.utcfromtimestamp(
                int(str(result['publishDate'])[:-3])))
        for result in results
    ]

    # Feed timestamp: newest result if any, otherwise "now".
    lastBuildDate = datetime.now()
    if results:
        lastBuildDate = datetime.utcfromtimestamp(
            int(str(results[0]['publishDate'])[:-3]))

    feed = Feed(
        title=f"{search.name} - Wallaviso RSS",
        link="http://www.wallaviso.com",
        description=
        "Se el primero en Wallapop. Programa tus busquedas y recibe una notificacion al instante.",
        language="es-ES",
        lastBuildDate=lastBuildDate,
        items=items)
    return feed.rss()
def generate_feed(json_list):
    """Build a LogDNA RSS feed string from a list of log-entry dicts.

    Each dict may carry '_line' (message), 'container' (author) and
    'level' (severity, defaults to 'INFO' when missing/empty).
    """
    entries = [
        Item(title=record.get('_line'),
             link='https://app.logdna.com/',
             pubDate=datetime.now(),
             author=record.get('container'),
             # `or` also covers an empty-string level, not just a missing key.
             description=record.get('level') or 'INFO')
        for record in json_list
    ]
    feed = Feed(title="LogDNA RSS Feed",
                link=os.environ.get('RSS_LINK'),
                description='LogDNA RSS Feed for Rainmeter widgets.',
                lastBuildDate=datetime.now(),
                items=entries)
    return feed.rss()
def summary():
    """RSS summary feed (title/author/date/link) for the 10 newest articles."""
    data = requests.get("http://localhost/articles/recent/meta/10").json()

    # NOTE: every item carries the same fixed pubDate and the listing URL
    # as its link, mirroring what the metadata endpoint provides.
    entries = [
        Item(title=record['title'],
             author=record['author'],
             pubDate=datetime.datetime(2014, 12, 29, 10, 00),
             link="http://localhost/articles/recent/meta/10")
        for record in data
    ]

    feed = Feed(
        title="A summary feed listing",
        link="http://localhost/rss",
        description=
        "a summary feed listing the title, author, date, and link for 10 most recent articles",
        language="en-US",
        lastBuildDate=datetime.datetime.now(),
        items=entries)
    return feed.rss()
def full_feed():
    """Build an RSS feed with full article text, tags, and comment counts.

    Fetches the 10 most recent articles, then for each one pulls the full
    body, its comment count, and its tags, and renders everything as RSS 2.0.

    :return: The rendered RSS XML string.
    """
    data = requests.get("http://localhost/articles/recent/10").json()
    articles = []
    for d in data:
        # (removed: unused `article_id = d['url'].split('/')[-1]` — every
        # lookup below uses d['article_id'] directly)
        article_url = "http://localhost/article/" + str(d['article_id'])
        item2 = Item(title=d['title'],
                     pubDate=datetime.datetime(2014, 12, 29, 10, 00),
                     link=article_url)

        # Full article record overrides the summary title and supplies
        # author and body text.
        a_data = requests.get(article_url).json()
        item2.title = a_data['title']
        item2.author = a_data['author']
        item2.description = a_data['body']

        # Comment count for the article.
        c_data = requests.get("http://localhost/article/comments/count/" +
                              str(d['article_id'])).json()
        item2.comments = c_data['count']

        # Article tags become RSS categories.
        t_data = requests.get("http://localhost/article/tags/" +
                              str(d['article_id'])).json()
        item2.categories = t_data['tags']

        articles.append(item2)

    feed = Feed(
        title="Full feed",
        link="http://localhost/rss/full_feed",
        description=
        "A full feed containing the full text for each article, its tags as RSS categories, and a comment count.",
        language="en-US",
        lastBuildDate=datetime.datetime.now(),
        # BUG FIX: rfeed.Feed takes `items=`, not `item=` — the old kwarg
        # raised TypeError at runtime (compare summary(), which uses items=).
        items=articles)
    return feed.rss()
def comment_feed():
    """Build an RSS feed of the comments on the 10 most recent articles.

    :return: The rendered RSS XML string.
    """
    a_data = requests.get("http://localhost/articles/recent/10").json()
    comments = []
    for a in a_data:
        # (removed: unused `article_id = a['url'].split('/')[-1]` — the
        # lookups below use a['article_id'] directly)
        c_response = requests.get("http://localhost/article/comments/" +
                                  str(a['article_id']))
        c_data = c_response.json()
        for c in c_data:
            item3 = Item(
                author=c['author'],
                # BUG FIX: rfeed.Item has no `comment` parameter, so the old
                # `comment=c['comment']` kwarg raised TypeError. The comment
                # text is the item's body, so it belongs in `description`.
                description=c['comment'],
                pubDate=datetime.datetime(2014, 12, 29, 10, 00),
                link="http://localhost/article/" + str(c['article_id']))
            comments.append(item3)

    feed = Feed(title="Comment feed",
                link="http://localhost/rss/comments",
                description="A comment feed for each articles",
                language="en-US",
                lastBuildDate=datetime.datetime.now(),
                # BUG FIX: rfeed.Feed takes `items=`, not `item=` — the old
                # kwarg raised TypeError at runtime.
                items=comments)
    return feed.rss()
os.remove(f) # prep blog posts post_dict = {} items = [] blog_dir = "/home/lbybee/passepartout/notes/blog" blog_posts = [ f for f in os.listdir(blog_dir) if os.path.splitext(f)[1] == ".org" ] for f in blog_posts: post_dict, item = pop_blog(f, blog_dir, post_dict, post_template, main_template) items.append(item) # build index page html = home_template.render(posts=post_dict) html = main_template.render(title="Blog Posts", content_html=html) with open("index.html", "w") as fd: fd.write(html) # build rss page feed = Feed(title="Leland's Blog", link="https://lbybee.github.io", description="My personal blog", language="en-US", lastBuildDate=datetime.now(), items=items) rss = feed.rss() with open("feed.rss", "w") as fd: fd.write(rss)
def get_xml(self: Serializer, response: Response) -> Tuple[str, int]:
    """
    Serialize the provided response data into RSS, version 2.0.

    Parameters
    ----------
    response : Response
        The search response data to be serialized.

    Returns
    -------
    data : str
        The serialized XML results.
    status
        The HTTP status code for the operation.
    """
    # Get the archive info from the first hit. Is this OK?
    # NOTE(review): this raises IndexError when response.hits is empty —
    # confirm callers guarantee at least one hit.
    archive = response.hits[0]["primary_classification"]["archive"]
    archive_id = archive["id"]
    archive_name = archive["name"]
    feed = Feed(
        title=f"{archive_id} updates on arXiv.org",
        link="http://arxiv.org/",
        description=f"{archive_name} ({archive_id}) updates on the arXiv.org e-print archive",
        language="en-us",
        pubDate=datetime.now(),
        lastBuildDate=datetime.now(),
        managingEditor="*****@*****.**"
    )

    # Remove two elements added by the Rfeed package
    feed.generator = None
    feed.docs = None

    # Add extensions that will show up as attributes of the rss element
    feed.extensions.append(Content())
    feed.extensions.append(Taxonomy())
    feed.extensions.append(Syndication())
    feed.extensions.append(Admin())
    feed.image = Image(url="http://arxiv.org/icons/sfx.gif",
                       title="arXiv.org",
                       link="http://arxiv.org")

    # Add each search result "hit" to the feed
    for hit in response:
        # Add links for each author and the abstract to the description
        # element. Authors are comma-separated; each links to an arXiv
        # author search ("Last,+Initials" query form).
        description = "<p>Authors: "
        first = True
        for author in hit['authors']:
            if first:
                first = False
            else:
                description += ", "
            name = f"{author['last_name']},+{author['initials'].replace(' ', '+')}"
            description += f"<a href='http://arxiv.org/search/?query={name}&searchtype=author'>"
            description += f"{author['full_name']}</a>"
        description += f"</p><p>{hit['abstract']}</p>"

        # Create the item element for the "hit"; the guid is the OAI
        # identifier, explicitly marked as not a permalink.
        item = Item(
            title=hit['title'],
            link=url_for("abs_by_id", paper_id=hit['paper_id']),
            # link=f"http://arxiv.org/abs/{hit['paper_id']}",
            description=description,
            guid=Guid(f"oai:arXiv.org:{hit['paper_id']}", isPermaLink=False)
        )
        feed.items.append(item)

    # Print and return the feed content
    data = feed.rss()
    status_code = status.HTTP_200_OK
    return data, status_code
# NOTE(review): the line below is the tail of a function (apparently a call
# wrapping article_pub_date fields) whose start lies outside this chunk.
article_pub_date.hour, article_pub_date.minute)))


def getTitles(link):
    # Scrape the news listing page and summarise each headline article.
    r = requests.get(link)
    soup = BeautifulSoup(r.text, features="html.parser")
    if (r.status_code == 200):
        data = soup.findAll('a', attrs={'class': 'news_post'})
        current = 0
        for headline in data:
            # Only odd-indexed anchors carry the headline link/text;
            # the loop stops once the (even) index 20 is reached.
            if (current % 2 == 1):
                #print(data[current].text)
                getArticleSummary(data[current].get('href'),
                                  data[current].text)
            elif current == 20:
                break
            current = current + 1


# NOTE(review): `link` and `articles_list` are globals defined outside this
# chunk; getArticleSummary presumably appends to articles_list — verify.
getTitles(link)
feed = Feed(title="News Liverpool RSS Feed",
            link=link,
            description="News Aggregated from news.liverpool.ac.uk",
            language="en-US",
            lastBuildDate=datetime.datetime.now(),
            items=articles_list)
print(feed.rss())