def get_articles(hubs=None):
    """Fetch Geektimes articles, optionally restricted to specific hubs.

    hubs: iterable of hub slugs; None/empty falls back to the combined feed.
    Returns a list of unique article entries (first occurrence wins).
    """
    # BUGFIX: original used a mutable default argument (hubs=[]).
    if not hubs:
        return parser.get_articles_from_rss('http://geektimes.ru/rss/hubs', 'geektimes')
    posts = []
    base_url = 'http://geektimes.ru/rss/hub/'
    for hub in hubs:
        for post in parser.get_articles_from_rss(base_url + hub, 'geektimes'):
            # Linear membership: entries are presumably dicts (unhashable),
            # so a set cannot be used here.
            if post not in posts:
                posts.append(post)
    return posts
def get_articles(hubs=None):
    """Fetch Habrahabr articles, optionally restricted to specific hubs.

    hubs: iterable of hub slugs; None/empty falls back to the combined feed.
    Returns a list of unique article entries (first occurrence wins).
    """
    # BUGFIX: original used a mutable default argument (hubs=[]).
    if not hubs:
        # NOTE(review): the literal 'habrahabr' here vs SHORT_NAME below is
        # inconsistent — presumably SHORT_NAME == 'habrahabr'; confirm.
        return parser.get_articles_from_rss('http://habrahabr.ru/rss/hubs', 'habrahabr')
    posts = []
    base_url = 'http://habrahabr.ru/rss/hub/'
    for hub in hubs:
        for post in parser.get_articles_from_rss(base_url + hub, SHORT_NAME):
            if post not in posts:
                posts.append(post)
    return posts
def get_articles(hubs=None):
    """Fetch Geektimes articles, optionally restricted to specific hubs.

    hubs: iterable of hub slugs; None/empty falls back to the combined feed.
    Returns a list of unique article entries (first occurrence wins).
    """
    # BUGFIX: original used a mutable default argument (hubs=[]).
    if not hubs:
        return parser.get_articles_from_rss('http://geektimes.ru/rss/hubs', 'geektimes')
    posts = []
    base_url = 'http://geektimes.ru/rss/hub/'
    for hub in hubs:
        for post in parser.get_articles_from_rss(base_url + hub, 'geektimes'):
            if post not in posts:
                posts.append(post)
    return posts
def get_articles(categories=None):
    """Fetch Trashbox articles for the requested categories.

    categories: list of keys from the url map below; defaults to ['all'].
    Unknown category names are skipped. Returns a list of unique articles.
    """
    urls = {
        'articles': 'b_text',
        'news': 'b_news',
        'all': '1',
        'games': 'games',
        'programs': 'progs',
        'themes': 'themes',
        'questions': 'b_questions',
        'main_page': '1/?approved',
    }
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None:
        categories = ['all']
    if 'all' in categories:
        selected_urls = [urls['all']]
    else:
        # BUGFIX: original filtered on `i in categories`, which is always
        # true; filter against the url map so unknown names don't KeyError.
        selected_urls = [urls[i] for i in categories if i in urls]
    articles = []
    for url in selected_urls:
        feed_url = 'http://trashbox.ru/feed_topics/{0}'.format(url)
        for article in parser.get_articles_from_rss(feed_url, 'trashbox'):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles():
    """Return articles aggregated from the three mobile-review RSS feeds."""
    feed_urls = (
        'http://mobile-review.com.feedsportal.com/c/33244/f/556830/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557686/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557683/index.rss',
    )
    collected = []
    for feed_url in feed_urls:
        collected.extend(parser.get_articles_from_rss(feed_url, SHORT_NAME))
    return collected
def get_articles():
    """Return articles aggregated from the two Helpix RSS feeds."""
    feed_urls = (
        'http://img.helpix.ru/news/shtml/rss.xml',
        'http://helpix.ru/rss/review-helpix.xml',
    )
    collected = []
    for feed_url in feed_urls:
        collected.extend(parser.get_articles_from_rss(feed_url, SHORT_NAME))
    return collected
def get_articles():
    """Return articles aggregated from the three mobile-review RSS feeds."""
    feed_urls = (
        'http://mobile-review.com.feedsportal.com/c/33244/f/556830/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557686/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557683/index.rss',
    )
    collected = []
    for feed_url in feed_urls:
        collected.extend(parser.get_articles_from_rss(feed_url, 'mobile-review'))
    return collected
def get_articles():
    """Return articles aggregated from the two Helpix RSS feeds."""
    feed_urls = (
        'http://img.helpix.ru/news/shtml/rss.xml',
        'http://helpix.ru/rss/review-helpix.xml',
    )
    collected = []
    for feed_url in feed_urls:
        collected.extend(parser.get_articles_from_rss(feed_url, 'helpix'))
    return collected
def get_articles(collections=None):
    """Fetch Medium articles, deduplicated by title.

    collections: Medium collection slugs; None/empty falls back to the
    frontpage-picks feed. Returns a list of article dicts in feed order.
    """
    # BUGFIX: original used a mutable default argument (collections=[]).
    articles = []
    seen_titles = set()  # titles are strings, so O(1) set membership is safe
    if collections:
        for collection in collections:
            parsed = parser.get_articles_from_rss(
                'https://medium.com/feed/{}'.format(collection), 'medium')
            for article in parsed:
                if article['title'] not in seen_titles:
                    seen_titles.add(article['title'])
                    articles.append(article)
    else:
        parsed = parser.get_articles_from_rss(
            'https://medium.com/feed/frontpage-picks', 'medium')
        articles.extend(parsed)
    return articles
def get_articles():
    """Return unique articles from the two 3DNews RSS feeds."""
    articles = []
    urls = [
        'http://www.3dnews.ru/news/rss',
        'http://www.3dnews.ru/software-news/rss',
    ]
    for url in urls:
        for article in parser.get_articles_from_rss(url, 'threednews'):
            # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(collections=None):
    """Fetch Medium articles, deduplicated by title.

    collections: Medium collection slugs; None/empty falls back to the
    frontpage-picks feed. Returns a list of article dicts in feed order.
    """
    # BUGFIX: original used a mutable default argument (collections=[]).
    articles = []
    seen_titles = set()  # titles are strings, so O(1) set membership is safe
    if collections:
        for collection in collections:
            parsed = parser.get_articles_from_rss(
                'https://medium.com/feed/{}'.format(collection), 'medium')
            for article in parsed:
                if article['title'] not in seen_titles:
                    seen_titles.add(article['title'])
                    articles.append(article)
    else:
        parsed = parser.get_articles_from_rss(
            'https://medium.com/feed/frontpage-picks', 'medium')
        articles.extend(parsed)
    return articles
def get_articles(reddits=None):
    """Fetch articles from the given subreddits, deduplicated by link.

    reddits: iterable of subreddit names; defaults to ['tech'].
    Returns a list of article dicts in feed order.
    """
    # BUGFIX: original used a mutable default argument (reddits=['tech']).
    if reddits is None:
        reddits = ['tech']
    articles = []
    seen_links = set()  # links are strings, so O(1) set membership is safe
    for subreddit in reddits:
        parsed = parser.get_articles_from_rss(
            'http://www.reddit.com/r/{}/.rss'.format(subreddit), 'reddit')
        for article in parsed:
            if article['link'] not in seen_links:
                seen_links.add(article['link'])
                articles.append(article)
    return articles
def get_articles(reddits=None):
    """Fetch articles from the given subreddits, deduplicated by link.

    reddits: iterable of subreddit names; defaults to ['tech'].
    Returns a list of article dicts in feed order.
    """
    # BUGFIX: original used a mutable default argument (reddits=['tech']).
    if reddits is None:
        reddits = ['tech']
    articles = []
    seen_links = set()  # links are strings, so O(1) set membership is safe
    for subreddit in reddits:
        parsed = parser.get_articles_from_rss(
            'http://www.reddit.com/r/{}/.rss'.format(subreddit), SHORT_NAME)
        for article in parsed:
            if article['link'] not in seen_links:
                seen_links.add(article['link'])
                articles.append(article)
    return articles
def get_articles():
    """Return unique articles from the two 3DNews RSS feeds."""
    articles = []
    urls = [
        'http://www.3dnews.ru/news/rss',
        'http://www.3dnews.ru/software-news/rss',
    ]
    for url in urls:
        for article in parser.get_articles_from_rss(url, 'threednews'):
            # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(categories=None):
    """Fetch ZDNet articles for the requested categories.

    categories: subset of {'news', 'downloads', 'reviews'} or ['all'];
    defaults to all three feeds. Returns a list of unique articles.
    """
    urls = {
        'news': 'http://www.zdnet.com/news/rss.xml',
        'downloads': 'http://downloads.zdnet.com/recent/?mode=rss',
        'reviews': 'http://www.zdnet.com/reviews/rss.xml',
    }
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None or 'all' in categories:
        categories = ['news', 'downloads', 'reviews']
    articles = []
    for category in categories:
        # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
        for article in parser.get_articles_from_rss(urls[category], SHORT_NAME):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(categories=None):
    """Fetch CodeProject articles for the requested category names.

    categories: keys of the cids map below; defaults to ['all'].
    Raises KeyError for unknown names (unchanged behavior).
    Returns a list of unique articles.
    """
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None:
        categories = ['all']
    cids = {'all': '1', 'android': '22', 'ios': '25',
            'c++': '2', 'c#': '3', 'web': '23'}
    if 'all' in categories:
        ids = [cids['all']]
    else:
        ids = [cids[cat] for cat in categories]
    articles = []
    for cat_id in ids:
        url = 'http://www.codeproject.com/WebServices/ArticleRSS.aspx?cat=' + cat_id
        # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
        for article in parser.get_articles_from_rss(url, SHORT_NAME):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(categories=None):
    """Fetch ZDNet articles for the requested categories.

    categories: subset of {'news', 'downloads', 'reviews'} or ['all'];
    defaults to all three feeds. Returns a list of unique articles.
    """
    urls = {
        'news': 'http://www.zdnet.com/news/rss.xml',
        'downloads': 'http://downloads.zdnet.com/recent/?mode=rss',
        'reviews': 'http://www.zdnet.com/reviews/rss.xml',
    }
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None or 'all' in categories:
        categories = ['news', 'downloads', 'reviews']
    articles = []
    for category in categories:
        # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
        for article in parser.get_articles_from_rss(urls[category], 'zdnet'):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(categories=None):
    """Fetch CodeProject articles for the requested category names.

    categories: keys of the cids map below; defaults to ['all'].
    Raises KeyError for unknown names (unchanged behavior).
    Returns a list of unique articles.
    """
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None:
        categories = ['all']
    cids = {'all': '1', 'android': '22', 'ios': '25',
            'c++': '2', 'c#': '3', 'web': '23'}
    if 'all' in categories:
        ids = [cids['all']]
    else:
        ids = [cids[cat] for cat in categories]
    articles = []
    for cat_id in ids:
        url = 'http://www.codeproject.com/WebServices/ArticleRSS.aspx?cat=' + cat_id
        # FIX: PEP 8 idiom — `x not in y` instead of `not x in y`.
        for article in parser.get_articles_from_rss(url, 'codeproject'):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles(categories=None):
    """Fetch Trashbox articles for the requested categories.

    categories: list of keys from the url map below; defaults to ['all'].
    Unknown category names are skipped. Returns a list of unique articles.
    """
    urls = {
        'articles': 'b_text',
        'news': 'b_news',
        'all': '1',
        'games': 'games',
        'programs': 'progs',
        'themes': 'themes',
        'questions': 'b_questions',
        'main_page': '1/?approved',
    }
    # BUGFIX: original used a mutable default argument (categories=['all']).
    if categories is None:
        categories = ['all']
    if 'all' in categories:
        selected_urls = [urls['all']]
    else:
        # BUGFIX: original filtered on `i in categories`, which is always
        # true; filter against the url map so unknown names don't KeyError.
        selected_urls = [urls[i] for i in categories if i in urls]
    articles = []
    for url in selected_urls:
        feed_url = 'http://trashbox.ru/feed_topics/{0}'.format(url)
        for article in parser.get_articles_from_rss(feed_url, SHORT_NAME):
            if article not in articles:
                articles.append(article)
    return articles
def get_articles():
    """Return the latest articles from the Android Central feed."""
    feed_url = 'http://feeds2.feedburner.com/androidcentral'
    return parser.get_articles_from_rss(feed_url, 'androidcentral')
def get_articles():
    """Return the latest articles from the Gizmodo feed."""
    feed_url = 'http://gizmodo.com/rss'
    return parser.get_articles_from_rss(feed_url, 'gizmodo')
def get_articles():
    """Return the latest articles from the TechCrunch feed."""
    feed_url = 'http://techcrunch.com/feed'
    return parser.get_articles_from_rss(feed_url, 'techcrunch')
def get_articles():
    """Return the latest articles from the Hacker News feed."""
    feed_url = 'https://news.ycombinator.com/rss'
    return parser.get_articles_from_rss(feed_url, 'hackernews')
def get_articles():
    """Return the latest articles from the ReadWrite feed."""
    feed_url = 'http://readwrite.com/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'readwrite')
def get_articles():
    """Return the latest articles from the Wired feed."""
    feed_url = 'http://www.wired.com/rss'
    return parser.get_articles_from_rss(feed_url, 'wired')
def get_articles():
    """Return the latest articles from the Mashable feed."""
    feed_url = 'http://mashable.com/rss'
    return parser.get_articles_from_rss(feed_url, 'mashable')
def get_articles():
    """Return the latest articles from the Gizmodo feed."""
    feed_url = 'http://gizmodo.com/rss'
    return parser.get_articles_from_rss(feed_url, 'gizmodo')
def get_articles():
    """Return the latest articles from the Smashing Magazine feed."""
    feed_url = 'http://www.smashingmagazine.com/feed/'
    return parser.get_articles_from_rss(feed_url, 'smashingmagazine')
def get_articles():
    """Return the latest top stories from the Digg feed."""
    feed_url = 'http://digg.com/rss/top.rss'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the DZone front-page feed."""
    feed_url = 'http://feeds.dzone.com/dzone/frontpage'
    return parser.get_articles_from_rss(feed_url, 'dzone')
def get_articles():
    """Return the latest articles from the Slashdot feed."""
    feed_url = 'http://rss.slashdot.org/Slashdot/slashdot'
    # Third positional argument (False) toggles a parser option — its exact
    # meaning is defined in parser.get_articles_from_rss; TODO confirm.
    return parser.get_articles_from_rss(feed_url, 'slashdot', False)
def get_articles():
    """Return the latest articles from the Wired feed."""
    feed_url = 'http://www.wired.com/rss'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the Slashdot feed."""
    feed_url = 'http://rss.slashdot.org/Slashdot/slashdot'
    # Third positional argument (False) toggles a parser option — its exact
    # meaning is defined in parser.get_articles_from_rss; TODO confirm.
    return parser.get_articles_from_rss(feed_url, 'slashdot', False)
def get_articles():
    """Return the latest articles from the Make Tech Easier feed."""
    feed_url = "http://www.maketecheasier.com/feed"
    return parser.get_articles_from_rss(feed_url, 'maketecheasier')
def get_articles():
    """Return the latest articles from the iXBT feed."""
    feed_url = 'http://www.ixbt.com/export/utf8/articles.rss'
    return parser.get_articles_from_rss(feed_url, 'ixbt')
def get_articles():
    """Return the latest articles from the Top Design Magazine feed."""
    feed_url = 'http://feeds.feedburner.com/topdesignmagazine'
    return parser.get_articles_from_rss(feed_url, 'topdesignmagazine')
def get_articles():
    """Return the latest articles from the VentureBeat feed."""
    feed_url = "http://venturebeat.com/feed"
    return parser.get_articles_from_rss(feed_url, 'venturebeat')
def get_articles():
    """Return the latest articles from the Flowa feed."""
    feed_url = 'http://flowa.fi/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'flowa')
def get_articles(*args, **kwargs):
    """Return the latest articles from the Droider feed.

    Extra positional/keyword arguments are accepted and ignored.
    """
    feed_url = 'http://droider.ru/feed/'
    return parser.get_articles_from_rss(feed_url, 'droider')
def get_articles():
    """Return the latest articles from the TechCrunch feed."""
    feed_url = 'http://techcrunch.com/feed'
    return parser.get_articles_from_rss(feed_url, 'techcrunch')
def get_articles():
    """Return the latest articles from the Smashing Magazine feed."""
    feed_url = 'http://www.smashingmagazine.com/feed/'
    return parser.get_articles_from_rss(feed_url, 'smashingmagazine')
def get_articles():
    """Return the latest articles from the Engadget feed."""
    feed_url = 'http://engadget.com/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'engadget')
def get_articles(*args, **kwargs):
    """Return the latest articles from the Droider feed.

    Extra positional/keyword arguments are accepted and ignored.
    """
    feed_url = 'http://droider.ru/feed/'
    return parser.get_articles_from_rss(feed_url, 'droider')
def get_articles():
    """Return the latest articles from the Hacker News feed."""
    feed_url = 'https://news.ycombinator.com/rss'
    return parser.get_articles_from_rss(feed_url, 'hackernews')
def get_articles():
    """Return the latest articles from the Recode feed."""
    # NOTE(review): calls get_articles_from_rss without the `parser.` prefix,
    # unlike sibling plugins — presumably imported directly; confirm.
    feed_url = 'http://recode.net/feed/'
    return get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the Mashable feed."""
    feed_url = 'http://mashable.com/rss'
    return parser.get_articles_from_rss(feed_url, 'mashable')
def get_articles():
    """Return the latest articles from the Android Central feed."""
    feed_url = 'http://feeds2.feedburner.com/androidcentral'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the iXBT feed."""
    feed_url = 'http://www.ixbt.com/export/utf8/articles.rss'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the ReadWrite feed."""
    feed_url = 'http://readwrite.com/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'readwrite')
def get_articles():
    """Return the latest articles from the Recode feed."""
    # NOTE(review): calls get_articles_from_rss without the `parser.` prefix,
    # unlike sibling plugins — presumably imported directly; confirm.
    feed_url = 'http://recode.net/feed/'
    return get_articles_from_rss(feed_url, 'recode')
def get_articles():
    """Return the latest articles from the Redroid feed."""
    feed_url = 'http://redroid.ru/rss'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the Top Design Magazine feed."""
    feed_url = 'http://feeds.feedburner.com/topdesignmagazine'
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the DZone front-page feed."""
    feed_url = 'http://feeds.dzone.com/dzone/frontpage'
    return parser.get_articles_from_rss(feed_url, 'dzone')
def get_articles():
    """Return the latest articles from the Engadget feed."""
    feed_url = 'http://engadget.com/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'engadget')
def get_articles():
    """Return the latest articles from the Flowa feed."""
    feed_url = 'http://flowa.fi/rss.xml'
    return parser.get_articles_from_rss(feed_url, 'flowa')
def get_articles():
    """Return the latest articles from the Make Tech Easier feed."""
    feed_url = "http://www.maketecheasier.com/feed"
    return parser.get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest posts from the Planet Clojure feed."""
    # NOTE(review): calls get_articles_from_rss without the `parser.` prefix,
    # unlike sibling plugins — presumably imported directly; confirm.
    feed_url = 'http://planet.clojure.in/atom.xml'
    return get_articles_from_rss(feed_url, SHORT_NAME)
def get_articles():
    """Return the latest articles from the TechRepublic feed."""
    feed_url = 'http://www.techrepublic.com/rssfeeds/articles/latest/'
    return parser.get_articles_from_rss(feed_url, 'techrepublic')