Example #1
def get_articles(hubs=[]):
	
	if not hubs:
		return parser.get_articles_from_rss('http://geektimes.ru/rss/hubs',
			'geektimes')
	else:
		posts = []
		url = 'http://geektimes.ru/rss/hub/'
		for hub in hubs:
			for post in parser.get_articles_from_rss(url + hub, 'geektimes'):
				if post not in posts:
					posts.append(post)
		
		return posts
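Every example in this listing delegates the actual feed handling to parser.get_articles_from_rss(url, short_name) (the Slashdot examples pass an extra boolean as a third argument), and the dict-based examples further down read the results through their 'title' and 'link' keys. The project's parser module itself is not shown here; the following is only a minimal sketch of what such a helper could look like, assuming the feedparser library. The name and meaning of the third parameter (full_text) are guesses.

import feedparser


def get_articles_from_rss(url, short_name, full_text=True):
    # Minimal sketch, not the project's actual implementation: fetch the
    # feed and normalise each entry into the dict shape the examples use
    # ('title' and 'link' keys). The full_text flag is a hypothetical
    # stand-in for the extra boolean some examples pass.
    feed = feedparser.parse(url)
    articles = []
    for entry in feed.entries:
        article = {
            'title': entry.get('title', ''),
            'link': entry.get('link', ''),
            'source': short_name,
        }
        if full_text:
            article['summary'] = entry.get('summary', '')
        articles.append(article)
    return articles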
Example #2
def get_articles(hubs=[]):
	
	if not hubs:
		return parser.get_articles_from_rss('http://habrahabr.ru/rss/hubs',
			'habrahabr')
	else:
		posts = []
		url = 'http://habrahabr.ru/rss/hub/'
		for hub in hubs:
			for post in parser.get_articles_from_rss(url + hub, SHORT_NAME):
				if post not in posts:
					posts.append(post)
		
		return posts
Example #3
def get_articles(hubs=[]):

    if not hubs:
        return parser.get_articles_from_rss('http://geektimes.ru/rss/hubs',
                                            'geektimes')
    else:
        posts = []
        url = 'http://geektimes.ru/rss/hub/'
        for hub in hubs:
            for post in parser.get_articles_from_rss(url + hub, 'geektimes'):
                if post not in posts:
                    posts.append(post)

        return posts
Example #4
def get_articles(categories=['all']):
    urls = {
        'articles': 'b_text',
        'news': 'b_news',
        'all': '1',
        'games': 'games',
        'programs': 'progs',
        'themes': 'themes',
        'questions': 'b_questions',
        'main_page': '1/?approved'
    }

    if 'all' in categories:
        selected_urls = [urls['all']]
    else:
        selected_urls = [urls[i] for i in categories if i in urls]

    articles = []
    append = articles.append  # local bind avoids an attribute lookup per call

    for url in selected_urls:
        url_ = 'http://trashbox.ru/feed_topics/{0}'.format(url)
        for article in parser.get_articles_from_rss(url_, 'trashbox'):
            if article not in articles:
                append(article)

    return articles
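The append = articles.append line above is a CPython micro-optimization: binding the bound method to a local name avoids one attribute lookup per appended article. A small, self-contained way to see the (machine-dependent) effect:

import timeit

setup = "data = list(range(1000))"
plain = "out = []\nfor x in data:\n    out.append(x)"
bound = "out = []\napp = out.append\nfor x in data:\n    app(x)"

# Timings are only indicative and vary between machines and interpreters.
print('attribute lookup per call:', timeit.timeit(plain, setup=setup, number=2000))
print('locally bound append:     ', timeit.timeit(bound, setup=setup, number=2000))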
Example #5
def get_articles():
	articles = []
	urls = ['http://mobile-review.com.feedsportal.com/c/33244/f/556830/index.rss',
			'http://mobile-review.com.feedsportal.com/c/33244/f/557686/index.rss',
			'http://mobile-review.com.feedsportal.com/c/33244/f/557683/index.rss']
	for url in urls:
		articles += parser.get_articles_from_rss(url, SHORT_NAME)
	
	return articles
Example #6
def get_articles():
	articles = []
	urls = ['http://img.helpix.ru/news/shtml/rss.xml',
			'http://helpix.ru/rss/review-helpix.xml']
	
	for url in urls:
		articles += parser.get_articles_from_rss(url, SHORT_NAME)
	
	return articles
Example #7
def get_articles():
    articles = []
    urls = [
        'http://mobile-review.com.feedsportal.com/c/33244/f/556830/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557686/index.rss',
        'http://mobile-review.com.feedsportal.com/c/33244/f/557683/index.rss'
    ]
    for url in urls:
        articles += parser.get_articles_from_rss(url, 'mobile-review')

    return articles
Example #8
def get_articles():
    articles = []
    urls = [
        'http://img.helpix.ru/news/shtml/rss.xml',
        'http://helpix.ru/rss/review-helpix.xml'
    ]

    for url in urls:
        articles += parser.get_articles_from_rss(url, 'helpix')

    return articles
Example #9
def get_articles(collections=[]):
    articles = []
    titles = []

    if collections:
        for collection in collections:
            parsed = parser.get_articles_from_rss(
                'https://medium.com/feed/{}'.format(collection), 'medium')

            for article in parsed:
                if article['title'] not in titles:
                    titles.append(article['title'])
                    articles.append(article)
    else:
        parsed = parser.get_articles_from_rss(
            'https://medium.com/feed/frontpage-picks', 'medium')
        for article in parsed:
            titles.append(article['title'])
            articles.append(article)

    return articles
Example #10
def get_articles():
	articles = []
	
	urls = ['http://www.3dnews.ru/news/rss',
		'http://www.3dnews.ru/software-news/rss']
	
	for url in urls:
		for article in parser.get_articles_from_rss(url, 'threednews'):
			if article not in articles:
				articles.append(article)
	
	return articles
Example #11
def get_articles(collections=[]):
	articles = []
	titles = []
	
	if collections:
		for collection in collections:
			parsed = parser.get_articles_from_rss(
				'https://medium.com/feed/{}'.format(collection), 'medium')
			
			for article in parsed:
				if article['title'] not in titles:
					titles.append(article['title'])
					articles.append(article)
	else:
		parsed = parser.get_articles_from_rss(
			'https://medium.com/feed/frontpage-picks', 'medium')
		for article in parsed:
			titles.append(article['title'])
			articles.append(article)
	
	return articles
Example #12
def get_articles(reddits=['tech']):
    articles = []
    links = []

    for r in reddits:
        parsed = parser.get_articles_from_rss(
            'http://www.reddit.com/r/{}/.rss'.format(r), 'reddit')
        for article in parsed:
            if article['link'] not in links:
                links.append(article['link'])
                articles.append(article)

    return articles
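A hypothetical call to the reddit variant above; the subreddit names are only illustrative, and the result is the deduplicated list of article dicts the function builds:

# Illustrative usage only; subreddit names are placeholders.
articles = get_articles(reddits=['programming', 'technology'])
for article in articles[:5]:
    print(article['title'], '->', article['link'])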
Example #13
def get_articles(reddits=['tech']):
	articles = []
	links = []
	
	for r in reddits:
		parsed = parser.get_articles_from_rss(
			'http://www.reddit.com/r/{}/.rss'.format(r), SHORT_NAME)
		for article in parsed:
			if article['link'] not in links:
				links.append(article['link'])
				articles.append(article)
	
	return articles
Example #14
def get_articles():
    articles = []

    urls = [
        'http://www.3dnews.ru/news/rss',
        'http://www.3dnews.ru/software-news/rss'
    ]

    for url in urls:
        for article in parser.get_articles_from_rss(url, 'threednews'):
            if article not in articles:
                articles.append(article)

    return articles
Example #15
def get_articles(categories=['all']):
	urls = {'news': 'http://www.zdnet.com/news/rss.xml',
		'downloads': 'http://downloads.zdnet.com/recent/?mode=rss',
		'reviews': 'http://www.zdnet.com/reviews/rss.xml'}
	
	if 'all' in categories:
		categories = ['news', 'downloads', 'reviews']
	
	articles = []
	
	for category in categories:
		url = urls[category]
		for article in parser.get_articles_from_rss(url, SHORT_NAME):
			if article not in articles:
				articles.append(article)
	
	return articles
Example #16
def get_articles(categories=['all']):
	articles = []
	cids = {'all': '1', 'android': '22', 'ios': '25', 'c++': '2',
		'c#': '3', 'web': '23'}
	if 'all' in categories:
		ids = [cids['all']]
	else:
		ids = [cids[cat] for cat in categories]
	
	urls = ['http://www.codeproject.com/WebServices/ArticleRSS.aspx?cat='+i
		for i in ids]
	
	for url in urls:
		parsed = parser.get_articles_from_rss(url, SHORT_NAME)
		for article in parsed:
			if article not in articles:
				articles.append(article)
	
	return articles
Example #17
def get_articles(categories=['all']):
    urls = {
        'news': 'http://www.zdnet.com/news/rss.xml',
        'downloads': 'http://downloads.zdnet.com/recent/?mode=rss',
        'reviews': 'http://www.zdnet.com/reviews/rss.xml'
    }

    if 'all' in categories:
        categories = ['news', 'downloads', 'reviews']

    articles = []

    for category in categories:
        url = urls[category]
        for article in parser.get_articles_from_rss(url, 'zdnet'):
            if article not in articles:
                articles.append(article)

    return articles
Example #18
def get_articles(categories=['all']):
	articles = []
	cids = {'all': '1', 'android': '22', 'ios': '25', 'c++': '2',
		'c#': '3', 'web': '23'}
	if 'all' in categories:
		ids = [cids['all']]
	else:
		ids = [cids[cat] for cat in categories]
	
	urls = ['http://www.codeproject.com/WebServices/ArticleRSS.aspx?cat='+i
		for i in ids]
	
	for url in urls:
		parsed = parser.get_articles_from_rss(url, 'codeproject')
		for article in parsed:
			if article not in articles:
				articles.append(article)
	
	return articles
Example #19
def get_articles(categories=['all']):
	urls = {'articles': 'b_text', 'news': 'b_news', 'all': '1',
		'games': 'games', 'programs': 'progs', 'themes': 'themes',
		'questions': 'b_questions', 'main_page': '1/?approved'}
	
	if 'all' in categories:
		selected_urls = [urls['all']]
	else:
		selected_urls = [urls[i] for i in categories if i in urls]
	
	articles = []
	append = articles.append  # local bind avoids an attribute lookup per call
	
	for url in selected_urls:
		url_ = 'http://trashbox.ru/feed_topics/{0}'.format(url)
		for article in parser.get_articles_from_rss(url_, SHORT_NAME):
			if article not in articles:
				append(article)

	return articles
Example #20
def get_articles():
	return parser.get_articles_from_rss('http://feeds2.feedburner.com/androidcentral',
		'androidcentral')
Example #21
def get_articles():
    return parser.get_articles_from_rss('http://gizmodo.com/rss', 'gizmodo')
Example #22
def get_articles():
    return parser.get_articles_from_rss('http://techcrunch.com/feed',
                                        'techcrunch')
Example #23
def get_articles():
    return parser.get_articles_from_rss('https://news.ycombinator.com/rss',
                                        'hackernews')
Example #24
def get_articles():
	return parser.get_articles_from_rss('http://readwrite.com/rss.xml', 'readwrite')
Example #25
def get_articles():
    return parser.get_articles_from_rss('http://www.wired.com/rss', 'wired')
Example #26
def get_articles():
	return parser.get_articles_from_rss('http://mashable.com/rss', 'mashable')
Example #27
def get_articles():
	return parser.get_articles_from_rss('http://gizmodo.com/rss', 'gizmodo')
Example #28
def get_articles():
	return parser.get_articles_from_rss('http://www.smashingmagazine.com/feed/',
		'smashingmagazine')
Example #29
def get_articles():
	return parser.get_articles_from_rss('http://digg.com/rss/top.rss', SHORT_NAME)
Example #30
def get_articles():
    return parser.get_articles_from_rss(
        'http://feeds.dzone.com/dzone/frontpage', 'dzone')
Example #31
def get_articles():
    return parser.get_articles_from_rss(
        'http://rss.slashdot.org/Slashdot/slashdot', 'slashdot', False)
Example #32
def get_articles():
	return parser.get_articles_from_rss('http://www.wired.com/rss', SHORT_NAME)
Example #33
def get_articles():
	return parser.get_articles_from_rss('http://rss.slashdot.org/Slashdot/slashdot',
		'slashdot', False)
Example #34
def get_articles():
    return parser.get_articles_from_rss("http://www.maketecheasier.com/feed",
                                        'maketecheasier')
Example #35
def get_articles():
    return parser.get_articles_from_rss(
        'http://www.ixbt.com/export/utf8/articles.rss', 'ixbt')
Example #36
def get_articles():
    return parser.get_articles_from_rss(
        'http://feeds.feedburner.com/topdesignmagazine', 'topdesignmagazine')
Example #37
def get_articles():
    return parser.get_articles_from_rss("http://venturebeat.com/feed",
                                        'venturebeat')
Example #38
def get_articles():
    return parser.get_articles_from_rss('http://flowa.fi/rss.xml', 'flowa')
Example #39
def get_articles(*args, **kwargs):
	return parser.get_articles_from_rss('http://droider.ru/feed/', 'droider')
Example #40
def get_articles():
	return parser.get_articles_from_rss('http://techcrunch.com/feed', 'techcrunch')
Example #41
def get_articles():
    return parser.get_articles_from_rss(
        'http://www.smashingmagazine.com/feed/', 'smashingmagazine')
Example #42
def get_articles():
    return parser.get_articles_from_rss('http://engadget.com/rss.xml',
                                        'engadget')
Example #43
def get_articles(*args, **kwargs):
    return parser.get_articles_from_rss('http://droider.ru/feed/', 'droider')
Example #44
def get_articles():
	return parser.get_articles_from_rss('https://news.ycombinator.com/rss',
		'hackernews')
Example #45
def get_articles():
	return parser.get_articles_from_rss('http://recode.net/feed/', SHORT_NAME)
Example #46
def get_articles():
    return parser.get_articles_from_rss('http://mashable.com/rss', 'mashable')
Example #47
def get_articles():
	return parser.get_articles_from_rss('http://feeds2.feedburner.com/androidcentral',
		SHORT_NAME)
Example #48
def get_articles():
	return parser.get_articles_from_rss(
		'http://www.ixbt.com/export/utf8/articles.rss', SHORT_NAME)
Example #49
def get_articles():
    return parser.get_articles_from_rss('http://readwrite.com/rss.xml',
                                        'readwrite')
Example #50
def get_articles():
	return parser.get_articles_from_rss('http://recode.net/feed/', 'recode')
Example #51
def get_articles():
	return parser.get_articles_from_rss('http://redroid.ru/rss', SHORT_NAME)
Example #52
def get_articles():
	return parser.get_articles_from_rss(
		'http://feeds.feedburner.com/topdesignmagazine', SHORT_NAME)
Example #53
def get_articles():
	return parser.get_articles_from_rss('http://feeds.dzone.com/dzone/frontpage',
		'dzone')
Example #54
def get_articles():
	return parser.get_articles_from_rss('http://engadget.com/rss.xml', 'engadget')
Example #55
def get_articles():
	return parser.get_articles_from_rss('http://flowa.fi/rss.xml', 'flowa')
Example #56
def get_articles():
	return parser.get_articles_from_rss("http://www.maketecheasier.com/feed",
		SHORT_NAME)
Example #57
def get_articles():
    return parser.get_articles_from_rss(
        'http://planet.clojure.in/atom.xml', SHORT_NAME)
Example #58
def get_articles():
    return parser.get_articles_from_rss(
        'http://www.techrepublic.com/rssfeeds/articles/latest/',
        'techrepublic')