Example #1
def download_images(nzbs):
	# Multi-threaded fetch of poster urls and caching of the images (if not already cached)
	urls = [nzb['poster_url'] for nzb in nzbs if 'poster_url' in nzb and not os.path.exists(nzb['poster_path'])]
	if urls:
		results = mtwr.request_urls(urls, timeout=30)
		for url, url_data in results.iteritems():
			filename = 'data/' + url.split('/')[-1]
			with open(filename, 'wb') as f:
				f.write(url_data)
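
A minimal way to drive download_images() might look like the sketch below; the shape of the nzb entries (dicts carrying 'poster_url' and 'poster_path') is inferred from the lookups above, and the sample values are purely hypothetical.

if __name__ == '__main__':
	# Hypothetical nzb entries; the 'poster_url'/'poster_path' keys are
	# taken from the lookups in download_images() above.
	sample_nzbs = [
		{'poster_url': 'http://example.com/posters/a.jpg', 'poster_path': 'data/a.jpg'},
		{'poster_url': 'http://example.com/posters/b.jpg', 'poster_path': 'data/b.jpg'},
	]
	download_images(sample_nzbs)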
Example #2
def start_feed_monitor():
	while True:
		data = []
		try:
			feeds = mtwr.request_urls(nzbconfig.feed_urls, timeout=30)
			for response in feeds.values():
				data += json.loads(response)
			print 'Feed just fetched %d elements' % len(data)
		except Exception:
			traceback_message = ''.join(traceback.format_exception(*sys.exc_info()))
			print traceback_message

		html = update_feed(data)
		write_monitor_data(html)

		# Sleep by increments of 1 second to catch the keyboard interrupt
		for i in range(nzbconfig.monitor_interval):
			time.sleep(1)
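
The one-second sleep increments let a KeyboardInterrupt (Ctrl+C) be noticed between fetches; a minimal entry point relying on that, with nzbconfig assumed to provide feed_urls and monitor_interval as in the snippet above, might be:

if __name__ == '__main__':
	try:
		start_feed_monitor()
	except KeyboardInterrupt:
		print 'Feed monitor stopped'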
Example #3
def get_imdb_results(urls, url_response_cache):
	# Multi-threaded fetch of imdb info (if not already cached)
	imdb_results = mtwr.request_urls(urls, timeout=30) if urls else {}
	imdb_results.update(url_response_cache)
	return imdb_results
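
A sketch of how a caller might split IMDb URLs into cached and uncached before handing them to get_imdb_results(); the cache is assumed to be a plain dict mapping url to response body, which is all the snippet above requires.

def fetch_imdb(all_urls, cache):
	# Hypothetical helper: only uncached urls go over the network;
	# cached responses are merged back in by get_imdb_results().
	uncached = [url for url in all_urls if url not in cache]
	cached = dict((url, cache[url]) for url in all_urls if url in cache)
	return get_imdb_results(uncached, cached)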
Example #4
#!/usr/bin/env python

import mtwr

if __name__ == "__main__":
	test_urls = [
		'https://www.google.com',
		'https://www.yahoo.com',
		'http://www.flickr.com',
		'http://www.microsoft.com',
		'http://www.amazon.com',
		'http://www.python.org',
		'http://www.stackoverflow.com',
		'ftp://mirrors.kernel.org/'
	]

	import time
	start = time.time()  # wall-clock time; time.clock() measures CPU time on Unix and would under-report I/O waits
	results = mtwr.request_urls(test_urls, timeout=15, force_ipv4=True)
	print '%d requests in %f seconds' % (len(test_urls), time.time() - start)

	for url, data in results.iteritems():
		print '%s: [%s...] (%d bytes)' % (url, data[:8].replace('\r', '\\r').replace('\n', '\\n'), len(data))
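
These examples do not show how mtwr.request_urls() reports URLs that fail or time out; assuming failed URLs are simply absent from the returned dict, a post-check at the end of the test script could flag them:

	# Assumption (not shown in these examples): urls that failed are
	# omitted from the result dict rather than raising.
	missing = [url for url in test_urls if url not in results]
	if missing:
		print 'No response for: %s' % ', '.join(missing)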