def main(arguments):
    """Download imgur-hosted images posted to a subreddit.

    ``arguments`` is a docopt-style dict: ``<subreddit>`` plus the option
    flags/values referenced below (``--new``, ``--rising``, ``--controversial``,
    ``--top``, ``--time``, ``--savedir``, ``--limit``, ``--nsfw``, ``--size``,
    ``--reddit_name``, ``--reddit_over_id``).

    Side effects: downloads images via pyimgur and moves them into the
    save directory; existing files are never overwritten.
    """
    r = praw.Reddit('Reddit image downloader by u/_Daimon_ ver 0.1')
    subreddit_name = make_multireddit(arguments['<subreddit>'])
    subreddit = r.get_subreddit(subreddit_name)
    test_valid_subreddit(subreddit)
    listing = get_listing(subreddit, arguments['--new'], arguments['--rising'],
                          arguments['--controversial'], arguments['--top'])
    params = {'t': arguments['--time']}
    if arguments['--savedir'] is None:
        save_path = os.path.abspath('.')
    else:
        save_path = os.path.abspath(arguments['--savedir'])
    for sub in listing(limit=int(arguments['--limit']), url_data=params):
        # Only direct imgur links; skip self posts.
        if sub.is_self or 'imgur.com' not in sub.domain:
            continue
        if sub.over_18 and not arguments['--nsfw']:
            continue
        if is_album(sub.url):  # Temporary. This should be handled by pyimgur
            continue
        img_hash = get_img_hash(sub.url)
        try:
            new_image = pyimgur.download_image(img_hash,
                                               size=arguments['--size'])
        except pyimgur.errors.Code404:
            continue
        # Pick the destination filename, then join/exists/move exactly once
        # (the original duplicated the move logic in both branches).
        if arguments['--reddit_name'] or (new_image.startswith(img_hash)
                                          and arguments['--reddit_over_id']):
            title = sanitize(sub.title)
            new_name = title + '.' + new_image.split('.')[-1]
        else:
            new_name = new_image
        new_name = os.path.join(save_path, new_name)
        if not os.path.exists(new_name):
            shutil.move(new_image, new_name)
"""
Nate Brennand
"""

from string import replace
import praw
import pyimgur
from sys import argv

# Tokens stripped from an imgur URL to leave just the bare image hash.
imgurFilter = [".jpg", ".gif", "http://imgur.com/", "http://i.imgur.com/"]


def extract_hash(url, filters=imgurFilter):
    """Return *url* with every token in *filters* removed.

    For a direct imgur link such as ``http://i.imgur.com/abc123.jpg``
    this leaves only the image hash (``abc123``).
    """
    for token in filters:
        # str.replace over the deprecated/removed string.replace function.
        url = url.replace(token, "")
    return url


def run():
    """Scrape the top imgur posts of a subreddit and download each image.

    Reads ``argv``: ``<subreddit> <# of photos> <time period>``.
    Side effects: prints hashes/progress and downloads images via pyimgur.
    """
    if len(argv) != 4:
        print("Usage: python %s <subreddit> <# of photos> <time period>" % argv[0])
        exit(1)

    scrape = praw.Reddit(user_agent="imgur photo scraper by u/Twigger")
    submissions = scrape.get_subreddit(str(argv[1])).get_top(
        limit=int(argv[2]), url_data={'t': str(argv[3])})

    print("imgur hashes:")
    hashes = []
    for submission in submissions:
        url = str(submission.url)
        # Keep only imgur links that are not albums ("a/" in the path).
        if "imgur" in url and "a/" not in url:
            img_hash = extract_hash(url)
            print(img_hash)
            hashes.append(img_hash)

    for img_hash in hashes:
        pyimgur.download_image(img_hash, "large_thumbnail")
        print("Downloaded %s" % img_hash)


if __name__ == "__main__":
    run()