Example #1
#   Locally cached XML:
#   - Takes ~5 minutes on my Chromebook
# ONE MONTH (November, 2013):
#   Fetching via requests:
#   - Takes ~2 minutes on my Chromebook
#   Locally cached XML:
#   - Takes ~50 seconds on my Chromebook
# TODO:
# - Append entries past 5000 into tree before working with rows (prototype in get_pages.py; see the sketch after this example)

import os
from datetime import date

import spelunky_lb  # assumed import; the original script's import lines are not in this excerpt

since = date(2013, 11, 1)
until = date(2013, 11, 30)
# since = None
# until = None

# `avatars` (an iterable of avatar names) is assumed to be defined or imported
# elsewhere in the original script; it is not part of this excerpt.
av_tally = {a: 0 for a in avatars}  # rows seen per avatar
dailies = spelunky_lb.dailies(since=since, until=until, force=True)

for d in dailies:
    with d as lb:
        for r in lb:
            av_tally[r.avatar] += 1
        print "done with %s" % lb.date

# Sort avatars by how often they were used, most common first
av_sort = sorted(av_tally.items(), key=lambda x: x[1], reverse=True)

with open(os.path.join('output', 'avatars.txt'), 'w') as outfile:
    for c in av_sort:
        print "%s\t%i" % c
        outfile.write("%s\t%i\n" % c)
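
# Sketch of the TODO above: append leaderboard entries past the 5000-row page
# limit into the parsed XML tree before iterating rows. This is only an
# illustration, not the get_pages.py prototype; the layout assumption (entry
# rows as direct children of each page's root element) and the extra_pages
# argument are guesses, not part of the original code.
import xml.etree.ElementTree as ET

def append_extra_entries(tree, extra_pages):
    # tree: ElementTree for the first page of a daily leaderboard
    # extra_pages: raw XML strings for pages beyond the first 5000 entries
    root = tree.getroot()
    for page_xml in extra_pages:
        for entry in ET.fromstring(page_xml):
            root.append(entry)
    return tree
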
Example #2
#   Locally cached XML:
#   - ~5m:45s on my Chromebook
#
# TODO:
# - Append entries past 5000 into tree before working with rows (prototype in get_pages.py)

import datetime

import spelunky_lb  # assumed import; the original script's import lines are not in this excerpt

## All dailies
# since = None # Defaults to release date
# until = datetime.date.today()

## Prerelease-only dates (for smaller tables/dataset)
# since = datetime.date(2013, 5, 30)
# until = datetime.date(2013, 8, 7)

## Test uncached:
since = datetime.date(2014, 10, 12)
until = datetime.date.today()

# Fetch the dailies in the selected date range
dailies = spelunky_lb.dailies(sort=True, persist=True, since=since, until=until)

# Write each daily's date and rows to output/dailies.txt
with open("output/dailies.txt", "w") as output:
    for d in dailies:
        with d as lb:
            lb.persist()
            print lb.date
            output.write(str(lb.date) + "\n" + ("-" * 10) + "\n")
            for row in lb:
                output.write(str(row) + "\n")
            output.write(("-" * 50) + "\n\n")