endString = dateToURLString(endDate)

##make a new folder that is easy to identify in the docs directory
folderName = key + "_" + startString + "_" + endString

defaultDir = docsFolder + "/" + folderName
saveDest = defaultDir + "/"

if not os.path.exists(defaultDir):
    os.mkdir(defaultDir)

print "Running!"
print "\tGathering content from NPR..."
#npr = NPR.getRange(key, startDate, endDate)
#writeOutputToFile(npr, saveDest + "NPR.dat")
print "\tFinished NPR"
print "\tGathering content from New York Times..."
#nyt = NYT.getRange(key, startDate, endDate)
#writeOutputToFile(nyt, saveDest + "NYT.dat")
print "\tFinished NYT"
print "\tGathering content from Guardian UK..."
#guardian = Guardian.getRange(key, startDate, endDate)
#writeOutputToFile(guardian, saveDest + "guardian.dat")
print "\tFinished Guardian UK"
print "\tGathering content from Tumblr"
tumblr = Tumblr.getRange(key, startDate, endDate)
writeOutputToFile(tumblr, saveDest + "tumblr.dat")
print "\tFinished Tumblr."
print "Analysis Finished!"
exit()
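
# A minimal sketch (not from the original source) of the two helpers the
# snippet above assumes: dateToURLString() and writeOutputToFile(). Their
# real implementations are defined elsewhere in the script and are not
# shown here; these hypothetical stand-ins only illustrate the expected
# behaviour.
import pickle


def dateToURLString(date):
    # Assumed behaviour: format a datetime.date as YYYY-MM-DD, suitable for
    # API query strings and for the folder name built above.
    return date.strftime("%Y-%m-%d")


def writeOutputToFile(data, path):
    # Assumed behaviour: serialize the gathered content to a .dat file.
    with open(path, "wb") as f:
        pickle.dump(data, f)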
Example #2
import common
import settings
import Tumblr
import LastFM
import DeviantArt
import Flickr
#import Artstation
import HackerNews
from pprint import pprint

silos = [
    DeviantArt.DAFavs(),
    Flickr.FlickrFavs(),
    Tumblr.TumblrFavs(),
    #    Artstation.ASFavs(),
    LastFM.LastFM(),
    HackerNews.HackerNews()
]

for silo in silos:
    silo.run()
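
# A sketch of the interface the loop above relies on: each silo object only
# needs to expose a run() method. The shared base class is an assumption
# (it is not shown in the original source); something like this hypothetical
# Silo class would be enough.
class Silo(object):
    def run(self):
        # Fetch newly favourited items from the remote service and archive
        # them locally; each concrete silo provides its own implementation.
        raise NotImplementedError("each silo implements its own run()")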
                number of content  (articles, tweets, etc)
                number of words
                start date
                end date
                number of days of coverage
            content:
                day
                    date
                    number of words
                    number of content
                    entity
                        date
                        number of words
                        raw text
                        title (if applicable)
                        author (if applicable)
                        other ?
"""

import NYT
import Twitter
import NPR
import Guardian
import Tumblr

NYT.test()
Twitter.test()
NPR.test()
Guardian.test()
Tumblr.test()
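
# Illustration only: one way to represent the nested output format described
# in the docstring above as a plain Python dict. The key names are inferred
# from the docstring, the name of the truncated top-level section is guessed
# as "stats", and all values are placeholders; none of this is taken from the
# modules' actual return values.
example_output = {
    "stats": {
        "number of content": 0,           # articles, tweets, etc.
        "number of words": 0,
        "start date": "2016-01-01",       # placeholder
        "end date": "2016-01-31",         # placeholder
        "number of days of coverage": 31,
    },
    "content": [
        {                                 # one entry per day
            "date": "2016-01-01",
            "number of words": 0,
            "number of content": 0,
            "entities": [
                {                         # one entry per article/tweet/post
                    "date": "2016-01-01",
                    "number of words": 0,
                    "raw text": "",
                    "title": None,        # if applicable
                    "author": None,       # if applicable
                },
            ],
        },
    ],
}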