def store_revisions(self, page_url):
    """
    Retrieve all the revisions of a given Wikipedia page.

    Parameters:
        - page_url: a Wikipedia page URL
    """
    p = Page()
    d = Dataset("%s:27017" % (mongodb_host))
    title = url2title(page_url)
    lang = url2lang(page_url)
    p.fetch_from_api_title(title, lang=lang)
    revisions = p.get_all_editors()
    i = 0
    for revision in revisions:
        i += 1
        # ex: en/crimea/revision/999999
        key = "%s/%s/revision/%s" % (lang, title, revision["revid"])
        # fetch the full revision from the API
        value = p.get_revisions(extra_params={
            "rvstartid": revision["revid"],
            "rvlimit": 1})
        # write it through the database handler
        d.write(key, value)
        # report progress (Celery-style task state update)
        self.update_state(
            state='PROGRESS',
            meta={'current': i, 'total': len(revisions)})
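
# Usage sketch (assumption): the self.update_state() call above suggests
# store_revisions is meant to run as a bound Celery task. A minimal
# registration could look like the following; the app name and broker URL
# are hypothetical, not part of this module.
from celery import Celery

celery_app = Celery("wikipedia_tasks", broker="redis://localhost:6379/0")

@celery_app.task(bind=True)
def store_revisions_task(self, page_url):
    # delegate to the module-level function; Celery passes the task instance
    # as self, which provides the update_state() used for progress reporting
    return store_revisions(self, page_url)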
def get_page_info(url, length, index):
    title = url2title(url)
    lang = url2lang(url)
    print "[%s] %s (%s/%s)" % (lang, title, index, length)
    wp = Page()
    # "www" means the URL has no language subdomain, so skip it
    if lang != "www":
        r = wp.fetch_from_api_title(title, lang=lang)
        filename = "dataset/%s.info.json" % (wp.page_id)
        # only fetch once: skip pages whose info file already exists
        if not os.path.isfile(filename):
            with open(filename, "w") as out:
                data = {
                    "edits": wp.get_all_editors(),
                    "langs": wp.get_langlinks()
                }
                json.dump(data, out)
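
# Example call site (assumption): "length" and "index" look like pure
# progress-display parameters, so a driver over the seed list would be:
#
#     seed_urls = [u.strip() for u in open("data/in/wicrimea-seeds.txt")]
#     for index, url in enumerate(seed_urls, start=1):
#         get_page_info(url, len(seed_urls), index)
#
# Left commented out so it does not run at import time.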
def dataset_timeline(url):
    print "timeline: %s" % (url)
    d = Dataset("%s:27017" % (mongodb_host))
    title = url2title(url)
    lang = url2lang(url)
    # match every revision key written by store_revisions()
    regex_string = r"%s/%s/revision/([0-9]*)$" % (lang, title)
    r = d.find(
        {"url": {"$regex": regex_string}},
        {"dataset.timestamp": 1, "dataset.revid": 1})
    timeline = []
    for result in r:
        timeline.append({
            "timestamp": result["dataset"][0]["timestamp"],
            "revid": result["dataset"][0]["revid"]
        })
    timeline = sorted(timeline, key=lambda rev: rev["timestamp"])
    print "start: %s" % (timeline[0])
    print "end: %s" % (timeline[-1])
    # replace any previously stored timeline for this page
    k = "%s/%s/timeline" % (lang, title)
    d.delete(k)
    d.write(k, timeline)
    print r.count()
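
# Assumed document shape (inferred from the projection and the field access
# above): each revision record written by store_revisions() appears to look
# roughly like
#
#     {
#         "url": "en/Crimea/revision/999999",
#         "dataset": [{"revid": 999999,
#                      "timestamp": "2014-03-01T12:00:00Z", ...}]
#     }
#
# dataset_timeline() collapses those into one timestamp-sorted list stored
# under "<lang>/<title>/timeline", e.g.:
#
#     dataset_timeline("https://en.wikipedia.org/wiki/Crimea")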
users = {}
init()

extra_languages = ["en", "simple", "uk", "fr", "ru", "de"]
source_in = "data/in/wicrimea-seeds.txt"
source_ext = "data/out/wicrimea-seeds.extended.txt"

with open(source_in, "r") as seeds, open(source_ext, "w") as out:
    for line in seeds:
        seed = line.strip()
        p = Page()
        r = p.fetch_from_api_title(url2title(seed))
        print ""
        print u"→ %s (%s)" % (p.title, seed)
        out.write(seed + "\n")
        # also fetch the page in each of the extra languages it links to
        langs = p.get_langlinks()
        for link in langs:
            if link["lang"] in extra_languages:
                p_lang = Page()
                p_lang.fetch_from_api_title(link["*"], lang=link["lang"])
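
# Hedged sketch (assumption): url2title() and url2lang() are not defined in
# this excerpt. Given how they are used above, minimal versions could look
# like this; the real helpers may differ (e.g. in percent-decoding).
from urlparse import urlparse  # Python 2; use urllib.parse on Python 3

def url2lang_sketch(url):
    # "https://en.wikipedia.org/wiki/Crimea" -> "en"
    return urlparse(url).netloc.split(".")[0]

def url2title_sketch(url):
    # "https://en.wikipedia.org/wiki/Crimea" -> "Crimea"
    return urlparse(url).path.split("/wiki/")[-1]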