def statistics_json():
    """Return aggregate candidate-mention statistics as JSON.

    Returns:
        A Flask JSON response with the total mention count, the
        last-week mention count, and the number of candidates that have
        at least one recorded mention.
    """
    # The aggregates below are order-insensitive, so a single unsorted
    # query replaces the two sorted full-collection scans the view used
    # to run (sorting was pure wasted work for the database).
    candidates = [c for c in db_candidates.find() if 'mentions' in c]

    total_mentions = sum(c['mentions']['total_count'] for c in candidates)
    last_week_mentions = sum(
        c['mentions']['last_week_count'] for c in candidates)
    num_candidates = sum(
        1 for c in candidates if c['mentions']['total_count'] > 0)

    return jsonify({
        'candidates': {
            'total_mentions': total_mentions,
            'last_week_mentions': last_week_mentions,
            'num_candidates': num_candidates,
        }
    })
def statistics():
    """Render the statistics page with four candidate leaderboards.

    Leaderboards: top 50 by total mentions, top 50 by last-week
    mentions, and the 50 most nationally / most locally skewed
    candidates by coverage ratio.
    """
    total_candidate_mentions = db_candidates.find().sort([
        ("mentions.total_count", -1),
    ]).limit(50)
    last_week_candidate_mentions = db_candidates.find().sort([
        ("mentions.last_week_count", -1),
    ]).limit(50)

    # Skip documents without a 'mentions' sub-document, matching the
    # guard used in statistics_json(); previously such a document would
    # raise KeyError in the loop below.
    loc_nat_candidates = [c for c in db_candidates.find() if 'mentions' in c]

    for candidate in loc_nat_candidates:
        mentions = candidate['mentions']
        total = float(mentions['national_count'] + mentions['local_count'])
        # +1 in the denominator avoids division by zero; the
        # sqrt(total) factor damps candidates with very few mentions.
        mentions['nat_ratio'] = (
            mentions['national_count'] / (total + 1) - 0.5) * math.sqrt(total)
        mentions['loc_ratio'] = (
            mentions['local_count'] / (total + 1) - 0.5) * math.sqrt(total)

    local_candidate_mentions = sorted(
        loc_nat_candidates,
        key=lambda x: x['mentions']['loc_ratio'],
        reverse=True)[:50]
    national_candidate_mentions = sorted(
        loc_nat_candidates,
        key=lambda x: x['mentions']['nat_ratio'],
        reverse=True)[:50]

    return render_template(
        'statistics.html',
        total_candidate_mentions=total_candidate_mentions,
        last_week_candidate_mentions=last_week_candidate_mentions,
        national_candidate_mentions=national_candidate_mentions,
        local_candidate_mentions=local_candidate_mentions)
def index():
    """Render the front page.

    Shows all constituencies sorted alphabetically, the 100 most recent
    stream items, and the six candidates with the most mentions over
    the past week.
    """
    recent_stream = StreamItem.get_all(100)
    top_candidates = (db_candidates.find()
                      .sort([("mentions.last_week_count", -1)])
                      .limit(6))
    return render_template(
        'index.html',
        constituencies=db_areas.find().sort([('name', 1)]),
        stream=recent_stream,
        last_week_candidate_mentions=top_candidates)
def index():
    """Front-page view.

    Provides the template with an alphabetical constituency listing,
    the latest 100 stream items, and the top six candidates by
    last-week mention count.
    """
    weekly_sort = [("mentions.last_week_count", -1)]
    context = {
        'constituencies': db_areas.find().sort([('name', 1)]),
        'stream': StreamItem.get_all(100),
        'last_week_candidate_mentions':
            db_candidates.find().sort(weekly_sort).limit(6),
    }
    return render_template('index.html', **context)
def statistics():
    """Render the statistics page.

    Builds four leaderboards: total mentions, last-week mentions, and
    the candidates whose coverage skews most national / most local.
    """
    total_candidate_mentions = db_candidates.find() \
        .sort([("mentions.total_count", -1)]).limit(50)
    last_week_candidate_mentions = db_candidates.find() \
        .sort([("mentions.last_week_count", -1)]).limit(50)

    # Filter out documents lacking a 'mentions' sub-document so the
    # ratio computation below cannot raise KeyError; statistics_json
    # applies the same 'mentions' guard.
    loc_nat_candidates = [
        doc for doc in db_candidates.find() if 'mentions' in doc
    ]

    for doc in loc_nat_candidates:
        m = doc['mentions']
        total = float(m['national_count'] + m['local_count'])
        # +1 prevents division by zero; sqrt(total) damps candidates
        # with only a handful of mentions.
        m['nat_ratio'] = (
            m['national_count'] / (total + 1) - 0.5) * math.sqrt(total)
        m['loc_ratio'] = (
            m['local_count'] / (total + 1) - 0.5) * math.sqrt(total)

    local_candidate_mentions = sorted(
        loc_nat_candidates,
        key=lambda d: d['mentions']['loc_ratio'],
        reverse=True)[:50]
    national_candidate_mentions = sorted(
        loc_nat_candidates,
        key=lambda d: d['mentions']['nat_ratio'],
        reverse=True)[:50]

    return render_template(
        'statistics.html',
        total_candidate_mentions=total_candidate_mentions,
        last_week_candidate_mentions=last_week_candidate_mentions,
        national_candidate_mentions=national_candidate_mentions,
        local_candidate_mentions=local_candidate_mentions
    )
def statistics_json():
    """JSON endpoint summarising candidate mention counts.

    Returns:
        A Flask JSON response with total mentions, last-week mentions,
        and the number of candidates with at least one mention.
    """
    # No ordering is needed to compute these sums, so fetch the
    # collection once rather than running two sorted full scans.
    docs = [d for d in db_candidates.find() if 'mentions' in d]

    return jsonify({
        'candidates': {
            'total_mentions': sum(
                d['mentions']['total_count'] for d in docs),
            'last_week_mentions': sum(
                d['mentions']['last_week_count'] for d in docs),
            'num_candidates': sum(
                1 for d in docs if d['mentions']['total_count'] > 0),
        }
    })
def constituency_get_candidates(constituency_id):
    """Fetches all the (non-deleted) candidates in a given constituency.

    Args:
        constituency_id: string, a constituency identifier.

    Returns:
        A list of candidate data dictionaries, sorted by candidate name.
    """
    unique_candidates = {}
    for election_id in elections:
        constituency_key = \
            "candidacies.{}.constituency.id".format(election_id)
        cursor = db_candidates.find({
            "deleted": False,
            constituency_key: constituency_id,
        })
        # Key by candidate id so a candidate standing in more than one
        # election appears only once in the result.
        for doc in cursor:
            unique_candidates[doc["id"]] = doc
    return sorted(unique_candidates.values(), key=lambda doc: doc["name"])
from ppsay.db import db_candidates, db_areas, db_domains from bson.json_util import dumps candidates = list(db_candidates.find()) areas = list(db_areas.find()) domains = list(db_domains.find()) out = { 'candidates': candidates, 'areas': areas, 'domains': domains, } print dumps(out)
from urlparse import urlparse from ppsay.importers import ynmp from ppsay.sources import get_source_whitelist from ppsay.db import db_articles, db_candidates client = MongoClient() url = "http://yournextmp.popit.mysociety.org/api/v0.1/export.json" url_regex = re.compile("(http|https)://([^\s]+)") sources = [] all_ids = { candidate['id'] for candidate in db_candidates.find() } found_ids = set() print "Downloading data" export_data = requests.get(url).json() print "Updating candidates" for i, person in enumerate(export_data['persons']): print i, ynmp.save_person(person) found_ids.add(person['id']) # Look for any new sources #if 'versions' in person: # for version in person['versions']:
from pymongo import MongoClient from urlparse import urlparse from ppsay.importers import ynmp from ppsay.sources import get_source_whitelist from ppsay.db import db_articles, db_candidates client = MongoClient() url = "http://yournextmp.popit.mysociety.org/api/v0.1/export.json" url_regex = re.compile("(http|https)://([^\s]+)") sources = [] all_ids = {candidate['id'] for candidate in db_candidates.find()} found_ids = set() print "Downloading data" export_data = requests.get(url).json() print "Updating candidates" for i, person in enumerate(export_data['persons']): print i, ynmp.save_person(person) found_ids.add(person['id']) # Look for any new sources #if 'versions' in person: # for version in person['versions']: # sources.append(version['information_source'])