def get(self, slug):
    response = {}
    try:
        mp = MP.all().filter('slug =', slug)[0]
        response['mp'] = utils.mp_to_dict(mp)
    except IndexError:
        # No MP with that slug: return a 404 and stop here.
        self.returnJSON(404, response)
        return
    self.query = MPVote.all().filter('mp_slug =', slug)
    self.filterQueryOnParam('question')
    self.filterQueryOnParam('selection')
    response['votes'] = []
    for vote in self.query:
        d = db.to_dict(vote)
        d['question'] = utils.question_to_dict(vote.parent())
        # The MP fields are redundant here since the whole response is
        # already scoped to one MP.
        del d['mp_party']
        del d['mp_constituency']
        del d['mp_slug']
        del d['mp_name']
        response['votes'].append(d)
    response['total'] = len(response['votes'])
    self.returnJSON(200, response)
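These handlers lean on two helpers that are not shown here: returnJSON serialises a dict with an HTTP status code, and filterQueryOnParam narrows self.query from a query-string parameter. A minimal sketch of what they could look like on a shared webapp handler base class; the class name and method bodies are assumptions, only the call signatures come from the code above.

import json
from google.appengine.ext import webapp

class JSONRequestHandler(webapp.RequestHandler):
    def returnJSON(self, status, response):
        # Write the response dict as a JSON body with the given HTTP status.
        self.response.set_status(status)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(response, default=str))

    def filterQueryOnParam(self, param):
        # Apply an equality filter to self.query when the request carries the
        # matching query-string parameter, e.g. ?selection=yes.
        value = self.request.get(param)
        if value:
            self.query = self.query.filter('%s =' % param, value)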
def get(self, id=None):
    key = '%s' % (id.strip())
    logging.info('Request for MP %s', key)
    mp = MP.all().filter('aristotleid =', long(id)).get()
    if not mp:
        self.error(404)
        return  # without this the handler falls through with mp == None
    # Look up related Guardian articles, cached for an hour per MP.
    articles = helpers.cached(
        key,
        lambda: helpers.load_from_json_endpoint(
            'http://content.guardianapis.com/search.json?q=%s' % quote(mp.name)),
        60 * 60)
    helpers.render_template(self, 'mp.html', {'mp': mp, 'articles': articles})
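helpers.cached is called above with a key, a factory function and a TTL but is not defined in this file. One plausible memcache-backed implementation, written as an assumption from the call site:

from google.appengine.api import memcache

def cached(key, create_func, expiry):
    # Return the cached value for key, computing and storing it on a miss.
    value = memcache.get(key)
    if value is None:
        value = create_func()
        memcache.set(key, value, time=expiry)
    return value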
def post(self):
    id = self.request.get('id')
    api_url = self.request.get('api_url')
    json = helpers.load_from_json_endpoint(api_url)
    key = '%s' % id
    name = json['person']['name']
    constituency = json['person']['constituency']['name']
    # Do something with the returned information
    logging.info('Got information on MP %s from %s' % (name, api_url))
    mp = MP.get_or_insert(key, aristotleid=int(id), name=name,
                          constituency=constituency)
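helpers.load_from_json_endpoint appears in several handlers; a hedged sketch using App Engine's urlfetch service (the error handling is a guess):

import json
from google.appengine.api import urlfetch

def load_from_json_endpoint(url):
    # Fetch the URL and return the decoded JSON payload, or None on failure.
    result = urlfetch.fetch(url)
    if result.status_code == 200:
        return json.loads(result.content)
    return None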
def get(self, slug):
    response = {}
    try:
        response['mp'] = db.to_dict(MP.all().filter('slug =', slug)[0])
        response['mp']['vote_details'] = '/mps/%s/votes' % slug
    except IndexError:
        response['error'] = 'Cannot find mp'
        self.returnJSON(404, response)
        return  # don't fall through to the 200 response below
    self.returnJSON(200, response)
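For illustration, a successful GET /mps/caroline-lucas would produce a body shaped roughly like the dict below; the field values are examples only, borrowed from the fixture names used elsewhere in this file.

{
    'mp': {
        'name': 'Caroline Lucas',
        'party': 'green',
        'constituency': 'Brighton, Pavillion',
        'slug': 'caroline-lucas',
        'vote_details': '/mps/caroline-lucas/votes',
    },
}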
def get(self):
    response = {'mps': []}
    self.query = MP.all()
    # Note: the total is counted before the filter and paging params are
    # applied, so it reflects all MPs, not the filtered set.
    response['total'] = self.query.count()
    self.filterQueryOnParam('gender')
    self.filterQueryOnParam('party')
    response = self.addPagingFilters(response)
    for mp in self.query:
        u = db.to_dict(mp)
        u['details'] = '/mps/%s' % mp.slug
        response['mps'].append(u)
    self.returnJSON(200, response)
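addPagingFilters is assumed to read paging parameters, constrain self.query, and echo the paging state back in the response. A sketch under those assumptions; the parameter names and defaults are guesses:

def addPagingFilters(self, response):
    # Hypothetical offset/limit paging: constrain the query and record the
    # values so clients can page through the collection.
    offset = int(self.request.get('offset', 0))
    limit = int(self.request.get('limit', 100))
    response['offset'] = offset
    response['limit'] = limit
    self.query = self.query.fetch(limit, offset=offset)
    return response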
def get(self):
    helpers.render_template(self, 'index.html', {'mps': MP.all()})
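helpers.render_template is a thin wrapper over webapp's template module; a minimal sketch, assuming templates live in a templates/ directory next to this file:

import os
from google.appengine.ext.webapp import template

def render_template(handler, name, values):
    # Render templates/<name> with the given context dict.
    path = os.path.join(os.path.dirname(__file__), 'templates', name)
    handler.response.out.write(template.render(path, values))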
def import_mp_votes(subset=False):
    if MPVote.all().count() > 0:
        print "Import already complete"
        return
    # Small fixture subset used when only a handful of MPs/constituencies
    # are wanted, e.g. for local testing.
    subset_const = [
        "Brighton, Kemptown",
        "Brighton, Pavillion",
        "Hove",
        "Hackney South and Shoreditch",
        "Edinburgh North, and Leith",
    ]
    subset_mp = [
        "Caroline Lucas",
        "Simon Kirby",
        "Mike Weatherley",
        "Meg Hillier",
        "Mark Lazarowicz",
    ]
    # Load the vote questions; the last column names the per-question CSV.
    question_list = {}
    csvfile = open('fixtures/mp_votes/vote_questions.csv', 'rU')
    for row in csv.reader(csvfile):
        d = Question()
        d.question = row[0]
        d.title = row[1]
        # The date column (row[2]) is not parsed; the import timestamp is
        # stored instead.
        d.date = datetime.datetime.now()
        d.publicwhip_url = row[3]
        d.put()
        question_list[row[4]] = d
    mps_created = []
    consts_created = []
    for question in question_list:
        print question
        csvfile = open('fixtures/mp_votes/%s.csv' % question, 'rU')
        for row in csv.reader(csvfile):
            if subset and row[1] not in subset_const and row[0] not in subset_mp:
                continue
            try:
                v = MPVote(parent=question_list[question])
                v.question = str(question_list[question].key())
                v.mp_name = row[0]
                v.mp_slug = slugify(row[0])
                v.mp_constituency = row[1]
                v.mp_party = normalise_party(row[2]).lower()
                v.selection = normalise_selection(row[3])
                v.mp_whilst = get_whilst(row[2])
                v.put()
                # Create the MP and Constituency entities the first time each
                # one is seen in the vote data.
                if v.mp_slug not in mps_created:
                    mp = MP()
                    mp.slug = v.mp_slug
                    mp.name = v.mp_name
                    mp.constituency = v.mp_constituency
                    mp.party = v.mp_party
                    mp.put()
                    mps_created.append(v.mp_slug)
                if v.mp_constituency not in consts_created:
                    const = Constituency()
                    const.name = v.mp_constituency
                    const.slug = slugify(v.mp_constituency)
                    const.mp_name = v.mp_name
                    const.mp_party = v.mp_party
                    const.put()
                    consts_created.append(v.mp_constituency)
            except:
                print "Failed insert: %s" % row
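slugify, normalise_party, normalise_selection and get_whilst are used by the importer but defined elsewhere. Hedged sketches of two of them; the regex and the vote vocabulary are assumptions, not the project's actual rules:

import re

def slugify(value):
    # "Caroline Lucas" -> "caroline-lucas"
    return re.sub(r'[^a-z0-9]+', '-', value.lower()).strip('-')

def normalise_selection(value):
    # Collapse PublicWhip-style vote strings onto a small fixed vocabulary.
    value = value.strip().lower()
    return {'aye': 'yes', 'tellaye': 'yes',
            'no': 'no', 'tellno': 'no',
            'both': 'abstain'}.get(value, value)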
# 10-fold cross-validation training loop (fragment: test_size, rng_state,
# bfs, lr, max_epoch, train_batches and batch_size are defined earlier).
np.random.shuffle(shuffidx)  # randomly choose partition of data into test / fold
for fold in range(10):
    print('> fold ', fold)
    test_bottom = fold * test_size
    test_top = (1 + fold) * test_size
    test_indices = shuffidx[test_bottom:test_top]
    train_indices = shuffidx[0:test_bottom] + shuffidx[test_top:]
    loss_func = []
    bfs_params = []
    test_loss = []
    torch.set_rng_state(rng_state)  # fix init state
    torch.manual_seed(0)
    pht = MP(rbf=bfs, filtername='f')
    # Only the RBF weights are optimised.
    optimizer_filter = optim.SGD([{'params': pht.rbfweights}], lr=lr)
    for epoch in range(max_epoch):
        pht.train()
        np.random.shuffle(train_indices)
        lss = 0
        for b in range(train_batches):
            # Snapshot the weights before each batch update.
            bfs_params.append(pht.rbfweights.detach().clone().numpy())
            pht.train()
            train_indices_batch = train_indices[b * batch_size:(b + 1) * batch_size]
            optimizer_filter.zero_grad()
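            # The fragment ends immediately after zero_grad(). A typical
            # continuation of this batch loop would be: forward pass, loss,
            # backward, optimizer step. Everything below is an assumption
            # about the truncated code; x, y and F (torch.nn.functional) are
            # hypothetical names not present in the fragment.
            batch_x = x[train_indices_batch]
            batch_y = y[train_indices_batch]
            pred = pht(batch_x)               # forward pass through the MP model
            loss = F.mse_loss(pred, batch_y)  # assumed loss function
            loss.backward()                   # gradients w.r.t. pht.rbfweights
            optimizer_filter.step()           # SGD update
            lss += loss.item()                # accumulate epoch loss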