def updateCacheforList(date_=None):
    """
    Runs once per day (cron job) to regenerate caches with a short lifetime.
    """
    sc = SlackClient(slack_token)
    try:
        if not date_:
            tomorrow = datetime.now() + timedelta(days=1)
            date_ = tomorrow.strftime(API_DATE_FORMAT)
        getAllStaticData(None, force_render=True)
        getListOfPGs(None, date_, force_render=True)
        getSessionsList(None, date_, force_render=True)
        updateCacheIntraDisunion()  # store static data for search
        sc.api_call("chat.postMessage",
                    channel="#parlalize_notif",
                    text='Generated the cache for the next day.')
    except Exception:
        # `client` is the module-level Sentry/Raven client
        client.captureException()
        sc.api_call("chat.postMessage",
                    channel="#parlalize_notif",
                    text='Oops, something went wrong. The cache was not generated.')
    return 1
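# Usage sketch (hypothetical, not part of this repo): the cron entry calls
# updateCacheforList() with no arguments so the cache is rebuilt for tomorrow;
# a specific day can be forced by passing a date string already formatted with
# API_DATE_FORMAT, e.g.:
#
#   updateCacheforList(datetime(2019, 2, 5).strftime(API_DATE_FORMAT))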
def handle(self, *args, **options):
    self.stdout.write('Refetch data')
    getAllStaticData(None, force_render=True)
    getSessionsList(None, force_render=True)
    for org in Organization.objects.filter(has_voters=True):
        getListOfPGs(None, str(org.id_parladata), force_render=True)
    requests.get(settings.FRONT_URL + '/api/data/refetch')
    requests.get(settings.GLEJ_URL + '/api/data/refetch')
    requests.get(settings.ISCI_URL + '/api/data/refetch')
    requests.get(settings.GLEJ_URL + '/api/cards/renders/delete/all?method=zakon')
    requests.get(settings.GLEJ_URL + '/api/cards/renders/delete/all?method=seznam-sej')
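# The refetch fan-out to FRONT_URL, GLEJ_URL and ISCI_URL is repeated in
# several commands; a hypothetical helper (not part of this repo) could
# centralize it:

def refetch_frontends():
    # notify each configured frontend that the cached data changed
    for base_url in (settings.FRONT_URL, settings.GLEJ_URL, settings.ISCI_URL):
        requests.get(base_url + '/api/data/refetch')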
def handle(self, *args, **options):
    # get static data
    self.stdout.write('Getting all static data')
    static_data = json.loads(getAllStaticData(None).content)

    # get all votes
    self.stdout.write('Getting votes')
    votes = Vote.objects.all()

    i = 1
    output = []
    for vote in votes:
        has_votes = bool(vote.vote.all())
        output.append({
            'term': 'VIII',
            'type': 'vote',
            'id': 'vote_' + str(vote.id_parladata),
            'vote_id': vote.id_parladata,
            'session_id': vote.session.id_parladata,
            'session_json': json.dumps(static_data['sessions'][str(
                vote.session.id_parladata)]),
            'org_id': vote.session.organization.id_parladata,
            'start_time': vote.created_for.isoformat(),
            'content': vote.motion,
            'results_json': json.dumps({
                'motion_id': vote.id_parladata,
                'text': vote.motion,
                'for': vote.votes_for,
                'against': vote.against,
                'abstain': vote.abstain,
                'absent': vote.not_present,
                'result': vote.result,
                # TODO: use vote.is_outlier once the is_outlier algorithm is fixed.
                'is_outlier': False,
                'has_outliers': vote.has_outlier_voters,
                'has_votes': has_votes
            }),
        })
        # commit in batches of 100 documents
        if i % 100 == 0:
            commit_to_solr(self, output)
            output = []
        i += 1
    if len(output):
        commit_to_solr(self, output)
    return 0
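# commit_to_solr is used by all of the indexing commands here but defined
# elsewhere in the project; a minimal sketch, assuming it posts each batch to
# Solr's JSON update handler at SOLR_URL and logs progress through the calling
# command's stdout:

import json

import requests

def commit_to_solr(commander, output):
    commander.stdout.write('Committing %s documents to solr' % len(output))
    requests.post(SOLR_URL + '/update?commit=true',
                  data=json.dumps(output),
                  headers={'Content-Type': 'application/json'})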
def handle(self, *args, **options):
    pg_ids = []
    if options['pg_ids']:
        pg_ids = options['pg_ids']
    else:
        date_of = datetime.now().date()
        date_ = date_of.strftime(API_DATE_FORMAT)
        pg_ids = getOrganizationsWithVoters(date_=date_of)

    # get static data
    self.stdout.write('Getting all static data')
    static_data = json.loads(getAllStaticData(None).content)

    for pg_id in pg_ids:
        self.stdout.write('About to begin with PG %s' % str(pg_id))
        pg = Organization.objects.filter(id_parladata=pg_id)
        if not pg:
            self.stdout.write('Organization with id %s does not exist' % str(pg_id))
            continue
        else:
            pg = pg[0]
        output = [{
            'term': 'VIII',
            'type': 'pgmegastring',
            'id': 'pgms_' + str(pg.id_parladata),
            'party_id': pg.id_parladata,
            'party_json': json.dumps(static_data['partys'][str(pg.id_parladata)]),
            'content': getOrgMegastring(pg),
        }]
        commit_to_solr(self, output)
    return 0
def handle(self, *args, **options):
    # get static data
    self.stdout.write('Getting all static data')
    static_data = json.loads(getAllStaticData(None).content)

    # get all legislations
    self.stdout.write('Getting legislations')
    legislations = Legislation.objects.all()

    i = 1
    output = []
    for legislation in legislations:
        sessions = list(legislation.sessions.all().values_list(
            'id_parladata', flat=True))
        note = legislation.note
        if note:
            # strip markup and normalize whitespace (non-breaking spaces,
            # carriage returns, newlines); the last replace repairs the
            # UTF-8-as-Latin-1 mojibake for 'š' (reconstructed: the original
            # pattern was garbled in transit)
            note = strip_tags(note).replace('\xa0', ' ').replace(
                '\r', '').replace('\n', ' ').replace('Å¡', 'š')
        output.append({
            'term': 'VIII',
            'type': 'legislation',
            'id': 'legislation_' + str(legislation.id_parladata),
            'act_id': legislation.epa,
            'sessions': sessions,
            'content': note,
            'title': legislation.text,
            'status': legislation.status,
            # 'result': legislation.result,  # TODO: duplicates status, removed for now
            'wb': legislation.mdt,
        })
        # commit in batches of 100 documents
        if i % 100 == 0:
            commit_to_solr(self, output)
            output = []
        i += 1
    if len(output):
        commit_to_solr(self, output)
    return 0
def handle(self, *args, **options):
    speaker_ids = []
    if options['speaker_ids']:
        speaker_ids = options['speaker_ids']
    else:
        self.stdout.write('Getting voters')
        speaker_ids = getVotersIDs()

    # get static data
    self.stdout.write('Getting all static data')
    static_data = json.loads(getAllStaticData(None).content)

    for speaker_id in speaker_ids:
        self.stdout.write('About to begin with speaker %s' % str(speaker_id))
        speaker = Person.objects.filter(id_parladata=speaker_id)
        if not speaker:
            self.stdout.write('Speaker with id %s does not exist' % str(speaker_id))
            continue
        else:
            speaker = speaker[0]
        output = [{
            'term': 'VIII',
            'type': 'pmegastring',
            'id': 'pms_' + str(speaker.id_parladata),
            'person_id': speaker.id_parladata,
            'person_json': json.dumps(static_data['persons'][str(speaker.id_parladata)]),
            'content': getSpeakerMegastring(speaker),
        }]
        commit_to_solr(self, output)
    return 0
def uploadSessionToSolr(commander, ses_ids):
    static_data = json.loads(getAllStaticData(None).content)
    commander.stdout.write('Sessions for upload %s' % str(ses_ids))
    for session_id in ses_ids:
        commander.stdout.write('About to begin with session %s' % str(session_id))
        session = Session.objects.filter(id_parladata=session_id)
        if not session:
            commander.stdout.write('Session with id %s does not exist' % str(session_id))
            continue
        else:
            session = session[0]
        output = [{
            'term': 'VIII',
            'type': 'session',
            'id': 'session_' + str(session.id_parladata),
            'session_id': session.id_parladata,
            'session_json': json.dumps(static_data['sessions'][str(session.id_parladata)]),
            'org_id': session.organization.id_parladata,
            'start_time': session.start_time.isoformat(),
            'content': getSessionMegastring(session),
            'title': session.name,
        }]
        commit_to_solr(commander, output)
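# Hypothetical caller (an assumption, not part of this repo): a management
# command could expose uploadSessionToSolr directly, passing itself as the
# `commander` (anything with a stdout writer works) and parladata session ids
# as positional arguments.

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Reindex the given sessions in solr'

    def add_arguments(self, parser):
        parser.add_argument('ses_ids', nargs='+', type=int)

    def handle(self, *args, **options):
        uploadSessionToSolr(self, options['ses_ids'])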
def handle(self, *args, **options):
    # get all speech ids already indexed in solr
    url = SOLR_URL + '/select?wt=json&q=type:speech&fl=speech_id&rows=100000000'
    self.stdout.write('Getting all IDs from %s' % url)
    response = requests.get(url)
    docs = response.json()['response']['docs']
    idsInSolr = [doc['speech_id'] for doc in docs if 'speech_id' in doc]

    # get static data
    self.stdout.write('Getting all static data')
    static_data = json.loads(getAllStaticData(None).content)

    # get all valid speeches and drop stale ones from the index
    self.stdout.write('Getting valid speeches')
    speeches = Speech.getValidSpeeches(datetime.now())
    deleteUnvalidSpeeches(idsInSolr, speeches)

    i = 1
    output = []
    for speech in speeches.exclude(id_parladata__in=idsInSolr):
        output.append({
            'term': 'VIII',
            'type': 'speech',
            'id': 'speech_' + str(speech.id_parladata),
            'speech_id': speech.id_parladata,
            'person_id': speech.person.first().id_parladata,
            'person_json': json.dumps(static_data['persons'][str(
                speech.person.first().id_parladata)]),
            'party_id': speech.organization.id_parladata,
            'session_id': speech.session.id_parladata,
            'session_json': json.dumps(static_data['sessions'][str(
                speech.session.id_parladata)]),
            'org_id': speech.session.organization.id_parladata,
            'start_time': speech.start_time.isoformat(),
            'the_order': speech.the_order,
            'content': speech.content,
        })
        # commit in batches of 100 documents
        if i % 100 == 0:
            commit_to_solr(self, output)
            output = []
        i += 1
    if len(output):
        commit_to_solr(self, output)
    return 0
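# deleteUnvalidSpeeches is referenced above but defined elsewhere; a sketch of
# the assumed behavior (an assumption, not the project's actual code): remove
# documents from solr whose speech_id is no longer among the valid speeches.

def deleteUnvalidSpeeches(idsInSolr, speeches):
    valid_ids = set(speeches.values_list('id_parladata', flat=True))
    stale = [i for i in idsInSolr if i not in valid_ids]
    if stale:
        # delete-by-query against Solr's JSON update handler
        query = 'speech_id:(%s)' % ' OR '.join(str(i) for i in stale)
        requests.post(SOLR_URL + '/update?commit=true',
                      data=json.dumps({'delete': {'query': query}}),
                      headers={'Content-Type': 'application/json'})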
def refetch():
    getAllStaticData(None, force_render=True)
    requests.get(settings.GLEJ_URL + '/api/data/refetch')
    requests.get(settings.FRONT_URL + '/api/data/refetch')
    requests.get(settings.ISCI_URL + '/api/data/refetch')
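# Hypothetical wiring (an assumption): refetch() can be scheduled, or run by
# hand, through a thin management command alongside the nightly recache job.

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Force a re-render of static data and notify the frontends'

    def handle(self, *args, **options):
        refetch()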