def szr_notice(uid):
    """Build the plain-text change notice for one user's tracked proceedings.

    Collects the user's ``Proceedings`` rows flagged ``notify=True``, renders
    one bullet per proceeding (court, docket reference, optional description
    and — where available — a link to the online docket), clears the
    ``notify`` flag on every rendered row, and returns the message body.
    Returns an empty string when nothing changed.
    """
    text = ''
    res = Proceedings.objects.filter(uid=uid, notify=True).order_by('desc', 'id')
    if res:
        text = 'V těchto soudních řízeních, která sledujete, došlo ke změně:\n\n'
        for proc in res:
            desc = ' ({})'.format(proc.desc) if proc.desc else ''
            text += ' - {}, sp. zn. {}{}\n'.format(
                proc.court,
                composeref(proc.senate, proc.register, proc.number, proc.year),
                desc)
            if proc.court_id != SUPREME_ADMINISTRATIVE_COURT:
                # 'ns' selects the Supreme Court service, 'os' ordinary courts.
                court_type = 'ns' if proc.court_id == SUPREME_COURT else 'os'
                text += ' {}\n\n'.format(ROOT_URL + GET_PROC.format(
                    proc.court.id,
                    proc.court.reports.id if proc.court.reports else proc.court.id,
                    proc.senate,
                    quote(proc.register.upper()),
                    proc.number,
                    proc.year,
                    court_type))
            elif proc.auxid:
                # Supreme Administrative Court link is keyed by the auxiliary id.
                text += ' {}\n\n'.format(NSS_GET_PROC.format(proc.auxid))
            proc.notify = False
            proc.save()
        LOGGER.info('Non-empty notice prepared for user "{}" ({:d})'.format(
            User.objects.get(pk=uid).username, uid))
    return text
def __str__(self):
    """Return "<originator id>, <INS docket reference>" for display."""
    ref = composeref(self.senat, 'INS', self.bc, self.rocnik)
    return '{}, {}'.format(self.idOsobyPuvodce, ref)
def cron_find():
    """Cron job: fetch the anonymized text of one pending decision.

    Picks the least-recently checked decision inside the ``OBS`` window that
    still lacks an anonymized file, replays the court's ASP.NET search form
    for that docket reference and date, downloads the first matching
    anonymized document into the repository and records its file name.
    Best-effort: any failure is logged and the job retries on a later run.
    """
    now = datetime.now()
    try:
        dec = Decision.objects.filter(anonfilename='', date__gte=(now - OBS)).earliest('updated')
        # Touch the record first so the next run picks a different candidate
        # even if this attempt fails.
        dec.updated = now
        dec.save()
        res = get(FIND_URL)
        soup = BeautifulSoup(res.text, 'html.parser')
        form = soup.find('form')
        # Replay the form: carry over every hidden input (view state etc.).
        dct = {
            i['name']: i['value'] for i in form.find_all('input')
            if i['type'] == 'hidden' and i.has_attr('value')}
        ref = ('{} '.format(dec.senate) if dec.senate else '')
        ref += '{0.register} {0.number:d}/{0.year:d}'.format(dec)
        dct['_ctl0:ContentPlaceMasterPage:_ctl0:txtDatumOd'] = \
            dct['_ctl0:ContentPlaceMasterPage:_ctl0:txtDatumDo'] = \
            '{0.day:02d}.{0.month:02d}.{0.year:d}'.format(dec.date)
        dct['_ctl0:ContentPlaceMasterPage:_ctl0:txtSpisovaZnackaFull'] = ref
        dct['_ctl0_ContentPlaceMasterPage__ctl0_rbTypDatum_0'] = 'on'
        res = post(FIND_URL, dct)
        soup = BeautifulSoup(res.text, 'html.parser')
        for anchor in soup.select('table#_ctl0_ContentPlaceMasterPage__ctl0_grwA')[0].select(
                'a[title^=Anonymizovan]'):
            fileurl = anchor['href']
            filename = fileurl.split('/')[-1]
            if not FRE.match(filename):
                continue
            res = get(ROOT_URL + fileurl)
            if not res.ok:
                continue
            LOGGER.info(
                'Writing anonymized decision "{}"'
                .format(composeref(dec.senate, dec.register, dec.number, dec.year)))
            with open(join(REPO_PREF, filename), 'wb') as outfile:
                if not outfile.write(res.content):  # pragma: no cover
                    LOGGER.error(
                        'Failed to write anonymized decision "{}"'
                        .format(composeref(dec.senate, dec.register, dec.number, dec.year)))
                    return
            adddoc(APP, filename, ROOT_URL + fileurl)
            dec.anonfilename = filename
            dec.save()
            return
    except Exception:  # pragma: no cover
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; scraping/DB errors (including the DoesNotExist
        # raised by ``earliest`` when the queue is empty) stay best-effort.
        LOGGER.warning('Find failed')
def csvlist(request):
    """Export the filtered list of hearings as a CSV download.

    Translates the GET parameters into a queryset filter via ``g2p``; any
    error in the parameters yields HTTP 404.  Result sets larger than
    ``EXLIM`` render the "too many results" page instead of a CSV.
    """
    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Hearing.objects.filter(**par).order_by('time', 'pk').distinct()
    except Exception:
        # Malformed query parameters map to 404 (narrowed from a bare except).
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('psj:mainpage')})
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Jednani.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Jednací síň',
        'Datum',
        'Čas',
        'Spisová značka',
        'Řešitel',
        'Účastníci řízení',
        'Druh jednání',
        'Neveřejné',
        'Zrušeno',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            item.courtroom.court.name,
            item.courtroom.desc,
            '{:%d.%m.%Y}'.format(item.time),
            '{:%H:%M}'.format(item.time),
            composeref(item.senate, item.register, item.number, item.year),
            item.judge.name,
            ';'.join([p['name'] for p in item.parties.values()]),
            item.form.name,
            'ano' if item.closed else 'ne',
            'ano' if item.cancelled else 'ne',
        )
        writer.writerow(dat)
    return response
def sur_notice(uid):
    """Build the plain-text notice about newly recorded tracked parties.

    Renders one bullet per ``Found`` row for the user (party name, court,
    docket reference and URL), deletes the consumed rows, clears the
    ``notify`` flag on the user's ``Party`` records, and returns the message
    body (empty string when nothing was found).
    """
    text = ''
    res = Found.objects.filter(uid=uid).order_by('name', 'id').distinct()
    if res:
        text = 'Byli nově zaznamenáni tito účastníci řízení, které sledujete:\n\n'
        for item in res:
            text += ' - {0.name}, {0.court}, sp. zn. {1}\n'.format(
                item, composeref(item.senate, item.register, item.number, item.year))
            text += ' {}\n\n'.format(item.url)
        # Consume the queue so the same hits are not reported twice.
        Found.objects.filter(uid=uid).delete()
        LOGGER.info('Non-empty notice prepared for user "{}" ({:d})'.format(
            User.objects.get(pk=uid).username, uid))
    Party.objects.filter(uid=uid).update(notify=False)
    return text
def sur_notice(uid):
    """Return the notification text for newly found tracked parties.

    For every ``Found`` row belonging to the user, appends a bullet with the
    party name, court, docket reference and URL.  The consumed rows are then
    deleted and the ``notify`` flag on the user's ``Party`` records cleared.
    An empty string means there is nothing to report.
    """
    text = ''
    found = Found.objects.filter(uid=uid).order_by('name', 'id').distinct()
    if found:
        text = 'Byli nově zaznamenáni tito účastníci řízení, které sledujete:\n\n'
        for hit in found:
            ref = composeref(hit.senate, hit.register, hit.number, hit.year)
            text += ' - {0.name}, {0.court}, sp. zn. {1}\n'.format(hit, ref)
            text += ' {}\n\n'.format(hit.url)
        # Drop the reported hits so they are delivered exactly once.
        Found.objects.filter(uid=uid).delete()
        LOGGER.info('Non-empty notice prepared for user "{}" ({:d})'.format(
            User.objects.get(pk=uid).username, uid))
    Party.objects.filter(uid=uid).update(notify=False)
    return text
def procexport(request):
    """Export the current user's tracked proceedings as a CSV download.

    Each row holds the description, the court id and the composed docket
    reference; no header row is written.
    """
    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Proceedings.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=szr.csv'
    writer = csvwriter(response)
    for proc in res:
        writer.writerow((
            proc.desc,
            proc.court.id,
            composeref(proc.senate, proc.register, proc.number, proc.year),
        ))
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid), request)
    return response
def getauxid(senate, register, number, year):
    """Resolve the NSS auxiliary docket id for the given reference.

    Replays the court's ASP.NET search form with the composed reference and
    extracts the numeric id from the ``onclick`` handler of the result row's
    info icon.  Returns 0 when the lookup fails for any reason.
    """
    try:
        res = get(NSS_URL)
        soup = BeautifulSoup(res.text, 'html.parser')
        form = soup.find('form')
        # Carry over all hidden inputs (view state etc.) for the postback.
        dct = {
            i['name']: i['value'] for i in form.find_all('input')
            if i['type'] == 'hidden' and i.has_attr('value')}
        ref = composeref(senate, register, number, year)
        dct['_ctl0:ContentPlaceMasterPage:_ctl0:txtSpisovaZnackaFull'] = ref
        res = post(NSS_URL, dct)
        soup = BeautifulSoup(res.text, 'html.parser')
        oncl = (
            soup.select('table#_ctl0_ContentPlaceMasterPage__ctl0_grwA')[0]
            .select('img[src="/Image/infosoud.gif"]')[0]['onclick'])
        # The handler ends in "...=<id>'"; take the trailing number.
        return int(oncl.split('=')[-1].split("'")[0])
    except Exception:
        # Any scraping failure (network, missing table, parse error) degrades
        # to "no auxiliary id"; narrowed from a bare ``except:``.
        return 0
def csvlist(request):
    """Export the filtered list of decisions as a CSV download.

    Translates the GET parameters into a queryset filter via ``g2p``; any
    error in the parameters yields HTTP 404.  Result sets larger than
    ``EXLIM`` render the "too many results" page instead of a CSV.
    """
    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Decision.objects.filter(**par).order_by('date', 'pk').distinct()
    except Exception:
        # Malformed query parameters map to 404 (narrowed from a bare except).
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('udn:mainpage')})
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Rozhodnuti.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Datum',
        'Číslo jednací',
        'Oblast',
        'Účastníci řízení',
        'Zkrácené znění',
        'Anonymisované znění',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            SUPREME_ADMINISTRATIVE_COURT_NAME,
            '{:%d.%m.%Y}'.format(item.date),
            composeref(item.senate, item.register, item.number, item.year, item.page),
            item.agenda.desc,
            # Loop variable renamed so it no longer shadows the filter ``par``.
            ';'.join([party['name'] for party in item.parties.values()]),
            join(REPO_PREFIX, item.filename),
            join(REPO_PREFIX, item.anonfilename) if item.anonfilename else '',
        )
        writer.writerow(dat)
    return response
def procexport(request):
    """Stream the user's tracked proceedings as an attached CSV file.

    Rows carry the description, court id and composed docket reference;
    there is no header row.
    """
    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    proceedings = Proceedings.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=szr.csv'
    writer = csvwriter(response)
    for proc in proceedings:
        ref = composeref(proc.senate, proc.register, proc.number, proc.year)
        writer.writerow((proc.desc, proc.court.id, ref))
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid), request)
    return response
def __str__(self):
    """Return the composed docket reference (including page) for display."""
    ref = composeref(self.senate, self.register, self.number, self.year, self.page)
    return ref
def __str__(self):
    """Return "<court>, <docket reference>" for display."""
    ref = composeref(self.senate, self.register, self.number, self.year)
    return '{}, {}'.format(self.court, ref)
def __str__(self):
    """Human-readable form: the court followed by the docket reference."""
    reference = composeref(self.senate, self.register, self.number, self.year)
    return '{}, {}'.format(self.court, reference)
def p2s(proc):
    """Render a proceedings record as "<court id>, <docket reference>"."""
    ref = composeref(proc.senate, proc.register, proc.number, proc.year)
    return '{}, {}'.format(proc.court_id, ref)
def cron_update():
    """Cron job: import newly published abridged decisions from the NSS site.

    Walks the paginated ASP.NET result listing, downloads each decision file
    not yet present in the database, stores it in the repository, creates the
    ``Decision`` record with its agenda, date and parties, and runs
    ``sur_check`` for every party name.  Best-effort throughout: a broken row
    is skipped and a fatal error only logs a warning.
    """
    nss = Court.objects.get(pk=SUPREME_ADMINISTRATIVE_COURT)
    try:
        res = get(FORM_URL)
        soup = BeautifulSoup(res.text, 'html.parser')
        form = soup.find('form')
        # Carry over all hidden inputs (view state etc.) for the postback.
        dct = {
            i['name']: i['value'] for i in form.find_all('input')
            if i['type'] == 'hidden' and i.has_attr('value')}
        while True:
            # Fixed sort settings for the search form — presumably newest
            # first; confirm against the live site if this ever changes.
            dct['_ctl0:ContentPlaceMasterPage:_ctl0:ddlSortName'] = '5'
            dct['_ctl0:ContentPlaceMasterPage:_ctl0:ddlSortDirection'] = '1'
            res = post(FORM_URL, dct)
            soup = BeautifulSoup(res.text, 'html.parser')
            for item in soup.select('table.item'):
                try:
                    ttr = item.select('tr')
                    senate, register, number, year, page = decomposeref(ttr[0].td.text.strip())
                    if Decision.objects.filter(
                            senate=senate,
                            register=register,
                            number=number,
                            year=year,
                            page=page).exists():
                        continue  # already imported
                    fileurl = ttr[4].a['href']
                    filename = fileurl.split('/')[-1]
                    if not FRE.match(filename):
                        continue
                    res = get(ROOT_URL + fileurl)
                    if not res.ok:
                        continue
                    LOGGER.info('Writing abridged decision "{}"'.format(
                        composeref(senate, register, number, year)))
                    with open(join(REPO_PREF, filename), 'wb') as outfile:
                        if not outfile.write(res.content):  # pragma: no cover
                            LOGGER.error(
                                'Failed to write abridged decision "{}"'
                                .format(composeref(senate, register, number, year)))
                            continue
                    adddoc(APP, filename, ROOT_URL + fileurl)
                    agenda = Agenda.objects.get_or_create(desc=ttr[2].td.text.strip())[0]
                    # Date cell is "dd.mm.yyyy"; reverse into (y, m, d).
                    dat = date(*map(int, list(reversed(ttr[3].td.text.split('.')))))
                    dec = Decision(
                        senate=senate,
                        register=register,
                        number=number,
                        year=year,
                        page=page,
                        agenda=agenda,
                        date=dat,
                        filename=filename)
                    dec.save()
                    for query in ttr[1].td:
                        # Only plain text nodes expose .strip(); skips nested tags.
                        if 'strip' in dir(query):
                            qstrip = query.strip()
                            party = Party.objects.get_or_create(name=qstrip)[0]
                            dec.parties.add(party)
                            sur_check(
                                {'check_udn': True},
                                qstrip,
                                nss,
                                senate,
                                register,
                                number,
                                year,
                                DEC_URL.format(senate, quote(register), number, year, page))
                except Exception:  # pragma: no cover
                    # One broken row must not abort the whole import run
                    # (narrowed from a bare ``except:``).
                    pass
            pagers = soup.select('div#PagingBox2')[0]
            cpag = int(pagers.b.text[1:-1])
            pager = pagers.select('a')
            if cpag > len(pager):
                break  # past the last page
            # Build the postback that navigates to the next result page.
            form = soup.find('form')
            dct = {
                i['name']: i['value'] for i in form.find_all('input')
                if i['type'] == 'hidden' and i.has_attr('value')}
            dct['__EVENTTARGET'] = pager[cpag - 1]['href'][70:-34]
            dct['__EVENTARGUMENT'] = ''
    except Exception:  # pragma: no cover
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed by the cron wrapper.
        LOGGER.warning('Update failed')