Example No. 1
def ouv():
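    # For each deputy, count the documents and amendments they co-signed whose first
    # signatory belongs to another group ('NI' non-attached excluded); returns the
    # counts sorted in descending order.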
    depgp = dict((d['depute_shortid'],
                  {'img': 'http://www2.assemblee-nationale.fr/static/tribun/15/photos/' + d['depute_uid'][2:] + '.jpg',
                   'g': d['groupe_abrev'],
                   'n': d['depute_nom']})
                 for d in mdb.deputes.find({}, {'depute_nom': 1, 'depute_uid': 1, 'depute_shortid': 1, 'groupe_abrev': 1, '_id': None}))
    shortids = dict((d['depute_id'],d['depute_shortid']) for d in mdb.deputes.find({},{'depute_id':1,'depute_shortid':1,'_id':None}))
    depsig = {}
    for doc in mdb.documentsan.find({'signataires':{'$ne':None}},{'numero':1,'signataires':1,'_id':None}):
        if doc['signataires']:
            sig1 = doc['signataires'][0]
            if sig1:
                for sig in doc['signataires'][1:]:
                    if sig and depgp[sig1]['g']!=depgp[sig]['g'] and depgp[sig]['g']!='NI':
                        if sig=='mohamedlaqhila':
                            print doc

                        depsig[sig] = depsig.get(sig,0)+1

    for amd in mdb.amendements.find({},{'numAmend':1,'signataires_ids':1,'_id':None}):
        if not amd['signataires_ids']:
            continue
        sig1 = shortids.get(amd['signataires_ids'][0],None)
        if sig1:
            for sig in amd['signataires_ids'][1:]:
                sig2 = shortids[sig]
                if sig2 and depgp[sig1]['g']!=depgp[sig2]['g'] and depgp[sig2]['g']!='NI':
                    if sig2=='mohamedlaqhila':
                        print amd
                    depsig[sig2] = depsig.get(sig2,0)+1

    return json_response(sorted(depsig.iteritems(),key=lambda x:x[1],reverse=True))
Example No. 2
def presences():
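    # Paginated list of committee attendance records, optionally filtered by deputy,
    # group or committee; the total item count is cached for one hour.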

    depute = request.args.get('depute',None)
    commission = request.args.get('commission',None)
    page = int(request.args.get('page','1'))-1

    nb = int(request.args.get('itemsperpage','25'))
    groupe = request.args.get('groupe',request.args.get('group',None))
    skip = nb*page
    filters = []
    if depute:
        filters.append({'depute_shortid': depute})
    if groupe:
        filters.append({'groupe_abrev':groupe})
    if commission:
        filters.append({'commission_sid':commission})
    if len(filters)==0:
        com_filter = {}
    elif len(filters)==1:
        com_filter = filters[0]
    else:
        com_filter = {'$and':filters}

    prescom = list(mdb.presences.find(com_filter).sort([('presence_date',-1)]).skip(skip).limit(nb))

    def countItems():
        rcount = mdb.presences.find(com_filter).count()
        return {'totalitems':rcount}
    cachekey= u"comm%s_%s_%s" % (depute,groupe,commission)
    counts = use_cache(cachekey,lambda:countItems(),expires=3600)

    import math
    nbpages = int(math.ceil(float(counts['totalitems'])/nb))
    result = dict(nbitems=len(prescom),nbpages=nbpages, currentpage=1+page,itemsperpage=nb, items=prescom,**counts)
    return json_response(result)
Example No. 3
def votesgroupes():
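    # Percentage of pour/contre/abstention/absent votes per parliamentary group,
    # rendered as a horizontal stacked bar chart (PNG) with pygal.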
    #return json_response(list(mdb.groupes.find({},{'_id':None,'groupe_abrev':1,'groupe_uid':1})))
    libelles = {'FI':u'France Insoumise',
                'LAREM':u'République en Marche',
                'UDI-AGIR': u'UDI, Agir et Indépendants',
                'GDR':u'Gauche Démocratique et Républicaine',
                'NG':u'Nouvelle Gauche',
                'MODEM':u'Mouvement Démocrate',
                'LR':u'Les Républicains',
                'NI':u'Députés Non Inscrits'
                }
    #return json_response(mdb.votes.find_one())
    pgroup = {}
    pgroup['n'] = {'$sum':1}
    pgroup['_id'] = { 'groupe':'$groupe_abrev','position':'$vote_position'}
    pipeline = [{'$group':pgroup}]
    grps = {}

    for v in mdb.votes.aggregate(pipeline):
        g = v['_id']['groupe']
        p = v['_id']['position']
        n = v['n']
        if g=='LC':
            g='UDI-AGIR'
        if not g in grps:
            grps[g] = { 'absent':0,'pour':0,'contre':0,'abstention':0 }
        grps[g][p] += n
    stats = []
    for g in grps:
        stat = dict(g=g)
        tot  = sum(grps[g].values())
        for p in ('pour','contre','abstention','absent'):
            stat[p]=100*float(grps[g][p])/tot

        stats.append(stat)
    stats.sort(key=lambda x:x['absent'],reverse=True)

    from pygal.style import Style
    custom_style = Style(
          font_family="'Montserrat', sans-serif;",
          major_label_font_size=15,
          colors=['#25a87e','#e23d21','#213558','#bbbbbb']
          )


    histo_chart = pygal.HorizontalStackedBar(x_label_rotation=0,width=1024,height=512,human_readable=True, x_title='%',y_title="Groupes parlementaires",style=custom_style)
    histo_chart.title = u'Votes des députés aux scrutins publics\n par groupe parlementaire (au %s)' % (datetime.datetime.now().strftime('%d/%m/%Y'))
    for x in 'pour','contre','abstention','absent':
        histo_chart.add('%s' % x, [stat[x] for stat in stats])
    histo_chart.x_labels = [libelles[stat['g']] for stat in stats]
    histo_chart.y_labels = []
    #histo_chart.x_labels_major = majors


    from StringIO import StringIO
    chart = StringIO()
    histo_chart.render_to_png(chart)
    return image_response('png',chart.getvalue())
    return json_response(stats)
Example No. 4
def interventions():
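    # Paginated, searchable list of interventions, filterable by deputy, group,
    # session and date; full-text matches are wrapped in <strong> tags.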
    nb = int(request.args.get('itemsperpage', '25'))

    page = int(request.args.get('page', '1')) - 1
    groupe = request.args.get('groupe', request.args.get('group', None))
    search = request.args.get('requete', request.args.get('query', ''))
    depute = request.args.get('depute', None)
    session = request.args.get('session', None)
    date = request.args.get('date', None)
    skip = nb * page
    filters = []
    if depute:
        filters.append({'depute_shortid': depute})
    if groupe:
        filters.append({'groupe_abrev': groupe})
    if session:
        filters.append({'session_id': session})
    if date:
        filters.append({'itv_date': date})
    if search:
        filters.append({'$text': {'$search': '"' + search + '"'}})

    if len(filters) == 0:
        itv_filter = {}
    elif len(filters) == 1:
        itv_filter = filters[0]
    else:
        itv_filter = {'$and': filters}

    itvs = list(
        mdb.interventions.find(itv_filter).sort([
            ('itv_date', -1), ('session_id', 1), ('itv_n', 1)
        ]).skip(skip).limit(nb))

    def countItems():
        rcount = mdb.interventions.find(itv_filter).count()
        return {'totalitems': rcount}

    cachekey = u"itv%s_%s_%s_%s_%s" % (depute, groupe, search, session, date)
    counts = use_cache(cachekey, lambda: countItems(), expires=3600)
    regx = re.compile(search, re.IGNORECASE)

    if search:
        for itv in itvs:
            repl = regx.subn('<strong>' + search + '</strong>',
                             itv['itv_contenu'])
            if repl[1]:
                itv['itv_contenu'] = repl[0]

    import math
    nbpages = int(math.ceil(float(counts['totalitems']) / nb))
    result = dict(nbitems=len(itvs),
                  nbpages=nbpages,
                  currentpage=1 + page,
                  itemsperpage=nb,
                  items=itvs,
                  **counts)
    return json_response(result)
Example No. 5
def deputes(func=""):
    if func=='liste':
        resp = _ajax('liste')
    elif func=='top':
        resp = _ajax('top')
    else:
        resp = deputeget(func)

    return json_response(resp)
Example No. 6
def groupes(func=""):
    if func == 'liste':
        resp = _ajax('liste')
    elif func == 'top':
        resp = _ajax('top')
    else:
        resp = groupeget(func)

    return json_response(resp)
Example No. 7
def interventions_last():
    itvs = list(
        mdb.interventions.find({
            '$and': [{
                'itv_president': False
            }, {
                'depute_shortid': {
                    '$ne': None
                }
            }]
        }).sort([('itv_date', -1), ('session_id', -1),
                 ('itv_n', -1)]).limit(1))
    return json_response(itvs)
Example No. 8
def classements():
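    # Monthly vote-position counts per deputy; note that the debug return on the
    # next line short-circuits the aggregation below.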
    return json_response(mdb.votes.find_one())
    pgroup = {}
    pgroup['n'] = {'$sum':1}
    pgroup['_id'] = { 'mois':{'$concat':[{'$substr':['$scrutin_date',6,4]},'-',{'$substr':['$scrutin_date',3,2]}]},'dep':'$depute_shortid','position':'$vote_position'}
    pipeline = [{'$group':pgroup}]
    mois = {}

    for agg in mdb.votes.aggregate(pipeline):
        m = agg['_id']['mois']
        d = agg['_id']['dep']
        p = agg['_id']['position']
        n = agg['n']


        if not d in mois.keys():
            mois[d] = {}
        if not m in mois[d].keys():
            mois[d][m] = {'absent':0,'abstention':0,'pour':0,'contre':0,'total':0}

        mois[d][m][p] += n
        mois[d][m]['total'] += n

    return json_response(mois['charlottelecocq'])
Example No. 9
def hatvpinter():
    # Load the HATVP lobbying register (from the open-data JSON or a local cache),
    # then fetch each organisation page and collect the ids whose page does not
    # contain the marker string.
    if 0:
        r = requests.get('http://www.hatvp.fr/agora/opendata/agora_repertoire_opendata.json')
        content = r.content
        open('/tmp/decs','w').write(content)
    else:
        content = open('/tmp/decs','r').read()
    import json
    decs = json.loads(content)
    ids = []

    for i,fiche in enumerate(decs['publications']):
        print i
        id = fiche['identifiantNational']
        r = requests.get('http://www.hatvp.fr/fiche-organisation/?organisation='+fiche['identifiantNational']+'#')
        if rien not in r.content:  # NB: 'rien' (the marker string) is not defined in this snippet
            print "-->",id
            ids.append(id)
    return json_response(ids)
Example No. 10
def scrutinscles():
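    # Key ballots: merge ballot records with cached metadata and group positions,
    # swapping pour/contre when the ballot is flagged as inverted.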
    nb = int(request.args.get('nb', '0'))
    scrutins_cles = use_cache('scrutins_cles',
                              lambda: getScrutinsCles(),
                              expires=3600)
    scrutins_positions = use_cache('scrutins_positions',
                                   lambda: getScrutinsPositions(),
                                   expires=36000)
    scrutins = mdb.scrutins.find({
        'scrutin_num': {
            '$in': scrutins_cles.keys()
        }
    }).sort([('scrutin_num', -1)])
    if nb > 0:
        scrutins = scrutins.limit(nb)
    scles = []
    for s in scrutins:
        if s['scrutin_dossierLibelle'] == 'N/A' and scrutins_cles[
                s['scrutin_num']]['dossier']:
            dossier = scrutins_cles[s['scrutin_num']]['dossier']
        else:
            dossier = s['scrutin_dossierLibelle']
        positions = dict(scrutins_positions[s['scrutin_num']])
        if scrutins_cles[s['scrutin_num']]['inversion'] == 'oui':

            positions['position'] = {
                'pour': 'contre',
                'contre': 'pour'
            }.get(positions['position'], positions['position'])

            positions['pour'] = scrutins_positions[s['scrutin_num']]['contre']
            positions['contre'] = scrutins_positions[s['scrutin_num']]['pour']

        scles.append(
            dict(desc=s['scrutin_desc'],
                 date=s['scrutin_date'],
                 sort=s['scrutin_sort'],
                 dossierLibelle=dossier.replace(u'\u0092', "'"),
                 detail=scrutins_cles[s['scrutin_num']],
                 positions=positions))
    return json_response(scles)
Example No. 11
def testcompat():
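    # Per-deputy vote-position counts grouped by 'scrutin_groupe'; returns the
    # breakdown for a single sample deputy.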

    #return json_response(list(mdb.scrutins.find({'$and':[{'scrutin_amendement_groupe':None},{'scrutin_typedetail':'amendement'}]},{'scrutin_num':1})))
    #for gid in ['FI','GDR','UAI','LR','MODEM','NG','REM']:
    pgroup = {'n':{'$sum':1}}
    pgroup['_id'] = {'depute':'$depute_shortid'}
    pgroup['_id']['position'] ='$vote_position'
    pgroup['_id']['groupe'] = '$scrutin_groupe'
    pipeline = [{'$match':{}},   {"$group": pgroup }] #'scrutin_typedetail':'amendement'

    voteamdts = {}
    for voteamdt in mdb.votes.aggregate(pipeline):
        depuid = voteamdt['_id']['depute']
        pos = voteamdt['_id']['position']
        gp = voteamdt['_id']['groupe']
        if not depuid in voteamdts.keys():
            voteamdts[depuid] = {}
        if not gp in voteamdts[depuid].keys():
            voteamdts[depuid][gp] = {}
        voteamdts[depuid][gp][pos] = voteamdts[depuid][gp].get(pos,0) + voteamdt['n']
    return json_response(voteamdts['mariechristineverdierjouclas'])
Example No. 12
def deputehasard():
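    # Return a random active deputy with photo URL and a human-readable
    # constituency label.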
    from obsapis.controllers.scrutins import getScrutinsCles
    scrutins_cles = use_cache('scrutins_cles',lambda:getScrutinsCles(),expires=3600)
    nbdeputes = use_cache('nbdeputes',lambda:mdb.deputes.find({'depute_actif':True}).count(),expires=3600)
    mfields = dict((f,1) for f in deputesfields)
    mfields.update({'_id':None})
    depute = mdb.deputes.find({'depute_actif':True},mfields).skip(int(random.random()*nbdeputes)).limit(1)[0]

    photo_an='http://www2.assemblee-nationale.fr/static/tribun/15/photos/'+depute['depute_uid'][2:]+'.jpg'
    depnumdep = depute['depute_departement_id'][1:] if depute['depute_departement_id'][0]=='0' else depute['depute_departement_id']
    if depute['depute_region']==depute['depute_departement']:
        depute_circo_complet = "%s (%s) / %se circ" % (depute['depute_departement'],depnumdep,depute['depute_circo'])
    else:
        depute_circo_complet = "%s / %s (%s) / %se circ" % (depute['depute_region'],depute['depute_departement'],depnumdep,depute['depute_circo'])

    resp = dict(depute_circo_complet = depute_circo_complet,
                depute_photo_an = photo_an,
                id = depute['depute_shortid'],
                **depute)

    return json_response(resp)
Example No. 13
def csa():
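    # Scrape the CSA pluralism tables for 2017, parse the linked speaking-time PDFs
    # and dump the entries to test.csv.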
    import requests
    import lxml.html
    from StringIO import StringIO
    itvs = []
    r = requests.get('http://www.csa.fr/csapluralisme/tableau?annee=2017')
    html = lxml.html.fromstring(r.content)
    for url in html.xpath('//tr[td[text()[contains(.,"PERSONNALITES POLITIQUES")]]]/td[3]/a/@href'):
        print url
        fullurl = 'http://www.csa.fr'+url
        r = requests.get(fullurl)
        parse = csa_pdf(StringIO(r.content))
        if parse=='boom':
            1/0
        itvs += parse

    f = open('test.csv','w')
    for itv in itvs:
        f.write(";".join([itv['chaine'],"%d-%d" % itv['date'],itv['type'],itv['nom'],itv['org'],itv['duree']])+'\n')
    return "ok"

    return json_response(itvs)
Example No. 14
def amds():
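    # Re-import and update amendments, then count per author how many amendments
    # they tabled and how many were adopted.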
    import_amendements()
    update_amendements()
    #return "ok"
    pgroup = {'n':{'$sum':1}}
    pgroup['_id'] = {'depute':'$auteurs.id'}
    pgroup['_id']['sort'] ='$sort'
    pipeline = [{'$match':{}}, {'$unwind':'$auteurs'},  {"$group": pgroup }] #'scrutin_typedetail':'amendement'

    stat_amdts = {}
    for amdt in mdb.amendements.aggregate(pipeline):
        if amdt['_id']['depute']=='lisemagnier':
            print amdt['_id']['sort'],amdt['n']
        amd = amdt['_id']
        if not amd['depute'] in stat_amdts.keys():
            stat_amdts[amd['depute']] = {'rediges':0,'adoptes':0,'cosignes':0}
        if amd['sort']==u'Adopté':
            stat_amdts[amd['depute']]['adoptes'] += amdt['n']

        stat_amdts[amd['depute']]['rediges'] += amdt['n']

    return json_response(stat_amdts)
Example No. 15
def scrutinsdetail(num):
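    # Detail of one ballot: cached key-ballot metadata plus the list of deputies
    # per position and per group.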
    scrutin = mdb.scrutins.find_one({'scrutin_num': num})
    scrutins_cles = use_cache('scrutins_cles',
                              lambda: getScrutinsCles(),
                              expires=3600)
    scrutin.update(scrutins_cles.get(num, {}))
    positions = {}
    for dpos in mdb.votes.find({'scrutin_num': num}, {
            'depute_shortid': 1,
            'groupe_abrev': 1,
            'vote_position': 1
    }):
        depid = dpos['depute_shortid']
        gp = dpos['groupe_abrev']
        pos = dpos['vote_position']
        if not pos in positions.keys():
            positions[pos] = {}
        if not gp in positions[pos].keys():
            positions[pos][gp] = []
        positions[pos][gp].append(depid)

    scrutin['positions'] = positions
    return json_response(scrutin)
Example No. 16
def connectionsjson2():
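    # Build a nodes/links graph of cross-group co-signatures (documents and
    # amendments) for a force-directed visualisation.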
    gsel = [0, 1, 2, 3, 4, 5, 7]
    gps = {
        'FI': 0,
        'REM': 1,
        'MODEM': 2,
        'LR': 3,
        'GDR': 4,
        'NG': 5,
        'NI': 6,
        'UAI': 7
    }
    counts = {}
    depgp = dict((d['depute_shortid'], {
        'img':
        'http://www2.assemblee-nationale.fr/static/tribun/15/photos/' +
        d['depute_uid'][2:] + '.jpg',
        'gn':
        d['groupe_abrev'],
        'g':
        gps[d['groupe_abrev']],
        'n':
        d['depute_nom']
    }) for d in mdb.deputes.find({}, {
        'depute_nom': 1,
        'depute_uid': 1,
        'depute_shortid': 1,
        'groupe_abrev': 1,
        '_id': None
    }))
    shortids = dict((d['depute_id'], d['depute_shortid'])
                    for d in mdb.deputes.find({}, {
                        'depute_id': 1,
                        'depute_shortid': 1,
                        '_id': None
                    }))
    liens = []
    allitems = []

    for doc in mdb.documentsan.find({'signataires': {
            '$ne': None
    }}, {
            'numero': 1,
            'signataires': 1,
            '_id': None
    }):
        if doc['signataires']:
            sig1 = doc['signataires'][0]
            counts[sig1] = counts.get(sig1, [])
            if sig1 and depgp[sig1]['g'] in gsel:
                for sig in doc['signataires'][1:]:
                    if sig:
                        counts[sig] = counts.get(sig, [])
                        if depgp[sig1]['g'] in gsel:
                            if depgp[sig1]['g'] != depgp[sig]['g']:
                                counts[sig1].append(sig)
                                counts[sig].append(sig1)
                            allitems.append(sig)
                            liens.append((sig1, sig))
                if len(counts[sig1]) > 0:
                    allitems.append(sig1)

    for amd in mdb.amendements.find({}, {'signataires_ids': 1, '_id': None}):
        if not amd['signataires_ids']:
            continue
        sig1 = shortids.get(amd['signataires_ids'][0], None)
        counts[sig1] = counts.get(sig1, [])
        if sig1 and depgp[sig1]['g'] in gsel:
            for sig in amd['signataires_ids'][1:]:
                sig2 = shortids[sig]
                if sig2:
                    counts[sig2] = counts.get(sig2, [])
                    if depgp[sig1]['g'] in gsel:
                        if depgp[sig1]['g'] != depgp[sig2]['g']:
                            counts[sig1].append(sig2)
                            counts[sig2].append(sig1)
                        allitems.append(sig2)
                        liens.append((sig1, sig2))

            if len(counts[sig1]) > 0:
                allitems.append(sig1)

    r = {'nodes': [], 'links': []}
    c = Counter(frozenset(x) for x in liens).items()
    mx = max([x[1] for x in c])

    for g in gps.keys():
        r['nodes'].append({
            'id': g,
            'name': g,
            'group': gps[g],
            'groupe': True,
            'count': 400
        })
    counts = dict((c, len(list(set(v)))) for c, v in counts.iteritems())
    allitems = list(set([it for it in allitems if counts[it] > 0]))
    for i, d in enumerate(allitems):
        r['nodes'].append({
            'img': depgp[d]['img'],
            'id': d,
            'name': "%s (%s)" % (depgp[d]['n'], depgp[d]['gn']),
            'group': depgp[d]['g'],
            'count': counts.get(d, 0)
        })

    for d in depgp.keys():
        if d in allitems:
            r['links'].append({
                'source': depgp[d]['gn'],
                'target': d,
                'value': 2000
            })

    for l, n in Counter(frozenset(x) for x in liens).items():
        if len(list(l)) == 2:
            s, t = list(l)
            if (depgp[s]['g'] in gsel or depgp[t]['g']
                    in gsel) and s in allitems and t in allitems:
                r['links'].append({'source': s, 'target': t, 'value': n})

    return json_response(r)
Example No. 17
def test():
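    # Ad-hoc scratchpad; only the block before the first return runs (median number
    # of registered voters per deputy's constituency, by group).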
    gp = {}
    for d in mdb.deputes.find({},{'stats.election':1,'groupe_abrev':1}):
        g = d['groupe_abrev']
        if not g in gp.keys():
            gp[g] = []
        gp[g].append(d['stats']['election']['inscrits'])
    moy = {}
    import numpy
    for g,v in gp.iteritems():
        moy[g] = numpy.median(numpy.array(v))
    return json_response(moy)
    stats = dict(groupe=0,dissidence=0,abstention=0)
    for s in mdb.scrutins.find({'scrutin_num':{'$nin':[404,405,406]}},{'scrutin_positions':1}):
        spos = s['scrutin_positions']['REM']
        for pos in ['pour','contre','abstention']:
            if pos in ['pour','contre']:
                if spos['position']!=pos:
                    stats['dissidence'] += spos.get(pos,0)
                else:
                    stats['groupe'] += spos.get(pos,0)
            else:
                stats[pos] += spos.get(pos,0)

    return json_response(stats)
    from obsapis.tools import parse_content
    import requests
    from lxml import etree
    url = "http://www.assemblee-nationale.fr/15/dossiers/dons_jours_repos_aidants_familiaux.asp"
    #url = "http://www.assemblee-nationale.fr/15/dossiers/soutien_collectivites_accueil_gens_voyage.asp"
    r = requests.get(url)
    xml = parse_content(r.content)
    print xml.xpath('//a[text()[contains(.,"Proposition de loi")]]/@href')
    from stripogram import html2text, html2safehtml
    doc  = html2text(r.content,page_width=10000).decode('iso8859-1').split(u'\n\n')
    start = False
    bloc = ""
    done = False
    for i,l in enumerate(doc):
        l = l.replace(u'1ère',u'première')
        if 'Proposition de loi' in l:
            start = True
            done = False
        elif len(l)<4:
            if start == True:
                start = False
                done = True

        if start:
            bloc += l
        if done:
            print bloc
            done = False
            start = False
            m1 = re.search(r'n. *([0-9]+).*d\xe9pos\xe9e? le ([0-9]+ [^ ]+ [0-9]+).*mis en ligne le ([0-9]+ [^ ]+ [0-9]+).*renvoy\xe9e? \xe0 (.*)',bloc)
            #m1 = re.search(r'n° *([0-9]+).*d\xe9pos\xe9e? le ([0-9]+ [^ ]+ [0-9]+).*mis en ligne le ([0-9]+ [^ ]+ [0-9]+).*renvoy\xe9e? \xe0 (.*)',bloc)
            #print m1
            if m1:
                print m1.groups()
            bloc = ""



    return "ok"

    gps = {}
    import datetime
    return json_response(mdb.amendements.find_one({'auteurs':None}))
    for d in mdb.deputes.find({'depute_actif':True},{'depute_ddn':1,'groupe_abrev':1,'groupe_libelle':1}):
        age = (datetime.datetime.now()-datetime.datetime.strptime(d['depute_ddn'],'%d/%m/%Y')).days/365.25
        gps[d['groupe_libelle']] = gps.get(d['groupe_libelle'],[]) + [age]
    from numpy import median,average
    for k,v in gps.iteritems():
        print v
        print "%s - moyenne : %.2f, mediane : %.2f" % (k,average(v),median(v))
    #for i,d in enumerate(mdb.documentsan.find()):
    #    d['contenu'] = d['titre'] + d.get('contenu','')
    #    mdbrw.documentsan.update_one({'id':d['id']},{'$set':{'contenu':d['contenu']}})
    #    print i
    #import_amendements()
    return json_response(gps)
    #mdbrw.travaux.remove({'sort':'44'})
    #update_travaux()
    #return json_response(list(mdb.travaux.find({'sort':'44'})))

    #return json_response(mdb.questions.find_one({}))

    #import_qag()
    return json_response(mdb.travaux.find_one())
    #return json_response(mdb.interventions.find({'itv_rapporteur':None})))
    #return json_response(mdb.interventions.find({'itv_rapporteur':None}).distinct('itv_date'))
    #return json_response(mdb.interventions.find({'$and':[{'itv_rapporteur':True},{'depute_shortid':'ericcoquerel'}]}))
    from obsapis.controllers.admin.updates.interventions import update_stats_interventions
    deppdp  = {}
    #return json_response(update_stats_interventions())
    for pdp in update_stats_interventions():

        dep = pdp['_id'].get('depute',None)
        if dep:
            if not dep in deppdp.keys():
                deppdp[dep]= dict(n=0,rap=0)
            deppdp[dep]['rap' if pdp['_id']['rapporteur'] else 'n'] += pdp['n']

    return json_response(', '.join('%d. %s (%d)' % (i+1,d[0],d[1]['n']+d[1]['rap']) for i,d in enumerate(sorted(deppdp.items(),key=lambda x:x[1]['n']+x[1]['rap'],reverse=True))))
    counts = {}
    nbmembres = dict((g['groupe_abrev'],g['groupe_nbmembres']) for g in mdb.groupes.find({},{'groupe_abrev':1,'groupe_nbmembres':1}))
    for q in mdb.questions.find({'groupe':{'$ne':None}},{'groupe':1}):
        g = q['groupe']
        if not g in counts.keys():
            counts[g] = 0
        counts[g] += 1
    return json_response([ "%s (%d)" % (g,n/nbmembres[g]) for g,n in sorted(counts.items(),key=lambda x:x[1]/nbmembres[x[0]],reverse=True)])
    col = []
    for d in mdb.deputes.find({},{'depute_collaborateurs_hatvp':1,'_id':None,'depute_shortid':1}):
        col.append((d['depute_shortid'],len(d.get('depute_collaborateurs_hatvp',[]))))
    return json_response(sorted(col,key=lambda x:x[1],reverse=True)[:20])

    import datetime
    #mdbrw.deputes.update_one({'depute_shortid':'michelevictory'},{'$unset':{'stats.commissions':""}})
    return json_response(mdb.deputes.find_one({},{'depute_hatvp':1}))
    return json_response([d['depute_shortid'] for d in mdb.deputes.find({'stats.commissions.present':0.0})])
    #{'$and': [{'depute_actif': True}, ]} [('stats.nonclasse', 1), ('stats.ranks.down.exprimes', 1)]
    return json_response(list(d['depute_shortid'] for d in mdb.deputes.find({'depute_mandat_debut':{'$gte':datetime.datetime(2017,5,21)}},{'depute_shortid':1})))

    return json_response([d['depute_shortid'] for d in mdb.deputes.find({'$and':[{'$or':[{'depute_actif': True},{'depute_shortid':'michelevictory'}]},{u'stats.positions.exprimes': {'$ne': None}}]}).sort([('stats.nonclasse', 1), ('stats.ranks.down.exprimes', 1)]).limit(5)])
    for d in mdb.deputes.find({'depute_election':None}):
        circo = d['depute_circo_id']
        titulaire = mdb.deputes.find_one({'$and':[{'depute_circo_id':circo},{'depute_election':{'$ne':None}}]})
        mdbrw.deputes.update_one({'depute_shortid':d['depute_shortid']},{'$set':{'depute_election':titulaire['depute_election']}})
    return "oj"
    #mdbrw.questions.update_many({'legislature':None},{'$set':{'legislature':15}})
    #update_travaux()
    #return json_response(mdb.interventions.find_one({}))
    return json_response(list(q['itv_contenu_texte'] for q in mdb.interventions.find({'depute_shortid':'mariechristineverdierjouclas'})))

    return json_response(mdb.travaux.distinct('type'))

    #for a in mdb.amendements.find({'suppression':True},{'id':1}):
    #    mdbrw.travaux.update_many({'idori':a['id']},{'$set':{'suppression':True}})

    #mdbrw.travaux.remove({'idori':'S-AMANR5L15PO419610B155N7'})
    #mdbrw.amendements.remove({'id':{'$in':amdlist}})
    #mdbrw.travaux.remove({'idori':{'$in':amdlist}})
    #import_amendements()


    return json_response(list(q['description'] for q in mdb.travaux.find({'$and':[{'auteur':{'$ne':False}},{'type':'QE'},{'depute':'francoisruffin'}]})))

    return json_response(list(mdb.travaux.find({'idori':'S-AMANR5L15PO419610B155N7'})))


    print mdb.travaux.count()
    return json_response(list(t['description'] for t in mdb.travaux.find({'groupe':'FI'})))

    #updateDeputesTravaux()

    #importdocs()


    #import_qag()

    return json_response(mdb.deputes.find_one({'depute_shortid':'francoisruffin'}))
    #importdocs()

    #return json_response(mdb.documentsan.find_one({'$and':[{'typeid':'propositionderesolution'},{'cosignataires.id':'francoisruffin'}]}))
    ops = []
    pgroup = {'n':{'$sum':1}}
    pgroup['_id'] = {'depute':'$auteurs'}

    pipeline = [{'$match':{}}, {'$unwind':'$auteurs'},{"$group": pgroup }] #'scrutin_typedetail':'amendement'
    return json_response(sum(d['n'] for d in mdb.documentsan.aggregate(pipeline)))
    print len(list(mdb.documentsan.aggregate(pipeline))),mdb.documentsan.count()


    #return json_response(mdb.amendements.find({'suppression':True},{'dispositif':1}).count())
    #mdbrw.scrutins.update_one({'scrutin_num':324},{'$set':{'scrutin_liendossier':'http://www.assemblee-nationale.fr/15/dossiers/deuxieme_collectif_budgetaire_2017.asp'}})
    #return json_util.dumps(list(mdb.amendements.find({'numAmend':'426'})))
    #mdbrw.scrutins.update_one({'scrutin_num':1},{'$set':{'scrutin_groupe':'Gouvernement','scrutin_lientexte':[(u'déclaration de politique générale',
    #                                                                          'http://www.gouvernement.fr/partage/9296-declaration-de-politique-generale-du-premier-ministre-edouard-philippe',
    #
    #mdbrw.votes.update_many({'scrutin_num':1},{'$set':{'scrutin_groupe':'Gouvernement'}})


    #return json_response([ (d['depute_shortid'],d['depute_mandat_fin_cause']) for d in mdb.deputes.find({'depute_actif':False},{'depute_shortid':1,'depute_mandat_fin_cause':1,'_id':None})])
    #mdbrw.scrutins.update_one({'scrutin_num':357},{'$set':{'scrutin_lientexte.0.1':'http://www.assemblee-nationale.fr/15/dossiers/jeux_olympiques_paralympiques_2024.asp#'}})
    #return json_response(mdb.scrutins.find_one({'scrutin_num':357}))
    return json_response(mdb.documentsan.distinct('type'))

    # visuels
    pgroup = {}
    pgroup['n'] = {'$sum':1}
    pgroup['_id'] = { 'depute':'$depute'}
    pipeline = [{'$match':{'name':'visuelstat'}},{'$group':pgroup}]
    vdeps = []
    for g in mdb.logs.aggregate(pipeline):
        _g = g['_id']['depute']
        if _g != None:
            vdeps.append((_g,g['n']))

    return ", ".join([ "%s (%s)" % i for i in sorted(vdeps,key=lambda x:x[1],reverse=True)])

    #updateDeputesContacts()
    return json_util.dumps(mdb.deputes.find_one({'depute_shortid':'nicolelepeih'},{'depute_contacts':1,'_id':None}))
    #importdocs()
    #return json_util.dumps(list(mdb.logs.find({'name':'visuelstat'})))
    mts = list(mdb.scrutins.find({ '$text': { '$search': "rejet" } },{'scrutin_groupe':1,'scrutin_fulldesc':1,'scrutin_sort':1,'_id':None}))
    _mts = "\n".join([";".join([m.get('scrutin_groupe',''),m['scrutin_sort'],m['scrutin_fulldesc']]) for m in mts])
    print _mts

    return json_util.dumps(mdb.deputes.find_one({'depute_shortid':'thierrysolere'},{'stats':1,'_id':None}))

    return json_util.dumps([(d['depute_nom'],
                             d['stats']['positions']['exprimes'],
                             d['stats']['votesamdements']['pctpour'],
                             d['depute_shortid']) for d in mdb.deputes.find({'groupe_abrev':'REM','stats.positions.exprimes':{'$gt':20}},{'depute_nom':1,'depute_shortid':1,'stats.positions.exprimes':1,'stats.votesamdements.pctpour':1}).sort([('stats.votesamdements.pctpour',-1)]).limit(20)])
    from fuzzywuzzy import fuzz
    sdesc = [(s['scrutin_dossier'],s['scrutin_dossierLibelle'],s['scrutin_desc'][20:]) for s in mdb.scrutins.find({'scrutin_dossier':{'$ne':'N/A'}},{'scrutin_dossier':1,'scrutin_dossierLibelle':1,'scrutin_desc':1,'_id':None})]
    r = []
    for s in mdb.scrutins.find({'scrutin_dossier':'N/A'},{'scrutin_desc':1,'_id':None,'scrutin_id':1}):
        for dos,doslib,d in sdesc:
            fz = fuzz.partial_ratio(s['scrutin_desc'][20:],d)
            if fz>97:
                r.append((s['scrutin_id'],dos,doslib))
                break

    return json_util.dumps(r)
    return json_util.dumps([(d['depute_shortid'],d['depute_suppleant'],d['depute_mandat_fin']) for d in mdb.deputes.find({'depute_actif':False})])
    return json_util.dumps(list(mdb.amendements.find({'sort':u"Adopt\u00e9","signataires_groupes":{'$elemMatch':{'$eq':'FI'}}},{'_id':None,'numInit':1,'numAmend':1})))
Example No. 18
def votes():
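    # Paginated, searchable list of individual votes with many optional filters;
    # ballot descriptions get dossier links and search highlighting.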
    scrutins_data = use_cache('scrutins_data_new',
                              lambda: getScrutinsData(),
                              expires=300)
    nb = int(request.args.get('itemsperpage', '25'))
    page = int(request.args.get('page', '1')) - 1
    groupe = request.args.get('groupe', request.args.get('group', None))
    search = request.args.get('requete', request.args.get('query', ''))
    scrutingroupe = request.args.get('scrutingroupe', None)

    scrutin = request.args.get('scrutin', None)
    csp = request.args.get('csp', None)
    age = request.args.get('age', None)
    region = request.args.get('region', None)
    depute = request.args.get('depute', None)
    position = request.args.get('position', None)
    dissidence = int(request.args.get('dissidence', '0'))
    skip = nb * page
    filters = []
    if scrutingroupe:
        filters.append({'scrutin_groupe': scrutingroupe})
    if dissidence:
        filters.append({'vote_dissident': True})
    if position:
        filters.append({'vote_position': position})
    if depute:
        filters.append({'depute_shortid': depute})
    if csp:
        filters.append({'depute_csp': csp})
    if age:
        filters.append({'depute_classeage': age})
    if groupe:
        filters.append({'groupe_abrev': groupe})
    if region:
        filters.append({'depute_region': region})
    if scrutin:
        try:
            scrutin = int(scrutin)
        except ValueError:
            pass
        filters.append({'scrutin_num': scrutin})
    if search:
        filters.append({'$text': {'$search': '"' + search + '"'}})
    if len(filters) == 0:
        vote_filter = {}
    elif len(filters) == 1:
        vote_filter = filters[0]
    else:
        vote_filter = {'$and': filters}

    votes = []
    import re
    for v in mdb.votes.find(vote_filter).sort('scrutin_num',
                                              -1).skip(skip).limit(nb):
        v['scrutin_sort'] = scrutins_data[v['scrutin_num']]['sort']
        if scrutins_data[v['scrutin_num']]['urlAmendement']:
            pass
            #v['scrutin_desc'] = re.sub(r'([0-9]+)',r'<a target="_blank" href="'+scrutins_data[v['scrutin_num']]['urlAmendement']+r'">\1</a>',v['scrutin_desc'],1)

        for i, lien in enumerate(
                scrutins_data[v['scrutin_num']]['scrutin_lientexte']):
            v['scrutin_desc'] = v['scrutin_desc'].replace(
                lien[0], 'LIEN%d' % i)

        for i, lien in enumerate(
                scrutins_data[v['scrutin_num']]['scrutin_lientexte']):
            v['scrutin_desc'] = v['scrutin_desc'].replace(
                'LIEN%d' % i, '<a target="_blank" href="' + lien[1] + r'">' +
                lien[0] + '</a>')

        v['scrutin_dossierLibelle'] = v['scrutin_dossierLibelle'].replace(
            u'\u0092', "'")  # pb apostrophe
        votes.append(v)

    def countItems():
        rcount = mdb.votes.find(vote_filter).count()
        return {'totalitems': rcount}

    cachekey = u"vot%s_%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
        depute, position, scrutingroupe, dissidence, scrutin, age,
        csp if csp else csp, groupe, search, region if region else region)
    counts = use_cache(cachekey, lambda: countItems(), expires=3600)
    regx = re.compile(search, re.IGNORECASE)
    if search:
        for v in votes:
            repl = regx.subn('<strong>' + search + '</strong>',
                             v['scrutin_desc'])
            if repl[1]:
                v['scrutin_desc'] = repl[0]

    import math
    nbpages = int(math.ceil(float(counts['totalitems']) / nb))
    result = dict(nbitems=len(votes),
                  nbpages=nbpages,
                  currentpage=1 + page,
                  itemsperpage=nb,
                  items=votes,
                  **counts)
    return json_response(result)
Example No. 19
def hatvp():
    from obsapis.controllers.admin.imports.hatvp import update_hatvp
    update_hatvp()
    return json_response(mdb.deputes.find_one({},{'depute_collaborateurs_hatvp':1}))
Example No. 20
def view_updateDeputesContacts():
    return json_response(updateDeputesContacts())
Example No. 21
def obsgouv_getdata():
    return json_response(import_obsgouv_gdoc())
Example No. 22
def longs():
    return json_response(maxis())
Example No. 23
def view_travaux():
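    # Paginated list of 'travaux' (amendments, questions, documents) filtered by
    # deputy, group, type and outcome, with full-text search across the three
    # source collections.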
    amd_sorts = {
        'retire': u'Retiré',
        'tombe': u'Tombé',
        'adopte': u'Adopté',
        'rejete': u'Rejeté',
        'nonsoutenu': u'Non soutenu',
        'nonrenseigne': u'Non renseigné'
    }
    nb = int(request.args.get('itemsperpage', '25'))
    page = int(request.args.get('page', '1')) - 1
    search = request.args.get('requete', request.args.get('query', ''))
    depute = request.args.get('depute', None)
    groupe = request.args.get('groupe', None)
    cosig = request.args.get('cosignataire', None)
    sort = request.args.get('sort', None)
    ttype = request.args.get('type', None)
    asupp = request.args.get('suppression', None)

    skip = nb * page
    filters = []

    if 'suppression' in request.args.keys() and ttype == 'amendement':
        filters.append({'suppression': (asupp not in ("", "0"))})
    if sort and ttype == 'amendement' and sort in amd_sorts.keys():
        filters.append({'sort': amd_sorts[sort]})

    if depute:
        filters.append({'depute': depute})
        filters.append({'auteur': False if cosig else {'$in': [None, True]}})
    elif groupe:
        filters.append({'groupe': groupe})
    else:
        filters.append({'depute': None})

    if ttype == 'question':
        filters.append({'type': {'$in': ['QG', 'QE', 'QOSD']}})
    elif ttype == 'amendement':
        filters.append({'type': 'amendement'})
    elif ttype in [
            'QG', 'QE', 'QOSD', 'propositiondeloi', 'propositionderesolution',
            'rapportdinformation', 'rapport', 'avis', 'projetdeloi'
    ]:
        filters.append({'type': ttype})
    elif ttype == 'document':
        filters.append({
            'type': {
                '$nin': ['propositiondeloi', 'QG', 'QE', 'QOSD', 'amendement']
            }
        })

    if search:
        search = '"' + search + '"'

        def searchText():
            txt_amd = [
                a['id']
                for a in mdb.amendements.find({'$text': {
                    '$search': search
                }})
            ]
            txt_que = [
                q['id']
                for q in mdb.questions.find({'$text': {
                    '$search': search
                }})
            ]
            txt_doc = [
                d['id']
                for d in mdb.documentsan.find({'$text': {
                    '$search': search
                }})
            ]
            return txt_amd + txt_que + txt_doc

        cachekey = u"trvtxt%s" % (search)
        ids = use_cache(cachekey, lambda: searchText(), expires=3600)
        filters.append({'idori': {'$in': ids}})

    def makefilter(f):
        if len(f) == 0:
            mf = {}
        elif len(f) == 1:
            mf = f[0]
        else:
            mf = {'$and': f}
        return mf

    tfilter = makefilter(filters)
    print filters, tfilter

    travaux = list(
        mdb.travaux.find(tfilter).sort('date', -1).skip(skip).limit(nb))
    print travaux

    def countItems():
        rcount = mdb.travaux.find(tfilter).count()
        return {'totalitems': rcount}

    cachekey = u"trv%s_%s_%s_%s_%s_%s" % (depute, groupe, search, ttype, sort,
                                          cosig)
    counts = use_cache(cachekey, lambda: countItems(), expires=3600)

    import math
    for t in travaux:
        t['description'] = t['description'].replace(u'\u0092', "'").replace(
            u'\u2019', "'")
        t['dossier'] = t['dossier'].replace(u'\u0092',
                                            "'").replace(u'\u2019', "'")

    nbpages = int(math.ceil(float(counts['totalitems']) / nb))
    result = dict(nbitems=len(travaux),
                  nbpages=nbpages,
                  currentpage=1 + page,
                  itemsperpage=nb,
                  items=travaux,
                  **counts)
    return json_response(result)