Example #1
0
def getChamberList(chamber):
    """Print id/title/name/party/state for every member of `chamber`.

    Also records each member's ProPublica id in the module-level
    `dictPerson`, keyed by list position.

    Args:
        chamber: 'house' or 'senate' (passed straight to the API filter).
    """
    congress = Congress(API_KEY)
    all_members = congress.members.filter(chamber)

    # NOTE(review): this bill fetch is hard-coded to 'P000197' and looks
    # like leftover debug output -- confirm it is still wanted.
    bills = congress.members.bills('P000197')
    for bill in bills:
        print(bill)

    num_results = int(all_members[0]["num_results"])
    member_list = all_members[0]["members"]

    # Iterate the members directly instead of a manual while/index loop;
    # cap at num_results but never past the actual list length so a
    # mismatched count cannot raise IndexError.
    for i, member in enumerate(member_list[:num_results]):
        dictPerson[i] = member["id"]
        print("%s: %s %s %s (%s) %s" %
              (member["id"], member["short_title"], member["first_name"],
               member["last_name"], member["party"], member["state"]))
Example #2
0
def query(q=''):
    """Redirect to the search results for `q`; an empty query yields {}."""
    if not q:
        return {}
    search_url = Congress.instance().search(q)
    return redirect(search_url)
Example #3
0
    def __init__(self, cid=None):
        """Set up API clients and resolve the candidate to tweet about."""
        # External API credentials: OpenSecrets (CRP) and ProPublica Congress.
        CRP.apikey = secrets['OPENSECRETS_API_KEY']
        propublica_key = secrets['PROPUBLICA_API_KEY']
        self.congress = Congress(propublica_key)

        # The government member we'll tweet about.
        self.candidate = self._get_candidate(cid)
Example #4
0
    def getPersonDetail(self):
        """Populate the detail widgets for the member in App.member_id.

        Fetches the member record from the ProPublica Congress API, sets the
        head-shot image, fills the text fields from the member's most recent
        role, and wires up external reference links (Web / GovTrack /
        VoteSmart / CRP) when the corresponding ids are present.
        """
        self.congress = Congress(self.API_KEY)
        senator = self.congress.members.get(App.member_id)

        # Head shots come from the unitedstates.io mirror:
        # https://theunitedstates.io/images/congress/[size]/[bioguide].jpg
        # [size] is one of: original (typically 675x825), 450x550, 225x275.
        try:
            photo_url = self.PHOTO_URL + App.member_id + '.jpg'
            self.ids.head_shot.source = photo_url
        except Exception:
            # Best-effort: a missing/failed photo must not block the rest.
            pass

        try:
            self.txtName.text = str(senator['first_name']) + ' ' + str(
                senator['last_name'])
            role = senator['roles'][0]  # most recent role
            self.txtState.text = str(role['state'])
            self.txtParty.text = str(role['party'])
            self.txtChamber.text = str(role['chamber'])
            self.txtBirthday.text = str(senator['date_of_birth'])
            self.txtPhone.text = str(role['phone'])
            self.txtAddress.text = str(role['office'])
            self.txtVotes.text = str(role['missed_votes_pct']) + '%'

            # Kivy passes (instance, ref_value) to on_ref_press handlers;
            # name them explicitly instead of shadowing `self`.
            if senator['url']:
                self.lblWeb.text = '[ref=web]Web[/ref]'
                self.lblWeb.bind(
                    on_ref_press=lambda _inst, _ref: webbrowser.open(
                        str(senator['url'])))

            if senator['govtrack_id']:
                self.lblGovTrack.text = '[ref=govtrack]GovTrack[/ref]'
                url_name = str(senator['first_name'] + '_' +
                               str(senator['last_name']))
                gt_url = 'https://www.govtrack.us/congress/members/' + url_name + '/'
                self.lblGovTrack.bind(
                    on_ref_press=lambda _inst, _ref: webbrowser.open(
                        gt_url + str(senator['govtrack_id'])))

            if senator['votesmart_id']:
                self.lblVoteSmart.text = '[ref=votesmart]VoteSmart[/ref]'
                vs_url = 'https://votesmart.org/candidate/'
                self.lblVoteSmart.bind(
                    on_ref_press=lambda _inst, _ref: webbrowser.open(
                        vs_url + str(senator['votesmart_id'])))

            if senator['crp_id']:
                self.lblCrp.text = '[ref=crp]CRP[/ref]'
                crp_url = 'https://www.opensecrets.org/members-of-congress/summary?cid='
                self.lblCrp.bind(
                    on_ref_press=lambda _inst, _ref: webbrowser.open(
                        crp_url + str(senator['crp_id'])))

        except KeyError:
            # Member records vary; silently skip fields the API omitted.
            pass
Example #5
0
    def __init__(self, cid=None):
        """Create API clients, resolve the candidate, and register text builders."""
        # External API clients: OpenSecrets (CRP) and ProPublica Congress.
        self.crp = CRP(secrets['OPENSECRETS_API_KEY'])
        self.congress = Congress(secrets['PROPUBLICA_API_KEY'])

        # The government member we'll tweet about.
        self.candidate = self._get_candidate(cid)

        # Functions that each produce one piece of supporting tweet text.
        self.spprt_funcs = [
            self._get_committee_text,
            self._get_vote_pct_text,
            self._get_net_worth_text,
        ]
Example #6
0
    def test_django_cache(self):
        """A Django cache backend should propagate to every sub-client."""
        try:
            from django.conf import settings
            settings.configure(CACHE_BACKEND='locmem://')
            from django.core.cache import cache
        except ImportError:
            # No Django installed, so nothing to test.
            return

        congress = Congress(API_KEY, cache)

        # The root client and each resource client must share the cache.
        for client in (congress, congress.members, congress.bills,
                       congress.votes):
            self.assertEqual(client.http.cache, cache)

        try:
            congress.bills.introduced('house')
        except Exception as e:
            self.fail(e)
Example #7
0
    def __init__(self):
        """Build the main window, wire up signals, and load both chambers."""
        super().__init__()

        # Load the Designer-generated UI and fix the window geometry.
        self.ui = Ui_MainWindow()
        self.resize(600, 325)
        self.setMinimumSize(QSize(600, 325))
        self.ui.setupUi(self)

        self.ui.buttonBox.clicked.connect(self.close)

        self.ui.listSenate.clicked.connect(self.onSenateClick)
        self.ui.listHouse.clicked.connect(self.onHouseClick)

        # Search boxes: lambdas let us pass the chamber along with the signal.
        self.ui.leSearchSenate.returnPressed.connect(
            lambda: self.onSearchClick('senate'))
        self.ui.leSearchHouse.returnPressed.connect(
            lambda: self.onSearchClick('house'))

        self.congress = Congress(self.API_KEY)
        for chamber in ('senate', 'house'):
            self.getChamberList(chamber)
    df_turnover_old = df_turnover_new
# Q6: reps who served >= 4 years and kept a staff of >= 5 every year 2011-2016.
criteria_turnover = ((df_rep_data['YEAR LEFT'] - df_rep_data['YEAR JOINED']) >=
                     4)
for i in range(2011, 2017):
    criteria_turnover = criteria_turnover & (
        df_rep_data[str(i) + ' STAFF SIZE'] >= 5)
# Mean turnover per year for the filtered reps; index 0 == 2011.
for i in range(2011, 2017):
    array_turnover[i -
                   2011] = df_rep_data[str(i) +
                                       ' TURNOVER'][criteria_turnover].mean()
# NOTE(review): averaging [3] with itself looks like a bug -- the median of
# six values should be (array_turnover[2] + array_turnover[3]) / 2. Confirm.
print('Q6 - median of staff turnover (2011-2016):  ',
      (array_turnover[3] + array_turnover[3]) / 2,
      sep="")

#Q7
# NOTE(review): hard-coded API key -- move to an env var / secrets store.
congress = Congress('h3jNxA0rvr4CYIfSCOXgVaq1Evr7Bis5WM8NCRva')
# 2016 spending rows with a known bioguide id.
df_2016 = df_all[['BIOGUIDE_ID',
                  'AMOUNT']][(df_all['START DATE'].dt.year == 2016)
                             & (df_all['BIOGUIDE_ID'].notna())]
pivot_2016 = pd.pivot_table(df_2016,
                            index=["BIOGUIDE_ID"],
                            values='AMOUNT',
                            aggfunc=np.sum).sort_values(
                                by='AMOUNT',
                                ascending=False,
                                na_position='first').head(
                                    20)  # top twenty spenders in 2016
# Placeholder column, filled in below from the API lookups.
pivot_2016['PARTY'] = 0
for i in range(0, 20):
    dict_ID_info = congress.members.get(pivot_2016.index[i])
    if pd.DataFrame(dict_ID_info)['current_party'][0] == 'D':
Example #9
0
    def __init__(self, **kwargs):
        # Build the widget and create the ProPublica Congress API client.
        super().__init__(**kwargs)
        self.congress = Congress(self.API_KEY)

        # NOTE(review): these are plain locals, so the ObjectProperty objects
        # are discarded as soon as __init__ returns. Kivy properties only take
        # effect as class attributes -- confirm and move them to the class body.
        rvSenate = ObjectProperty(None)
        rvHouse = ObjectProperty(None)
Example #10
0
def search(q=''):
    """Return the search-results URL for `q`, wrapped in a dict."""
    return {'url': Congress.instance().search(q)}
Example #11
0
from sqlalchemy import create_engine

# NOTE(review): credentials and the ProPublica key are hard-coded below --
# they belong in environment variables or a secrets store.
password = '******'

# NOTE(review): host '127.0.0.01' looks like a typo for '127.0.0.1', and the
# 'encoding' kwarg is not referenced by the format string (dead arg) -- confirm.
conn_string = 'mysql://{user}:{password}@{host}:{port}/?charset=utf8'.format(
    user='******',
    password=password,
    host='127.0.0.01',
    port=3306,
    encoding='utf-8')

# Module-wide DB engine/connection and ProPublica client.
engine = create_engine(conn_string)
con = engine.connect()

propublica_key = 'wAxQ7sF8gcXCBRnY3lzegT23aljM4saALOb6JPlR'
congress = Congress(propublica_key)


def sql(query, show=True):
    '''Run `query` against the module engine.

    Returns a DataFrame for queries that produce a result set; otherwise
    falls back to plain execution (DDL/DML). Errors are printed, never
    raised -- this keeps the original best-effort semantics.
    '''
    try:
        return pd.read_sql(query, con=engine)
    except Exception:  # not a result-set query; try plain execution
        try:
            engine.execute(query)
            print('Executed:', query)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; outcome for ordinary errors is unchanged.
            print('###ERROR###')

Example #12
0
    def getPersonDetail(self):
        """Fill the person-detail pane for the member in self.m_person_id.

        Downloads the head shot, then populates the name/state/party/etc.
        fields from the member's most recent role and builds hyperlinks for
        the external ids (GovTrack / VoteSmart / CRP) that are present.
        """
        self.congress = Congress(self.API_KEY)
        senator = self.congress.members.get(self.m_person_id)

        # Head shots: https://theunitedstates.io/images/congress/[size]/[bioguide].jpg
        # [size] is one of: original (typically 675x825), 450x550, 225x275.

        # Start from an empty QImage so lblPhoto still gets a (blank) pixmap
        # when the download fails; previously `img` could be unbound here and
        # the setPixmap call below would raise NameError.
        img = QImage()
        try:
            url = self.PHOTO_URL + self.m_person_id + '.jpg'
            data = urllib.request.urlopen(url).read()
            img.loadFromData(data)
        except Exception:
            pass  # best-effort: a missing photo must not abort the pane

        self.person_detail.lblPhoto.setPixmap(QPixmap(img).scaledToWidth(100))

        try:
            self.person_detail.lblName.setText(
                str(senator['first_name']) + ' ' + str(senator['last_name']))
            role = senator['roles'][0]  # most recent role
            self.person_detail.textState.setText(str(role['state']))
            self.person_detail.textParty.setText(str(role['party']))
            self.person_detail.textChamber.setText(str(role['chamber']))
            self.person_detail.textBirthday.setText(
                str(senator['date_of_birth']))
            self.person_detail.textPhone.setText(str(role['phone']))
            self.person_detail.textAddress.setText(str(role['office']))
            self.person_detail.textVotes.setText(
                str(role['missed_votes_pct']) + '%')

            if senator['url']:
                self.person_detail.lblWeb.setText('<a href=' + senator['url'] +
                                                  '>Web</a>')
                self.person_detail.lblWeb.setOpenExternalLinks(True)
            else:
                self.person_detail.lblWeb.setText('Web')

            if senator['govtrack_id']:
                url_name = str(senator['first_name'] + '_' +
                               str(senator['last_name']))
                url = 'https://www.govtrack.us/congress/members/' + url_name + '/'
                self.person_detail.lblGovTrack.setText(
                    '<a href=' + url + str(senator['govtrack_id']) +
                    '>GovTrack</a>')
                self.person_detail.lblGovTrack.setOpenExternalLinks(True)
            else:
                self.person_detail.lblGovTrack.setText('GovTrack')

            if senator['votesmart_id']:
                url = 'https://votesmart.org/candidate/'
                self.person_detail.lblVoteSmart.setText(
                    '<a href=' + url + str(senator['votesmart_id']) +
                    '>VoteSmart</a>')
                self.person_detail.lblVoteSmart.setOpenExternalLinks(True)
            else:
                self.person_detail.lblVoteSmart.setText('VoteSmart')

            if senator['crp_id']:
                # url = 'https://www.opensecrets.org/members-of-congress/summary?cid='
                # The '=' in the query string is escaped as &#61; so Qt's
                # rich-text parser doesn't mangle the href.
                url = 'https://www.opensecrets.org/members-of-congress/summary?cid&#61;'
                crp_link = '<a href=' + url + str(
                    senator['crp_id']) + '>CRP</a>'
                self.person_detail.lblCrp.setText(crp_link)
                self.person_detail.lblCrp.setOpenExternalLinks(True)
            else:
                self.person_detail.lblCrp.setText('CRP')

        except KeyError:
            pass  # member records vary; skip fields the API omitted

        QApplication.restoreOverrideCursor()
Example #13
0
    r = requests.get(
        url=URL,
        headers={"X-API-Key": "iNjfHXFfsKjig0ujma5m5aooz2td8yyb3WJA00H5"})

    #pp.pprint(r.json())

    # f = open('something.json', 'r+')
    # f.write(str(r.json()))

    billid = r.json()["results"][0]["bills"][0]["bill_id"]

    return billid


# NOTE(review): hard-coded API key -- move to an env var / secrets store.
congress = Congress("iNjfHXFfsKjig0ujma5m5aooz2td8yyb3WJA00H5")

# pelosi = congress.members.get('P000197')

# # pp.pprint(pelosi)

# pelosibill = congress.bills.by_member("P000197")

# #pp.pprint(pelosibill)

# Presidential nominations associated with Virginia.
states = congress.nominations.by_state("Va")

pp.pprint(states)

# pp.pprint(pelosibill)
Example #14
0
def congress():
    """Fixture: a Congress client built with a dummy testing key."""
    return Congress('testing_key')
Example #15
0
"""
Supporting functions to get and parse names.

Using the ProPublica Congress API, these functions return different name variations.

"""
from congress import Congress
from secrets import api_key_congress

# Module-wide ProPublica Congress client (key comes from the secrets module).
congress = Congress(api_key_congress)


def get_names(house="house", nametype="official"):
    """Return member names for `house`, formatted per `nametype`."""
    members = get_members(house)
    # Dispatch on the requested style; any unknown value falls back to the
    # official-with-party formatting (same as "partisan").
    formatters = {
        "official": official,
        "partisan": officialwparty,
        "unofficial": unofficial,
        "neutral": neutral,
    }
    return formatters.get(nametype, officialwparty)(members)


def get_members(chamber):
    """Specify house or senate for members"""
    response = congress.members.filter(chamber=chamber)
    return response[0]["members"]
Example #16
0
#roumanos: YgsKtATqn73bwDrv9ar1M3ElbWSKsjyY24dMWsgq
#mine: 9QhaTiNvHgaAMdXd8S8pE5kIeW1tBG8X7ucfOVtn
# C:\Users\clunieit\AppData\Local\Microsoft\AppV\Client\Integration\82D962A3-03E5-4FCB-A8F4-7CDF9095A30B\Root\VFS\AppVPackageDrive\Python36\python.exe -m pip install
# pip install python-congress	https://github.com/eyeseast/propublica-congress

import datetime, json, os, time, sys, urllib, httplib2, requests
from congress import Congress, CongressError, NotFound, get_congress

# NOTE(review): hard-coded API key -- move to an env var / secrets store.
apiKey = '9QhaTiNvHgaAMdXd8S8pE5kIeW1tBG8X7ucfOVtn'

congress = Congress(apiKey)

# Download all members JSON for the 115th Congress.
urlSenate = "https://api.propublica.org/congress/v1/115/senate/members.json"
senateMems = requests.get(urlSenate, headers={'X-API-Key': apiKey})
urlHouse = "https://api.propublica.org/congress/v1/115/house/members.json"
#houseMems = requests.get(urlHouse, headers={'X-API-Key': apiKey})

senJSON = senateMems.json()
#housJSON = houseMems.json()

# Debug aid: dump every field of the first senator, then stop the script.
for membR in senJSON['results'][0]['members']:
    for item in membR:
        print(item, membR[item])
    sys.exit()

#congressFullMems = [[],[]]	#0=sen, 1=house

# for membR in senMembers['results'][0]['members']:
# fullMem = congress.members.get(membR['id'])
# congressFullMems[0].append(fullMem)
Example #17
0
from congress import Congress
import numpy as np
import pandas as pd
import time

# NOTE(review): hard-coded API key -- move to an env var / secrets store.
api_key = 'Z7cnuQ3cufA08VfbWOoqURVpeSUuyT8QQazVwAGY'
congress = Congress(api_key)

# Bioguide ids present in the cleaned spending data; the first unique entry
# is skipped -- presumably a header/NaN artifact, TODO confirm.
df = pd.read_csv("../cleaned-data.csv")
bio_guides_ID = df["BIOGUIDE_ID"].unique()[1:]

# Legislator reference table, trimmed to the columns used below.
dflegis = pd.read_csv("../legislators.csv")
dflegis = dflegis[[
    "title", "firstname", "middlename", "lastname", "party", "state",
    "in_office", "gender", "bioguide_id", "govtrack_id"
]]
# get member by bioguide ID
print(dflegis)
bios = list(dflegis.bioguide_id)

# Ids that appear in the spending data but not in the legislators table.
# A set gives O(1) membership tests instead of scanning the list per id,
# and `not in` replaces the unidiomatic `not i in`.
known_bios = set(bios)
outof_bio = [bid for bid in bio_guides_ID if bid not in known_bios]
print(outof_bio)

legislators = []
for i in outof_bio[1:]:
    try:
        legislators.append(congress.members.get(i))
        print(i, congress.members.get(i))
Example #18
0
# Heroku Variables. You define these in Heroku's dashboard.
the_consumer_key = os.environ.get('the_consumer_key')
the_consumer_secret = os.environ.get('the_consumer_secret')
the_access_key = os.environ.get('the_access_key')
the_access_secret = os.environ.get('the_access_secret')
congress_key = os.environ.get('congress_key')

# Access keys for Twitter and ProPublica's API.
consumer_key = the_consumer_key
consumer_secret = the_consumer_secret
access_key = the_access_key
access_secret = the_access_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
congress = Congress(congress_key)

api = tweepy.API(auth)

# Returns votes that happened on the day that the script runs.
todays_votes = congress.votes.today('house')

# This function accepts 3 arguments: The chamber, either 'house' or 'senate'; roll_call_num, which is the roll call number for each vote, and a session.
def get_info(chamber, roll_call_num, sess):
	today_yoho_vote = congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['positions'][426]['vote_position']
	shortTitle = congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['bill']['short_title']
	billNum = congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['bill']['number']
	bill_desc = congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['description']
	which_congress = str(congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['congress'])
	iso_bill = congress.votes.get(chamber, roll_call_num, sess)['votes']['vote']['bill']['bill_id']
	cleaned_bill = re.sub("[^0-9\-.]", '', iso_bill)
import argparse
import os
import os.path as osp
from time import sleep

import pandas as pd
from congress import Congress
from congress.utils import NotFound
from dotenv import find_dotenv, load_dotenv

from congressideology.config import config

load_dotenv(find_dotenv())

# ProPublica Congress client; key is read from the environment (.env file).
congress = Congress(os.getenv('PROPUBLICA_API_KEY'))

# Bill fields retained when collecting bill data.
KEEP_FIELDS = [
    'congress', 'bill_type', 'title', 'sponsor', 'sponsor_id', 'sponsor_party',
    'introduced_date', 'cosponsors_by_party', 'summary'
]


def collect_bill_data(congress_nums, output_file, timeout=0.1):
    bills = []
    for congress_num in congress_nums:
        votes = pd.read_csv(
            osp.join(config.data_dir, 'raw', 'votes',
                     f'HS{congress_num}_rollcalls.csv'))
        votes.dropna(subset=['bill_number'], axis=0, inplace=True)
        for bill_id in votes['bill_number'].unique():
            try:
Example #20
0
 def setUp(self):
     # Fresh API client and raw HTTP handle for each test.
     self.congress = Congress(API_KEY)
     self.http = httplib2.Http()
Example #21
0
from google.cloud.language import types
from congress import Congress
import six
import sys
import json
import urllib
import html2text
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums as enums2
from google.cloud.language_v1beta2 import types as types2
import subprocess
import google
import nltk
import wptools

# NOTE(review): hard-coded API key -- move to an env var / secrets store.
congress = Congress('gt6jsrJY8cXmh6WmRYwK0820BFfrtZlf25fJSKlo')


def get_bill(id, session):
    url = 'https://api.propublica.org/congress/v1/%s/bills/%s.json' % (session,
                                                                       id)
    headers = {'X-API-Key': 'gt6jsrJY8cXmh6WmRYwK0820BFfrtZlf25fJSKlo'}
    req = urllib.request.Request(url, None, headers)
    response = urllib.request.urlopen(req).read()
    billinfo = json.loads(response)['results'][0]
    chamber = ""
    if billinfo['bill_type'][0] == 'h':
        chamber = 'house'
    elif billinfo['bill_type'][0] == 's':
        chamber = 'senate'
    sponsor_funding_list = get_congressman(billinfo['sponsor'], chamber)
Example #22
0
def collect_congress_tweets(congress_list, congress_tweets_file,
                            meta_info_file, start_date, twitter_creds,
                            chambers=None, propublica_api_key=None,
                            append_frequency=10, browser='Chrome',
                            fields=None, shuffle=False):
    """Collect tweets from American Congressmen.

    Parameters
    ----------
    congress_list : iterable
        List with Congress numbers to collect data for.
    congress_tweets_file : str
        Path to the output file with tweets.
    meta_info_file : str
        Path to the output file with meta information about the Congress.
    start_date : str
        The first date to start pulling extra tweets.
    twitter_creds : dict or list
        Dictionary or list with Twitter authentication credentials.
        Has to contain consumer_key, consumer_secret, access_key, access_secret
    chambers : iterable, optional
        List of Chambers to collect tweets for (the default is Senate and House).
    propublica_api_key : str, optional
        API key for free Propublica Congress API (the default is None).
        https://www.propublica.org/datastore/api/propublica-congress-api
    append_frequency : int, optional
        Frequency of dumping new tweets to CSV (the default is 10).
    browser : str, optional
        Browser for Selenium to use. Corresponding browser and its webdriver
        have to be installed (the default is 'Chrome').
    fields : iter, optional
        Extra fields to pull from the tweets (the default is retweet_count and favorite_count).
    shuffle: bool, optional
        Whether to shuffle twitter handles before collecting.
    """
    if chambers is None:
        chambers = ['House', 'Senate']
    if fields is None:
        fields = ['retweet_count', 'favorite_count']

    # Reuse cached member metadata when present; otherwise pull every chamber
    # of every requested Congress from the ProPublica API and cache it.
    if osp.isfile(meta_info_file):
        members = pd.read_csv(meta_info_file)
    else:
        congress = Congress(propublica_api_key)
        all_members = []
        for congress_num in congress_list:
            for chamber in chambers:
                members = pd.DataFrame(congress.members.filter(
                    chamber, congress=congress_num)[0]['members'])
                members['chamber'] = chamber
                members['congress_num'] = congress_num
                all_members.append(members)
        members = pd.concat(all_members)
        members.to_csv(meta_info_file, index=False)

    twitter_handles = members.twitter_account.unique()
    if shuffle:
        random.shuffle(twitter_handles)
    start_date = parser.parse(start_date).date()
    # Resume support: skip handles already present in the output file.
    if osp.isfile(congress_tweets_file):
        tweets = pd.read_csv(congress_tweets_file,
                             lineterminator='\n', usecols=['screen_name'])
        parsed_handles = list(tweets['screen_name'].unique())
        del tweets  # free the frame; only the unique handles were needed
    else:
        parsed_handles = []

    dfs = []
    for i, twitter_handle in enumerate(twitter_handles):
        if twitter_handle in parsed_handles or pd.isnull(twitter_handle):
            continue

        try:
            df = grab_tweets(twitter_creds, screen_name=twitter_handle, timeout=1.0,
                             get_more=True, start_date=start_date, browser=browser, fields=fields)
        except Exception as e:
            # Best-effort scrape: warn about the failure and move on.
            warnings.warn(f'Exception occured for {twitter_handle}: {e}')
            continue

        parsed_handles.append(twitter_handle)
        if df.empty:
            continue
        df = df.loc[df.created_at >= pd.Timestamp(start_date)]
        dfs.append(df)
        # Flush to disk every `append_frequency` frames (and on the last
        # handle) so a crash loses at most one batch.
        if len(dfs) >= append_frequency or i == (len(twitter_handles) - 1):
            df = pd.concat(dfs)
            if osp.isfile(congress_tweets_file):
                df.to_csv(congress_tweets_file, mode='a',
                          header=False, index=False)
            else:
                df.to_csv(congress_tweets_file, index=False)
            dfs = []
Example #23
0
import discord
from discord.ext import commands
from congress import Congress
from discord_token import token

TOKEN = token

# Discord bot; commands are prefixed with '>'.
bot = commands.Bot(command_prefix='>')


@bot.event
async def on_ready():
    # Fired once the bot has connected and is ready to receive events.
    print(f"Bot is up & ready")


# NOTE(review): `Congress` is imported above from the `congress` package,
# whose constructor takes an API key, not a bot. Confirm this is the intended
# cog class and not a name collision with a local Congress cog.
bot.add_cog(Congress(bot))
bot.run(TOKEN)
Example #24
0
from congress import Congress
from dotenv import load_dotenv
import os

load_dotenv()
# ProPublica Congress client; key is read from the PROPUBLICA env var.
congress = Congress(os.getenv("PROPUBLICA"))
# Scraper for the VoteTrackr Project that uses ProPublica API
# Should be used to produce initial data for the database of bills and votes
from congress import Congress
import json

# Constants
# NOTE(review): hard-coded API key -- move to an env var / secrets store.
PROPUBLICA_API_KEY = 'AfFAT83Y5LHoCEvkGgwjbjrtVrZgVSgp18YXiF0R'
congress = Congress(PROPUBLICA_API_KEY)


# Scrapes all relevant bills from the ProPublica API for the 155th congress
def scrape_bills():
    senate_bills = []
    for i in range(0, 3700):
        try:
            cur_bill = congress.bills.get('s' + str(i))
        except:
            continue

        if (cur_bill['cosponsors'] < 8) or (
                'res' in cur_bill['bill_type']) or not cur_bill['last_vote']:
            continue
        print(cur_bill)
        cur_dict = {
            'BID': cur_bill['bill_id'],
            'Description': cur_bill['title'],
            'Name': cur_bill['short_title'],
            'DateIntroduced': cur_bill['introduced_date'],
            'Status': cur_bill['latest_major_action'],
            'CongressN': cur_bill['bill_id'][-3:],
            'URL': cur_bill['govtrack_url'],