Beispiel #1
0
def dump_candidate_sked(sked_name, candidate_id, destination_file):
    """
    Blindly dump a csv file of a candidate's authorized committees, regardless of size. *Some filings are 200MB plus -- see #876048 or ACTBLUE's monthly stuff.

    The rule is a body line is superceded EITHER if it's parent filing is superceded, or if the line itself is superceded. F24's and F6's are superceded line by line--though perhaps that could be improved on.

    Args:
        sked_name: schedule letter, 'a' or 'b' (case-insensitive).
        candidate_id: FEC candidate id whose authorized committees are dumped.
        destination_file: server-side path the Postgres COPY writes the csv to.
    """

    # break if we're given junk args.
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b']
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    ## first get the list of authorized committees.
    # Parameterized to close the SQL-injection hole the old string
    # interpolation of candidate_id opened.
    acc_query = "select committee_id from summary_data_authorized_candidate_committees where candidate_id=%s"
    cursor.execute(acc_query, (candidate_id,))
    results = cursor.fetchall()
    # Committee ids come back from our own table; COPY cannot take bind
    # parameters, so they are quoted into the command below.
    committees = ["'" + x[0] + "'" for x in results]
    committee_formatted_list = ", ".join(committees)

    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False and fec_alerts_new_filing.fec_id in (%s)) to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING, committee_formatted_list, destination_file)
    cursor.execute(dumpcmd)
Beispiel #2
0
def register():
    """Render the registration form (GET) or create a new user (POST).

    On POST: rejects the registration when the username/email is already
    taken or the two password fields differ; otherwise stores the hashed
    password and shows the login page.
    """
    if request.method == 'GET':
        return render_template('register.html', **context)

    if request.method == 'POST':
        if chc_user(request.form['userName'], request.form['email']):
            if request.form['inputPassword'] == request.form['repeatPassword']:
                conn = get_connection()
                cursor = conn.cursor()

                # Hash exactly once: the original looped five times and kept
                # only the last hash, which was pure wasted work.
                hashed_password = generate_password_hash(
                    request.form['inputPassword'])

                cursor.execute(
                    'INSERT INTO users(name, mail, password) VALUES(?, ?, ?)',
                    (request.form['userName'], request.form['email'],
                     hashed_password))
                # The old rollback() immediately after commit() was a no-op
                # and has been removed.
                conn.commit()
                return render_template('login.html', **context)
            else:
                context['message'] = "Passwords are not the same"
        else:
            context['messagePass'] = "******"
        context['user'] = request.form['userName']
        context['email'] = request.form['email']
        return render_template('register.html', **context)
Beispiel #3
0
def dump_big_contribs(destination_file, CYCLE):
    """Dump $10,000+ schedule A contributions to super pacs / hybrid pacs for the given CYCLE to destination_file via Postgres COPY."""
    # This is contributions to super-pacs greater than $5,000 + reported contributions to non-committees greater than $5,000, plus line 17 (other federal receipts) of $5,000 or more to hybrid pacs (see http://www.fec.gov/press/Press2011/20111006postcarey.shtml). Valid 'other federal receipts' incurred by the hybrid pac of $5,000 plus will also show up in this line... 
    
    ## update: The postcarey guidance is ignored on reports like this:
    ## http://docquery.fec.gov/cgi-bin/dcdev/forms/C00542217/882740/sa/11AI
    ## so instead just look at all lines there too....
    
    sked_name = 'a'
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    # Cycle start/end dates; raises KeyError for an unknown cycle.
    cycle_details = cycle_calendar[int(CYCLE)]
    
    # need to join to get the committee name. 
    
    # NOTE(review): despite the comment above, only $10,000+ rows are kept.
    # Values interpolated into the COPY command are computed locally except
    # destination_file, assumed to be a trusted server-side path.
    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number  WHERE (memo_code isnull or not memo_code = 'X') and committee_type in ('I', 'O', 'U', 'V', 'W') and superceded_by_amendment=False and contribution_amount >= 10000 and %s >= %s and  %s <= %s and  is_superceded=False) to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, get_db_formatted_string(cycle_details['start']), datefield, get_db_formatted_string(cycle_details['end']), destination_file)
    print dumpcmd
    start = time.time()
    result = cursor.execute(dumpcmd);
    elapsed_time = time.time() - start
    print "elapsed time for dumping big contribs: %s" % ( elapsed_time)
Beispiel #4
0
def dump_big_contribs(destination_file):
    """Dump $10,000+ schedule A contributions to super pacs / hybrid pacs since CYCLE_START_STRING to destination_file via Postgres COPY."""
    # This is contributions to super-pacs greater than $5,000 + reported contributions to non-committees greater than $5,000, plus line 17 (other federal receipts) of $5,000 or more to hybrid pacs (see http://www.fec.gov/press/Press2011/20111006postcarey.shtml). Valid 'other federal receipts' incurred by the hybrid pac of $5,000 plus will also show up in this line...

    ## update: The postcarey guidance is ignored on reports like this:
    ## http://docquery.fec.gov/cgi-bin/dcdev/forms/C00542217/882740/sa/11AI
    ## so instead just look at all lines there too....

    sked_name = 'a'
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    # need to join to get the committee name.

    # NOTE(review): despite the comment above, only $10,000+ rows are kept.
    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number  WHERE (memo_code isnull or not memo_code = 'X') and committee_type in ('I', 'O', 'U', 'V', 'W') and superceded_by_amendment=False and contribution_amount >= 10000 and %s >= %s and is_superceded=False) to '%s' with csv header quote as '"' escape as '\\'""" % (
        fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING,
        destination_file)
    #print dumpcmd
    start = time.time()
    result = cursor.execute(dumpcmd)
    elapsed_time = time.time() - start
    print "elapsed time for dumping big contribs: %s" % (elapsed_time)
Beispiel #5
0
def output_alm():
    """Render the list of alarms belonging to the logged-in user.

    Returns the rendered template, or the "Brak danych" fallback when the
    database file is missing.
    """
    send_info()
    if os.path.isfile('fridge.db'):
        conn = get_connection()
        cursor = conn.cursor()

        with conn:
            # Parameterized: interpolating session['username'] into the SQL
            # string via an f-string was SQL-injectable.
            querry = """
            SELECT alarms.id, products.name, alm_type.name as eq, alarms.val, alarms.message, users.name as user
            FROM products
            INNER JOIN alarms
            ON products.id = alarms.prod_id
            INNER JOIN alm_type
            ON alarms.type_id = alm_type.id
            INNER JOIN users_alm
            ON alarms.id = users_alm.alm_id
            INNER JOIN users
            ON users_alm.user_id = users.id
            WHERE users.name == ?
            """
            cursor.execute(querry, (session['username'],))
            rows = cursor.fetchall()
            return render_template('output_alm.html', rows=rows, **context)

    return "Brak danych"
Beispiel #6
0
def dump_candidate_sked(sked_name, candidate_id, destination_file):
    """
    Blindly dump a csv file of a candidate's authorized committees, regardless of size. *Some filings are 200MB plus -- see #876048 or ACTBLUE's monthly stuff. 

    The rule is a body line is superceded EITHER if it's parent filing is superceded, or if the line itself is superceded. F24's and F6's are superceded line by line--though perhaps that could be improved on. 

    Args:
        sked_name: schedule letter, 'a' or 'b' (case-insensitive).
        candidate_id: FEC candidate id whose authorized committees are dumped.
        destination_file: server-side path the Postgres COPY writes the csv to.
    """

    # break if we're given junk args.
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b']
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    ## first get the list of authorized committees.
    # Parameterized to close the SQL-injection hole the old string
    # interpolation of candidate_id opened.
    acc_query = "select committee_id from summary_data_authorized_candidate_committees where candidate_id=%s"
    cursor.execute(acc_query, (candidate_id,))
    results = cursor.fetchall()
    # Committee ids come back from our own table; COPY cannot take bind
    # parameters, so they are quoted into the command below.
    committees = ["'" + x[0] + "'" for x in results]
    committee_formatted_list = ", ".join(committees)

    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False and fec_alerts_new_filing.fec_id in (%s)) to '%s' with csv header quote as '"' escape as '\\'""" % (
        fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING,
        committee_formatted_list, destination_file)
    cursor.execute(dumpcmd)
Beispiel #7
0
def dump_all_F6_contribs(destination_file):
    """Dump every non-amended schedule A line attached to an F6/F6N/F6A filing to destination_file via Postgres COPY."""

    sked_name = 'A'

    # break if we're given junk args.
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b', 'e']
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    # need to join to get the committee name.
    # dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number  WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False) to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING, destination_file)

    # NOTE(review): datefield/datefieldkey are computed but unused -- the F6
    # dump deliberately ignores the cycle date window.
    dumpcmd = """copy (SELECT %s FROM formdata_skeda left join fec_alerts_new_filing on formdata_skeda.filing_number = fec_alerts_new_filing.filing_number WHERE fec_alerts_new_filing.form_type in ('F6N', 'F6A', 'F6') and superceded_by_amendment=False) to '%s' with csv header quote as '"' escape as '\\'""" % (
        fieldlist, destination_file)

    start = time.time()
    result = cursor.execute(dumpcmd)
    elapsed_time = time.time() - start
    print dumpcmd
    print "elapsed time for dumping sked %s: %s" % (sked_name, elapsed_time)
Beispiel #8
0
 def uninstall():
     """Drop every application table if it exists (idempotent teardown)."""
     conn = db_utils.get_connection()
     # Drop order matters: child tables go before the tables they reference.
     for table in ("representative_codes", "tasks", "representatives",
                   "users", "shelters"):
         conn.execute("drop table if exists " + table)
Beispiel #9
0
def login():
    """Show the login form on GET; verify credentials and start a session on POST."""
    if request.method == 'GET':
        return render_template('login.html',
                               messages=get_flashed_messages())

    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']

        conn = get_connection()
        cursor = conn.cursor()

        row = cursor.execute('SELECT * FROM users WHERE username = ?',
                             (username, )).fetchone()

        # Only a matching user with a matching password hash gets a session.
        if row and check_password_hash(row['password'], password):
            session['user_id'] = row['id']
            session['username'] = row['username']
            session['is_admin'] = bool(row['is_admin'])
            return redirect('/')

        flash('błędna nazwa użytkownika lub hasło')
        return redirect('/login')
    def post(self):
        """Create or update a representative from the JSON request body.

        Expects JSON keys: code, birthday ('DD.MM.YYYY'), user_id,
        first_name, last_name, photo. Writes a JSON result dict back.
        """
        conn = db_utils.get_connection()

        try:
            data = json.loads(self.request.body)
            try:
                code = data['code']
                data['shelter_id'] = self.get_shelter_id(conn, code)
            except:
                # Any failure resolving the code is reported as an invalid
                # representative code.
                self.write(
                    json.dumps({
                        'result':
                        'fail',
                        'error':
                        1,
                        'error_description':
                        "Your code is not a valid representative code"
                    }))
                return
            # Convert 'DD.MM.YYYY' into ISO-style 'YYYY-MM-DD'.
            birthday = ''
            for el in reversed(data['birthday'].split('.')):
                birthday += '-' + el
            birthday = birthday[1:]

            data['birthday'] = birthday

            # NOTE(review): request values are formatted straight into the
            # SQL -- SQL-injectable if any field is attacker-controlled;
            # switch to the driver's bind parameters once its API is
            # confirmed.
            conn.execute(u"""
            insert into representatives(user_id, first_name, last_name, birthday, photo, shelter_id)
            values({user_id}, '{first_name}', '{last_name}', '{birthday}', '{photo}', '{shelter_id}')
            on duplicate key update first_name='{first_name}', last_name='{last_name}', birthday='{birthday}', photo='{photo}', shelter_id='{shelter_id}';
                """.format(**data))
            self.write(json.dumps({'result': 'ok'}))
        except Exception, e:
            logging.warn('cannot insert data: ' + str(e))
            self.write(json.dumps({'result': 'fail'}))
Beispiel #11
0
def login():
    """Show the login form on GET; on POST, match the 'name' field against the users.mail column and open a session."""
    context = {
        'now': datetime.datetime.now().year,
        'user': None,
        'email': None,
        'message': None,
        'act_alm': 0,
    }

    if request.method == 'GET':
        return render_template('login.html', **context)

    if request.method == 'POST':
        conn = get_connection()
        cursor = conn.cursor()

        # NOTE: the form field is called 'name' but is compared to `mail`.
        row = cursor.execute('SELECT * FROM users WHERE mail = ?',
                             (request.form['name'],)).fetchone()
        if row and check_password_hash(row['password'],
                                       request.form['password']):
            session['user_id'] = row['id']
            session['username'] = row['name']
            context['email'] = row['name']
            context['user'] = session['username']
            return render_template('dashboard.html', **context)

        context['message'] = "Wrong user or password"
        return render_template('login.html', **context)
Beispiel #12
0
def podaj_DNA():
    """Start a new DNA project for the logged-in user, rejecting missing or duplicate project names, and record it in Wpis.json."""
    username = session.get('username')

    conn = get_connection()
    cursor = conn.cursor()

    existing = cursor.execute('SELECT nazwa FROM sekwencje WHERE user_id = ?',
                              (username,)).fetchall()
    nazwy = [row['nazwa'] for row in existing]

    nazwa_projektu = request.args.get('name')
    if not nazwa_projektu:
        return render_template('error_no_input_nazwa.html')
    if nazwa_projektu in nazwy:
        return render_template('error_project_name_exists.html')

    czas = datetime.now()
    dane = {
        'nazwa': nazwa_projektu,
        'data': str(czas.date()),
        'godzina': str(czas.time()),
    }

    with open('Wpis.json', 'w') as f:
        json.dump(dane, f)

    return render_template('podaj_DNA.html', username=username)
Beispiel #13
0
def input():
    """Render the add-product form (GET) or insert a product row (POST).

    On POST, validates that quantity parses as an int, resolves the type
    name to its id, and inserts the product.
    """
    send_info()
    if request.method == 'GET':
        if os.path.isfile('fridge.db'):
            conn = get_connection()
            cursor = conn.cursor()

            with conn:
                cursor.execute("SELECT * FROM types")
                rows = cursor.fetchall()

            if 'product_type' not in context.keys():
                context['product_type'] = rows

            default = 'Drinks'

            return render_template('input.html', **context, default=default)
        else:
            return "Brak pliku bazy danych"

    if request.method == 'POST':
        try:
            quantity = int(request.form['quantity'])
            qbool = True
        except (KeyError, ValueError):  # narrowed from a bare except
            context['message'] = "Quantity must be INT"
            qbool = False

        if qbool:
            conn = get_connection()
            cursor = conn.cursor()

            with conn:
                # Parameterized: the f-string query was SQL-injectable via
                # the posted type name. `type_id` also avoids shadowing the
                # builtin `id`.
                cursor.execute("SELECT id FROM types WHERE name==?",
                               (request.form['type'],))
                type_id = cursor.fetchone()['id']

            cursor.execute(
                "INSERT INTO products(name, quantity, type_id) VALUES(?, ?, ?)",
                (request.form['name'], quantity, type_id))
            # The old rollback() right after commit() was a no-op; removed.
            conn.commit()

        return render_template('input.html', **context)
Beispiel #14
0
def dodaj_uzytkownika(username, hashed_password, is_admin):
    """Insert one row into the users table, commit, and close the connection."""
    conn = get_connection()
    cursor = conn.cursor()
    zapytanie = """INSERT INTO 'users' VALUES (NULL, :username, :password, :is_admin); """
    cursor.execute(zapytanie, {
        'username': username,
        'password': hashed_password,
        'is_admin': is_admin,
    })
    conn.commit()
    conn.close()
Beispiel #15
0
def neutral_tweets():
    """Return the number of tweets whose sentiment is 'neu'.

    Fix: the original appended row[i] while enumerating the result set,
    indexing each row by its position in the list -- it only worked by
    accident because a COUNT query yields exactly one row. Read column 0
    of the single row instead.
    """
    mydb = db.get_connection()
    sql = "select count(1)  from tweets where sentiment = 'neu';"
    cursor = mydb.cursor()
    cursor.execute(sql)
    count = cursor.fetchone()[0]
    mydb.close()
    return count
Beispiel #16
0
def top_tweets():
    """Return the three most-searched terms, highest search count first."""
    connection = db.get_connection()
    cursor = connection.cursor()
    cursor.execute(
        "select * from search_terms order by search_qty desc limit 3;")
    # Copy the result set into a plain list before closing the connection.
    rows = [entry for entry in cursor.fetchall()]
    connection.close()
    return rows
Beispiel #17
0
def total_searchs():
    """Return the total number of searches recorded in search_terms (0 when empty)."""
    connection = db.get_connection()
    cursor = connection.cursor()
    cursor.execute("select sum(search_qty) from search_terms;")
    total = 0
    # The single-row result has one column; keep the last value seen.
    for value in cursor.fetchone():
        total = value
    connection.close()
    return total
Beispiel #18
0
def findRecipie():
    """Render the full recipe list, or a fallback message when the db file is missing."""
    send_info()
    if not os.path.isfile('fridge.db'):
        return "Brak danych"

    conn = get_connection()
    cursor = conn.cursor()

    with conn:
        cursor.execute("SELECT * FROM recipies")
        return render_template('find_recipie.html',
                               rows=cursor.fetchall(),
                               **context)
Beispiel #19
0
def details(id):
    """Render the detail page for the recipe with primary key `id`.

    Returns None implicitly when the database file is missing (original
    behavior, preserved).
    """
    send_info()
    if os.path.isfile('fridge.db'):
        conn = get_connection()
        cursor = conn.cursor()

        with conn:
            # Parameterized: the f-string interpolation of `id` was
            # SQL-injectable.
            cursor.execute("SELECT * FROM recipies WHERE Id=?", (id,))
            rows = cursor.fetchone()
            return render_template('details_recipie.html',
                                   rows=rows,
                                   **context)
Beispiel #20
0
def chc_user(user_name, user_email):
    """Return True when neither the name nor the mail is already taken.

    False means at least one matching users row exists.
    """
    conn = get_connection()
    cursor = conn.cursor()

    with conn:
        # Parameterized: the f-string interpolation of user-supplied name
        # and mail was SQL-injectable.
        cursor.execute("SELECT * FROM users WHERE name=? or mail=?",
                       (user_name, user_email))
        rows = cursor.fetchall()
        if len(rows) > 0:
            return False
    return True
Beispiel #21
0
def output():
    """Render the product inventory joined with its type names, or a fallback when the db file is missing."""
    send_info()
    if not os.path.isfile('fridge.db'):
        return "Brak danych"

    conn = get_connection()
    cursor = conn.cursor()

    with conn:
        cursor.execute(
            "SELECT products.id, products.name, products.quantity, types.name as 'type' FROM products INNER JOIN types ON products.type_id = types.id;"
        )
        return render_template('output.html',
                               rows=cursor.fetchall(),
                               **context)
Beispiel #22
0
def delete_rec(id):
    """Delete the recipe with primary key `id`, then redirect to the recipe list."""
    send_info()
    if os.path.isfile('fridge.db'):
        conn = get_connection()
        cursor = conn.cursor()

        with conn:
            # Parameterized: the f-string interpolation of `id` was
            # SQL-injectable.
            cursor.execute("DELETE FROM recipies WHERE Id=?", (id,))
            # The old rollback() right after commit() was a no-op; removed.
            conn.commit()

            return redirect("/find_recipie/")
    return "Brak danych"
Beispiel #23
0
def summery():
    """Render total quantity per product name, or a fallback when the db file is missing."""
    send_info()
    if not os.path.isfile('fridge.db'):
        return "Brak danych"

    conn = get_connection()
    cursor = conn.cursor()

    with conn:
        cursor.execute(
            "SELECT name, sum(quantity) as quantity FROM products GROUP BY name"
        )
        return render_template('summary.html',
                               rows=cursor.fetchall(),
                               **context)
Beispiel #24
0
def zmien():
    """Transform the stored DNA sequence (to RNA, reverse complement, or
    protein), persist the result, and log the run in the sekwencje table.

    Reads the sequence from Sekwencja.txt, writes the result to
    Rezultat.txt, and inserts the Wpis.json record into the database.
    """
    id = session.get('user_id')
    username = session.get('username')

    dane_uzytkownika = {'id': id, 'username': username}
    zapisuje_json(dane_uzytkownika)

    komplementarne_kod = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}

    RNA_kod = {'A': 'A', 'T': 'U', 'G': 'G', 'C': 'C'}
    wybor = request.args.get('typ')
    wybor_dict = {'rodzaj_zmiany': wybor}
    zapisuje_json(wybor_dict)

    with open('Sekwencja.txt', mode='r') as f:
        sekw = f.read()
        zmieniona = ''
        # Initialized up front: an unrecognized 'typ' used to leave `zmiana`
        # unbound and crash with a NameError at render time.
        zmiana = ''

        if wybor == 'RNA':
            zmieniona = zmieniaj(sekw, RNA_kod)
            zmiana = 'RNA'

        elif wybor == 'reverse':
            zmieniona = zmieniaj(sekw, komplementarne_kod)
            zmiana = 'DNA nić odwrócona'

        elif wybor == 'protein':
            zmieniona = translacja(sekw)
            zmiana = 'Białko'

    zmieniona_dict = {'sekwencja_wyjscie': zmieniona}

    with open('Rezultat.txt', mode='w') as f:
        f.write(zmieniona)
    zapisuje_json(zmieniona_dict)
    with open('Wpis.json') as f:
        wpis = json.load(f)
        zapytanie = """INSERT INTO 'sekwencje' VALUES (NULL, :username, :nazwa, :sekwencja_wejscie, :rodzaj_zmiany, :sekwencja_wyjscie, :data, :godzina); """
        conn = get_connection()
        c = conn.cursor()
        c.execute(zapytanie, wpis)
        conn.commit()
        conn.close()

    return render_template('zmien.html',
                           dane=zmieniona,
                           sekwencja=sekw,
                           zmiana=zmiana,
                           username=username)
Beispiel #25
0
def check_alarms():
    """Return the current user's alarms whose condition holds right now.

    Compares each alarm's threshold against the summed product quantity
    using the alarm's comparison code (LT/LE/GT/GE). Returns [] when the
    database file is missing.
    """
    if os.path.isfile('fridge.db'):
        conn = get_connection()
        cursor = conn.cursor()

        with conn:
            cursor.execute(
                "SELECT name, sum(quantity) as quantity FROM products GROUP BY name"
            )
            products = cursor.fetchall()
            products_dict = {}
            for product in products:
                products_dict[product['name']] = product['quantity']

            # Parameterized: interpolating session['user_id'] into the SQL
            # string via an f-string was SQL-injectable.
            cursor.execute("""
                            SELECT products.name, alm_type.name as eq, alarms.val, alarms.message, users.id as user
                            FROM products
                            INNER JOIN alarms
                            ON products.id = alarms.prod_id
                            INNER JOIN alm_type
                            ON alarms.type_id = alm_type.id
                            INNER JOIN users_alm
                            ON alarms.id = users_alm.alm_id
                            INNER JOIN users
                            ON users_alm.user_id = users.id
                            WHERE user=?
                            """, (session['user_id'],))
            alarms = cursor.fetchall()
            active_alarms = []
            # NOTE(review): assumes every alarm's product still appears in
            # products_dict -- a product with no rows would raise KeyError.
            for alarm in alarms:
                qty = products_dict[alarm['name']]
                if alarm['eq'] == 'LT' and qty < alarm['val']:
                    active_alarms.append(alarm)

                if alarm['eq'] == 'LE' and qty <= alarm['val']:
                    active_alarms.append(alarm)

                if alarm['eq'] == 'GT' and qty > alarm['val']:
                    active_alarms.append(alarm)

                if alarm['eq'] == 'GE' and qty >= alarm['val']:
                    active_alarms.append(alarm)

            return active_alarms

    return []
Beispiel #26
0
def home():
    """Render the dashboard: quantity totals grouped by product type."""
    send_info()
    if not os.path.isfile('fridge.db'):
        return "Brak pliku bazy danych"

    conn = get_connection()
    cursor = conn.cursor()

    with conn:
        cursor.execute(
            "SELECT types.name, sum(quantity) as 'quantity' FROM products INNER JOIN types ON products.type_id = types.id GROUP BY type_id"
        )
        rows = cursor.fetchall()

    return render_template('dashboard.html', **context, rows=rows)
Beispiel #27
0
def dump_filing_sked(sked_name, filing_number, destination_file):
    """
    Blindly dump a csv file of an entire filing, regardless of size. *Some
    filings are 200MB plus -- see #876048 or ACTBLUE's monthly stuff.
    """
    # bail out early on junk arguments; int() also neutralizes any
    # non-numeric filing_number before it reaches the SQL string
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b', 'e']
    filing_number = int(filing_number)
    fieldlist = fields[sked_name]

    conn = get_connection()
    cur = conn.cursor()

    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE fec_alerts_new_filing.filing_number = %s and superceded_by_amendment=False) to '%s' with csv header quote as '"' escape as '\\'""" % (
        fieldlist, sked_name, sked_name, filing_number, destination_file)
    cur.execute(dumpcmd)
Beispiel #28
0
    def post(self, id=None):
        """Return all representatives, optionally filtered by shelter id,
        as a JSON payload."""
        where_statement = ""
        if id:
            # NOTE(review): `id` is formatted straight into the SQL --
            # injectable unless the router guarantees it is numeric; verify.
            where_statement += " where shelter_id = {}".format(id)

        conn = db_utils.get_connection()
        rows = conn.query("""
                select * 
                from representatives
                {0};
            """.format(where_statement))

        # Dates are not JSON-serializable; render them as ISO strings.
        for row in rows:
            row['birthday'] = row['birthday'].strftime("%Y-%m-%d")

        self.finish({'result': 'ok', 'data': json.dumps(rows)})
Beispiel #29
0
def dump_filing_sked(sked_name, filing_number, destination_file):
    """
    Blindly dump a csv file of an entire filing, regardless of size. *Some
    filings are 200MB plus -- see #876048 or ACTBLUE's monthly stuff.
    """
    # reject junk arguments up front; int() also neutralizes any
    # non-numeric filing_number before it reaches the SQL string
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b', 'e']
    filing_number = int(filing_number)
    fieldlist = fields[sked_name]

    conn = get_connection()
    cur = conn.cursor()

    copy_sql = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE fec_alerts_new_filing.filing_number = %s and superceded_by_amendment=False) to '%s' with csv header quote as '"' escape as '\\'""" % (
        fieldlist, sked_name, sked_name, filing_number, destination_file)
    cur.execute(copy_sql)
Beispiel #30
0
def delete_alm(id):
    """Delete alarm `id` and its link row for the current user, then redirect.

    `id` is the alarm's primary key; the link table row is matched on
    (user_id = current user, alm_id = id).
    """
    send_info()
    if os.path.isfile('fridge.db'):
        conn = get_connection()
        cursor = conn.cursor()

        with conn:
            # Parameterized: the f-string queries were SQL-injectable. The
            # rollback() calls right after commit() were no-ops; removed.
            cursor.execute("DELETE FROM alarms WHERE Id=?", (id,))
            conn.commit()

            # BUG FIX: the original swapped the columns here, matching
            # user_id against the alarm id and alm_id against the session
            # user id.
            cursor.execute(
                "DELETE FROM users_alm WHERE user_id=? AND alm_id=?",
                (session['user_id'], id))
            conn.commit()
            return redirect("/output_alm/")
    return "Brak danych"
Beispiel #31
0
def dump_big_non_indiv_contribs(destination_file):
    """Dump $10,000+ schedule A contributions with a non-empty contributor organization name to destination_file via Postgres COPY."""
    # This is contributions to super-pacs greater than $5,000 + reported contributions to non-committees greater than $5,000, plus line 17 (other federal receipts) of $5,000 or more to hybrid pacs (see http://www.fec.gov/press/Press2011/20111006postcarey.shtml). Valid 'other federal receipts' incurred by the hybrid pac of $5,000 plus will also show up in this line... 

    sked_name = 'a'
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]

    connection = get_connection()
    cursor = connection.cursor()

    # need to join to get the committee name. 
    # NOTE(review): despite the comment above, only $10,000+ rows are kept.
    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number  WHERE (memo_code isnull or not memo_code = 'X') and committee_type in ('I', 'O', 'U', 'V', 'W')  and contributor_organization_name <> '' and superceded_by_amendment=False and contribution_amount >= 10000 and %s >= %s and is_superceded=False) to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING, destination_file)
    #print dumpcmd
    start = time.time()
    result = cursor.execute(dumpcmd);
    elapsed_time = time.time() - start
    print "elapsed time for dumping big non-individual contribs: %s" % ( elapsed_time)
Beispiel #32
0
def dump_committee_sked(sked_name, committee_number, destination_file):
    """
    Blindly dump a csv file of an entire committee, regardless of size. *Some filings are 200MB plus -- see #876048 or ACTBLUE's monthly stuff. 
    
    The rule is a body line is superceded EITHER if it's parent filing is superceded, or if the line itself is superceded. F24's and F6's are superceded line by line--though perhaps that could be improved on. 
    
    """
    
    # break if we're given junk args. 
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b', 'e']
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]
    
    connection = get_connection()
    cursor = connection.cursor()
    # NOTE(review): committee_number is quoted straight into the COPY command
    # (COPY can't take bind parameters) -- injectable if it is ever
    # user-supplied; callers should validate it as a FEC id first.
    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False and fec_alerts_new_filing.fec_id = '%s') to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING, committee_number, destination_file)
    cursor.execute(dumpcmd);
Beispiel #33
0
def dump_all_sked(sked_name, destination_file):
    """Dump every non-superceded line of the given schedule since CYCLE_START_STRING to destination_file via Postgres COPY."""

    # break if we're given junk args. 
    sked_name = sked_name.lower()
    assert sked_name in ['a', 'b', 'e']
    fieldlist = fields[sked_name]
    datefieldkey = "%s_date" % (sked_name)
    datefield = fields[datefieldkey]
    
    connection = get_connection()
    cursor = connection.cursor()
    
    # need to join to get the committee name. 
    dumpcmd = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number  WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False) to '%s' with csv header quote as '"' escape as '\\'""" % (fieldlist, sked_name, sked_name, datefield, CYCLE_START_STRING, destination_file)
    start = time.time()
    result = cursor.execute(dumpcmd);
    elapsed_time = time.time() - start
    print dumpcmd
    print "elapsed time for dumping sked %s: %s" % (sked_name, elapsed_time)
    def post(self):
        """Create or update a user from the JSON request body.

        Expects JSON keys: birthday ('DD.MM.YYYY'), user_id, first_name,
        last_name, photo. Writes a JSON result dict back.
        """
        conn = db_utils.get_connection()

        try:
            data = json.loads(self.request.body)
            # Convert 'DD.MM.YYYY' into ISO-style 'YYYY-MM-DD'.
            birthday = ''
            for el in reversed(data['birthday'].split('.')):
                birthday += '-' + el
            birthday = birthday[1:]

            data['birthday'] = birthday

            # NOTE(review): request values are formatted straight into the
            # SQL -- SQL-injectable if any field is attacker-controlled;
            # switch to the driver's bind parameters once its API is
            # confirmed.
            conn.execute(u"""
            insert into users(user_id, first_name, last_name, birthday, photo)
            values({user_id}, '{first_name}', '{last_name}', '{birthday}', '{photo}')
            on duplicate key update first_name='{first_name}', last_name='{last_name}', birthday='{birthday}', photo='{photo}';
                """.format(**data))
            self.write(json.dumps({'result': 'ok'}))
        except Exception, e:
            logging.warn('cannot insert data: ' + str(e))
            self.write(json.dumps({'result': 'fail'}))
Beispiel #35
0
def dump_committee_sked(sked_name, committee_number, destination_file):
    """Write one committee's schedule lines, cycle to date, to a csv file.

    Dumps unconditionally regardless of size -- some filings run to hundreds
    of megabytes (see #876048 or ACTBLUE's monthlies).  A body line is
    treated as superceded if either its parent filing or the line itself has
    been superceded by an amendment; F24s and F6s are superceded line by
    line, which could perhaps be improved on.
    """
    # Refuse junk schedule names up front.
    normalized_sked = sked_name.lower()
    assert normalized_sked in ['a', 'b', 'e']

    column_list = fields[normalized_sked]
    date_column = fields["%s_date" % normalized_sked]

    sql_template = """copy (SELECT %s FROM formdata_sked%s left join fec_alerts_new_filing on formdata_sked%s.filing_number = fec_alerts_new_filing.filing_number WHERE superceded_by_amendment=False and %s >= %s and is_superceded=False and fec_alerts_new_filing.fec_id = '%s') to '%s' with csv header quote as '"' escape as '\\'"""
    dump_sql = sql_template % (
        column_list,
        normalized_sked,
        normalized_sked,
        date_column,
        CYCLE_START_STRING,
        committee_number,
        destination_file,
    )

    cursor = get_connection().cursor()
    cursor.execute(dump_sql)
Beispiel #36
0
    def post(self, id=None):
        """Return shelter records as JSON.

        With *id*, returns the single matching shelter object; without it,
        returns the full list.  Response shape:
        {'result': 'ok', 'data': <json-encoded row(s)>}.

        Raises:
            ValueError: if *id* is supplied but is not an integer string.
        """
        where_statement = ""
        if id:
            # int() guards against SQL injection: *id* arrives from the
            # request route and is interpolated straight into the query
            # below, so force it to a number before it reaches the SQL.
            where_statement += " where s.id = {}".format(int(id))

        conn = db_utils.get_connection()
        rows = conn.query("""
                select 
                    s.id, 
                    s.name,
                    s.address, 
                    s.photo, 
                    s.site

                from shelters as s
                {0};
            """.format(where_statement))

        self.finish({
            'result': 'ok',
            'data': json.dumps(rows[0] if id else rows)
        })
def process_filing_body(filingnum, fp=None, logger=None):
    
    
    #It's useful to pass the form parser in when running in bulk so we don't have to keep creating new ones. 
    if not fp:
      fp = form_parser()
      
    if not logger:
        logger=fec_logger()
    msg = "process_filing_body: Starting # %s" % (filingnum)
    #print msg
    logger.info(msg)
      
    connection = get_connection()
    cursor = connection.cursor()
    cmd = "select fec_id, is_superceded, data_is_processed from fec_alerts_new_filing where filing_number=%s" % (filingnum)
    cursor.execute(cmd)
    
    cd = CSV_dumper(connection)
    
    result = cursor.fetchone()
    if not result:
        msg = 'process_filing_body: Couldn\'t find a new_filing for filing %s' % (filingnum)
        logger.error(msg)
        raise FilingHeaderDoesNotExist(msg)
        
    # will throw a TypeError if it's missing.
    header_id = 1
    is_amended = result[1]
    is_already_processed = result[2]
    if is_already_processed:
        msg = 'process_filing_body: This filing has already been entered.'
        logger.error(msg)
        raise FilingHeaderAlreadyProcessed(msg)
    
    #print "Processing filing %s" % (filingnum)
    try:
        f1 = filing(filingnum)
    except:
        print "*** couldn't handle filing %s" % (filingnum)
        return False
    form = f1.get_form_type()
    version = f1.get_version()
    filer_id = f1.get_filer_id()
    
    # only parse forms that we're set up to read
    
    if not fp.is_allowed_form(form):
        if verbose:
            msg = "process_filing_body: Not a parseable form: %s - %s" % (form, filingnum)
            # print msg
            logger.error(msg)
        return None
        
    linenum = 0
    while True:
        linenum += 1
        row = f1.get_body_row()
        if not row:
            break
        
        #print "row is %s" % (row)
        #print "\n\n\nForm is %s" % form
        try:
            linedict = fp.parse_form_line(row, version)
            if linedict['form_type'].upper().startswith('SE'):
                print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
                # make sure the transaction isn't already there before entering. 
                try:
                    SkedE.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
                except SkedE.DoesNotExist:
                    process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)

            elif linedict['form_type'].upper().startswith('SA'):
                print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
                # make sure the transaction isn't already there before entering. 
                try:
                    SkedA.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
                    print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
                except SkedA.DoesNotExist:
                    process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)


            elif linedict['form_type'].upper().startswith('SB'):
                print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
                # make sure the transaction isn't already there before entering. 
                try:
                    SkedB.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
                    print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
                except SkedB.DoesNotExist:
                    process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
            
            
        except ParserMissingError:
            msg = 'process_filing_body: Unknown line type in filing %s line %s: type=%s Skipping.' % (filingnum, linenum, row[0])
            logger.warn(msg)
            continue
        except KeyError:
            "missing form type? in filing %s" % (filingnum)
    
    # commit all the leftovers
    cd.commit_all()
    cd.close()
    counter = cd.get_counter()
    total_rows = 0
    for i in counter:
        total_rows += counter[i]
        
    msg = "process_filing_body: Filing # %s Total rows: %s Tally is: %s" % (filingnum, total_rows, counter)
    # print msg
    logger.info(msg)
    
    
    # don't commit during testing of fix 
    
    # this data has been moved here. At some point we should pick a single location for this data. 
    header_data = dict_to_hstore(counter)
    cmd = "update fec_alerts_new_filing set lines_present='%s'::hstore where filing_number=%s" % (header_data, filingnum)
    cursor.execute(cmd)
    
    # mark file as having been entered. 
    cmd = "update fec_alerts_new_filing set data_is_processed = True where filing_number=%s" % (filingnum)
    cursor.execute(cmd)
    
    # flag this filer as one who has changed. 
    cmd = "update summary_data_committee_overlay set is_dirty=True where fec_id='%s'" % (filer_id)
    cursor.execute(cmd)
def process_filing_body(filingnum, fp=None, logger=None):
    """Parse the body rows of filing *filingnum* into the efilings_ tables.

    Newer variant of the loader (note: it redefines the same name as the
    earlier version in this file, so this definition wins at import time).
    Reads the filing header from efilings_filing, parses every body row,
    bulk-loads them via CSV_dumper, then stamps the header with a line
    tally, marks it processed, and flags the committee -- and, via the
    candidate-committee link table, the candidate -- as dirty.

    Raises:
        FilingHeaderDoesNotExist: no efilings_filing row for filingnum.
        FilingHeaderAlreadyProcessed: data_is_processed is already '1'.
    """

    # It's useful to pass the form parser in when running in bulk so we don't have to keep creating new ones.
    if not fp:
        fp = form_parser()

    if not logger:
        logger = fec_logger()
    msg = "process_filing_body: Starting # %s" % (filingnum)
    # print msg
    logger.info(msg)

    connection = get_connection()
    cursor = connection.cursor()
    # NOTE: this schema uses efilings_filing / superseded_by_amendment,
    # unlike the older fec_alerts_new_filing tables elsewhere in this file.
    cmd = "select fec_id, superseded_by_amendment, data_is_processed from efilings_filing where filing_number=%s" % (
        filingnum
    )
    cursor.execute(cmd)

    cd = CSV_dumper(connection)

    result = cursor.fetchone()
    if not result:
        msg = "process_filing_body: Couldn't find a new_filing for filing %s" % (filingnum)
        logger.error(msg)
        raise FilingHeaderDoesNotExist(msg)

    # will throw a TypeError if it's missing.
    line_sequence = 1
    is_amended = result[1]
    is_already_processed = result[2]
    # data_is_processed is stored as the string '1', not a boolean.
    if is_already_processed == "1":
        msg = "process_filing_body: This filing has already been entered."
        print msg
        logger.error(msg)
        raise FilingHeaderAlreadyProcessed(msg)

    # print "Processing filing %s" % (filingnum)
    f1 = filing(filingnum)
    form = f1.get_form_type()
    version = f1.get_version()
    filer_id = f1.get_filer_id()

    # only parse forms that we're set up to read

    if not fp.is_allowed_form(form):
        if verbose:
            msg = "process_filing_body: Not a parseable form: %s - %s" % (form, filingnum)
            print msg
            logger.info(msg)
        return None

    # Stream body rows one at a time until the filing is exhausted.
    linenum = 0
    while True:
        linenum += 1
        row = f1.get_body_row()
        if not row:
            break

        # print "row is %s" % (row)
        # print "\n\n\nForm is %s" % form
        try:
            linedict = fp.parse_form_line(row, version)
            # print "\n\n\nform is %s" % form
            process_body_row(linedict, filingnum, line_sequence, is_amended, cd, filer_id)
        except ParserMissingError:
            # Unknown line types are logged and skipped rather than fatal.
            msg = "process_filing_body: Unknown line type in filing %s line %s: type=%s Skipping." % (
                filingnum,
                linenum,
                row[0],
            )
            print msg
            logger.warn(msg)
            continue

    # commit all the leftovers
    cd.commit_all()
    cd.close()
    counter = cd.get_counter()
    total_rows = 0
    for i in counter:
        total_rows += counter[i]

    msg = "process_filing_body: Filing # %s Total rows: %s Tally is: %s" % (filingnum, total_rows, counter)
    print msg
    logger.info(msg)

    ######## DIRECT DB UPDATES. PROBABLY A BETTER APPROACH, BUT...

    # Stash the per-line-type tally on the header row as an hstore.
    header_data = dict_to_hstore(counter)
    cmd = "update efilings_filing set lines_present='%s'::hstore where filing_number=%s" % (header_data, filingnum)
    cursor.execute(cmd)

    # mark file as having been entered.
    cmd = "update efilings_filing set data_is_processed='1' where filing_number=%s" % (filingnum)
    cursor.execute(cmd)

    # flag this filer as one who has changed.
    cmd = "update efilings_committee set is_dirty=True where fec_id='%s' and cycle='%s'" % (filer_id, CURRENT_CYCLE)
    cursor.execute(cmd)

    # should also update the candidate is dirty flag too by joining w/ ccl table.
    # these tables aren't indexed, so do as two separate queries.
    cmd = "select cand_id from ftpdata_candcomlink where cmte_id = '%s' and cmte_dsgn in ('A', 'P')" % (filer_id)
    cursor.execute(cmd)
    result = cursor.fetchone()
    if result:
        cand_id = result[0]
        cmd = "update efilings_candidate set is_dirty=True where fec_id = '%s' and cycle='%s'" % (
            cand_id,
            CURRENT_CYCLE,
        )
        cursor.execute(cmd)

    connection.close()