Code Example #1
def initiate():
    start_time = time()

    print "\n\ndeleting _master_prospects"
    del_query = """DROP TABLE IF EXISTS _master_prospects;"""
    db.query(del_query)
    db.conn.commit()

    print "\ncreating _master_prospects"
    query = "CREATE TABLE _master_prospects"
    for yr in range(2013, year+1):
        query += process_prospects(yr)
    print "writing _master_prospects"
    query += ";"
    db.query(query)
    db.conn.commit()

    print "\nupdating tables"
    update_tables(year)


    print "\nexporting to .xlsx"
    export_tables(year)

    print "\nexporting master to .csv"
    export_masterCSV("_master_prospects")
    export_masterCSV("_master_current")


    end_time = time()
    elapsed_time = float(end_time - start_time)
    print "\n\nmaster_prospect_tables.py"
    print "time elapsed (in seconds): " + str(elapsed_time)
    print "time elapsed (in minutes): " + str(elapsed_time/60.0)
Code Example #2
def process():
    print "processed_team_hitting"
    db.query("TRUNCATE TABLE `processed_team_hitting_basic`")
    db.query("TRUNCATE TABLE `processed_team_hitting_advanced`")

    yr_min, yr_max = db.query(
        "SELECT MIN(year), MAX(year) FROM processed_league_averages_pitching"
    )[0]

    for year in range(yr_min, yr_max + 1):
        for _type in ('basic', 'advanced'):
            print str(year) + "\thitting\t" + _type
            table = 'processed_team_hitting_%s' % (_type)

            if _type == 'basic':
                entries = process_basic(year)
            elif _type == 'advanced':
                entries = process_advanced(year)

            if entries != []:
                db.insertRowDict(entries,
                                 table,
                                 replace=True,
                                 insertMany=True,
                                 rid=0)
            db.conn.commit()
Code Example #3
def export_to_csv(table_name):


    col_names_qry = """SELECT `COLUMN_NAME` 
    FROM `INFORMATION_SCHEMA`.`COLUMNS` 
    WHERE `TABLE_SCHEMA`='personal' 
    AND `TABLE_NAME`='%s';"""
    col_names_query = col_names_qry % (table_name)

    col_names = db.query(col_names_query)

    columns = []
    for col_name in col_names:
        columns.append(col_name[0])

    csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/personal_%s.csv" % (table_name)
    csv_file = open(csv_title, "wb")
    append_csv = csv.writer(csv_file)
    append_csv.writerow(columns)

    qry = "SELECT * FROM %s ORDER BY 1;" % table_name

    res = db.query(qry)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str,):
                row[i] = '"' + "".join([l if ord(l) < 128 else "" for l in val]).replace("<o>","").replace("<P>","").replace("\n","  ") + '"'
        append_csv.writerow(row)
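
The quote-and-strip expression inside the loop above recurs throughout these export functions (see also Code Examples #8, #19, #22, and #30). As a sketch, the same logic pulled out into a helper (the name sanitize_csv_value is hypothetical, not part of the project) reads:

def sanitize_csv_value(val):
    # Keep only 7-bit ASCII characters, drop the stray "<o>"/"<P>"
    # markup fragments, flatten newlines, and wrap the result in
    # double quotes for the CSV writer.
    ascii_only = "".join([l for l in val if ord(l) < 128])
    cleaned = ascii_only.replace("<o>", "").replace("<P>", "").replace("\n", "  ")
    return '"' + cleaned + '"'
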
Code Example #4
def process_wc(year, timestamp):
    print '\tdetermining wild card winners'
    teams_query = """SELECT year, team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type
    FROM __in_playoff_probabilities
    WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
    AND wild_card != 0;"""

    res = db.query(teams_query)

    for row in res:
        year, team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type = row

        lg = division[:2]

        oppn_qry = """SELECT team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type
        FROM __in_playoff_probabilities
        WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
        AND wild_card != 0
        AND year = %s
        AND total_playoff_games_played = %s
        AND left(division,2) = '%s'
        AND strength_type = '%s'
        AND team_name != '%s';"""

        oppn_query = oppn_qry % (year, total_playoff_games_played, lg,
                                 strength_type, team_name)

        oppns = db.query(oppn_query)

        win_wc = []
        for oppn in oppns:
            oppn_name, oppn_abb, oppn_division, oppn_wild_card, _, _ = oppn

            matchup_prob = 1

            series_id = '%sWC' % (lg)

            series_wins, series_losses = get_series_data(
                series_id, team_name, oppn_name, strength_type)

            team_winProb = get_single_game_win_prob(team_abb, oppn_abb,
                                                    strength_type, year)

            series_games = 1

            series_prob = get_series_prob(series_games, series_wins,
                                          series_losses, team_winProb)

            win_wc.append(matchup_prob * series_prob)

        win_wc = sum(win_wc)

        db.updateRow(
            {'win_wc': win_wc},
            "__in_playoff_probabilities",
            ("team_name", "year", "total_playoff_games_played",
             "strength_type"),
            (team_name, year, total_playoff_games_played, strength_type),
            operators=['=', '=', '=', '='])
        db.conn.commit()
Code Example #5
File: lineup_optimizer.py Project: Connor-R/NSBL
def process(year):
    start_time = time()

    # Each time we run this, we clear the pre-existing table
    db.query("TRUNCATE TABLE `__optimal_lineups`")

    i = 0 

    team_q = """SELECT DISTINCT team_abb
    FROM teams 
    -- FROM excel_rosters
    WHERE year = %s
    ORDER BY team_abb ASC
    """
    team_qry = team_q % (year)
    teams = db.query(team_qry)
    for team in teams:
        team_abb = team[0]

        i += 1
        print i, team_abb

        get_player_matrix(team_abb, year)

    end_time = time()

    elapsed_time = float(end_time - start_time)
    print "lineup_optimizer.py"
    print "time elapsed (in seconds): " + str(elapsed_time)
    print "time elapsed (in minutes): " + str(elapsed_time/60.0)
Code Example #6
def process_win_wc(year):
    print "win wild card"

    for _type in ('roster', 'projected'):
        # print '\t', _type
        for conf in ('AL', 'NL'):

            team_query = "SELECT team_abb, team_name, win_division, wc_1, wc_2, year, games_played FROM __playoff_probabilities JOIN (SELECT team_abb, MAX(year) AS year, MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) t2 USING (team_abb, year, games_played) WHERE strength_type = '%s' AND LEFT(division,2) = '%s' AND year = %s" % (
                _type, conf, year)
            team_res = db.query(team_query)

            adv_dict = {}
            for team_row in team_res:
                team_abb, team_name, div_prob, wc1_prob, wc2_prob, year, games_played = team_row
                # print '\t\t', team_name

                oppn_qry = """SELECT team_abb, team_name, wc_1, wc_2
                FROM __playoff_probabilities
                JOIN (SELECT team_abb, MAX(year) AS year, MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) t2 USING (team_abb, year, games_played)
                WHERE strength_type = '%s' 
                AND LEFT(division,2) = '%s'
                AND team_name != '%s'
                AND year = %s;"""
                oppn_query = oppn_qry % (_type, conf, team_name, year)
                oppn_res = db.query(oppn_query)

                wc_prob = 0.0
                for oppn_row in oppn_res:
                    oppn_abb, oppn_name, oppn_wc1, oppn_wc2 = oppn_row

                    # probability of wc1 * (oppn_wc2 | not wc2)
                    if (1.0 - float(wc2_prob)) == 0:
                        matchup1_prob = 0.0
                    else:
                        matchup1_prob = float(
                            wc1_prob * oppn_wc2) / (1.0 - float(wc2_prob))

                    # probability of wc2 * (oppn_wc1 | not wc1)
                    if (1.0 - float(wc1_prob)) == 0:
                        matchup2_prob = 0.0
                    else:
                        matchup2_prob = float(
                            wc2_prob * oppn_wc1) / (1.0 - float(wc1_prob))

                    matchup_prob = matchup1_prob + matchup2_prob

                    win_game_prob = get_single_game_win_prob(
                        team_abb, oppn_abb, _type, year)

                    wc_overall_prob = matchup_prob * win_game_prob

                    wc_prob += wc_overall_prob

                overall_prob = wc_prob
                adv_dict[team_name] = [
                    overall_prob, 1.0, False, year, games_played
                ]

            col_name = 'win_wc'
            adjust_probabilities(adv_dict, col_name, 1.0, _type)
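
As a worked example of the matchup terms above (the numbers are illustrative): with wc1_prob = 0.30 and wc2_prob = 0.10 for the team, and oppn_wc2 = 0.20 for the opponent, matchup1_prob = 0.30 * 0.20 / (1 - 0.10) ≈ 0.067, i.e. the chance the team takes the first wild card while this opponent takes the second, with the opponent's wc_2 probability renormalized by the team itself not occupying that slot.
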
Code Example #7
def initiate(end_year, scrape_length):
    if scrape_length == "All":
        current = False
        for year in range(2011, end_year):
            for team_id in range(1,31):
                url_base = "http://thensbl.com/%s/" % year
                url_ext = "tmindex%s.htm" % team_id
                url_index = url_base + url_ext

                html_ind = urllib2.urlopen(url_index)
                soup_ind = BeautifulSoup(html_ind,"lxml")
                team_name = ' '.join(
                    soup_ind.find_all('h2')[1].get_text().split(" ")[1:]
                ).split("\n")[0].split("\r")[0]
                
                print url_index, team_name

                initiate_names(team_name, team_id, year, current, url_base)
    else:
        year = end_year
        current = True

        #Each week we truncate the current_rosters and re-fill. That's how we keep it current!
        db.query("TRUNCATE TABLE `current_rosters`")

        for team_id in range(1,31):
            url_base = "http://thensbl.com/"
            url_ext = "tmindex%s.htm" % team_id
            url_index = url_base + url_ext

            print url_index
            html_ind = urllib2.urlopen(url_index)
            soup_ind = BeautifulSoup(html_ind,"lxml")
            team_name = soup_ind.title.get_text()

            initiate_names(team_name, team_id, year, current, url_base)
Code Example #8
def export_masterCSV(table_name):
    print "\t exporting " + table_name + " to .csv"

    col_names_qry = """SELECT `COLUMN_NAME` 
    FROM `INFORMATION_SCHEMA`.`COLUMNS` 
    WHERE `TABLE_SCHEMA`='mlb_prospects' 
    AND `TABLE_NAME`='%s';"""
    col_names_query = col_names_qry % (table_name)

    col_names = db.query(col_names_query)

    columns = []
    for col_name in col_names:
        columns.append(col_name[0])

    csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/MLB_Prospects_%s.csv" % (table_name.replace("_",""))
    csv_file = open(csv_title, "wb")
    append_csv = csv.writer(csv_file)
    append_csv.writerow(columns)

    qry = "SELECT * FROM %s;" % table_name

    res = db.query(qry)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str,):
                row[i] = '"' + "".join([l if ord(l) < 128 else "" for l in val]).replace("<o>","").replace("<P>","") + '"'
        append_csv.writerow(row)
Code Example #9
def team_war(team_abb, year):
    entry = {}
    entry['year'] = year
    entry['team_abb'] = team_abb

    hitter_q = """SELECT
    SUM(defense),
    SUM(position_adj),
    SUM(dWAR),
    SUM(oWAR),
    SUM(replacement),
    SUM(WAR)
    FROM processed_WAR_hitters
    WHERE year = %s
    AND team_abb = '%s';
    """

    hitter_qry = hitter_q % (year, team_abb)

    hitter_data = db.query(hitter_qry)[0]

    defense, position_adj, dWAR, oWAR, replacement, hitter_WAR = hitter_data

    entry['defense'] = defense
    entry['position_adj'] = position_adj
    entry['dWAR'] = dWAR
    entry['oWAR'] = oWAR
    entry['replacement'] = replacement
    entry['hitter_WAR'] = hitter_WAR


    pitcher_q = """SELECT
    SUM(FIP_WAR),
    SUM(ERA_WAR)
    FROM processed_WAR_pitchers
    WHERE year = %s
    AND team_abb = '%s';
    """

    pitcher_qry = pitcher_q % (year, team_abb)

    pitcher_data = db.query(pitcher_qry)[0]

    FIP_WAR, ERA_WAR = pitcher_data

    entry['FIP_WAR'] = FIP_WAR
    entry['ERA_WAR'] = ERA_WAR


    total_fWAR = hitter_WAR + FIP_WAR
    total_rWAR = hitter_WAR + ERA_WAR

    entry['total_fWAR'] = total_fWAR
    entry['total_rWAR'] = total_rWAR

    return entry
Code Example #10
def acquire_shootingData(dataType, _id, season='', isCareer=True):

    if isCareer is False:
        start_season_filt, end_season_filt = db.query("SELECT min(game_date), max(game_date) FROM shots WHERE season_id = %s" % (season.replace('-','')))[0]


        # start_season_filt = str(season[:4])+'-08-01'
        # end_season_filt = str(int(season[:4])+1)+'-08-01'

        query_append = """AND season_id = %s
        AND game_date > '%s'
        AND game_date < '%s'""" % (season.replace('-',''), start_season_filt, end_season_filt)
    else:
        query_append = ''

    shot_query = """SELECT
    season_id, game_id, 
    team_id, game_date,
    event_type, shot_type, 
    shot_zone_basic, shot_zone_area, LOC_X, LOC_Y,
    IF(event_type='Made Shot', 1, 0) AS SHOT_MADE_FLAG,
    zone_pct_plus,
    efg_plus
    FROM shots
    JOIN shots_%s_Relative_Year USING (season_id, season_type, %s_id, shot_zone_basic, shot_zone_area)
    WHERE %s_id = %s
    AND season_type = 'Reg'
    %s"""

    shot_q = shot_query % (dataType, dataType, dataType, _id, query_append)

    shots = db.query(shot_q)

    shot_data = {'season_id':[], 'game_id':[], 'team_id':[], 'game_date':[], 'event_type':[], 'shot_type':[], 'shot_zone_basic':[], 'shot_zone_area':[], 'LOC_X':[], 'LOC_Y':[], 'SHOT_MADE_FLAG':[], 'zone_pct_plus':[], 'efg_plus':[]}

    for row in shots:
        season_id, game_id, team_id, game_date, event_type, shot_type, shot_zone_basic, shot_zone_area, LOC_X, LOC_Y, SHOT_MADE_FLAG, zone_pct_plus, efg_plus = row

        shot_data['season_id'].append(season_id)
        shot_data['game_id'].append(game_id)
        shot_data['team_id'].append(team_id)
        shot_data['game_date'].append(game_date)
        shot_data['event_type'].append(event_type)
        shot_data['shot_type'].append(shot_type)
        shot_data['shot_zone_basic'].append(shot_zone_basic)
        shot_data['shot_zone_area'].append(shot_zone_area)
        shot_data['LOC_X'].append(LOC_X)
        shot_data['LOC_Y'].append(LOC_Y)
        shot_data['SHOT_MADE_FLAG'].append(SHOT_MADE_FLAG)
        shot_data['zone_pct_plus'].append(zone_pct_plus)
        shot_data['efg_plus'].append(efg_plus)

    shot_df = pd.DataFrame(shot_data, columns=shot_data.keys())

    return shot_df
Code Example #11
File: chart_bot.py Project: Connor-R/nba_shot_charts
def get_random_pic(players, hashtags, thread):
    if players == []:
        p_name, p_id = get_rand_player()
        print p_name
        chart_player = p_name.replace(' ', '_')
        player_path = base_path + chart_player + '(' + str(p_id) + ')/'
        charts.gen_charts(p_id)
        try:
            rand_chart = os.listdir(player_path)[random.randint(
                0,
                len(os.listdir(player_path)) - 1)]
            tweet(player_path, rand_chart, hashtags, p_id, thread)
        except OSError:
            get_random_pic([], hashtags, thread)
    else:
        for player in players:
            print player
            try:
                p_id = db.query(
                    "SELECT player_id FROM players WHERE CONCAT(fname, ' ', lname) = '%s'"
                    % player.replace("'", "\\'"))[0][0]
            except (OSError, IndexError):
                if player == 'Mike James':
                    p_id = 1628455
                elif player == 'J.J. Redick':
                    p_id = 200755
                elif player == 'C.J. McCollum':
                    p_id = 203468

            charts.gen_charts(p_id)
        # raw_input("READY TO TWEET?")

        for player in players:
            try:
                p_id = db.query(
                    "SELECT player_id FROM players WHERE CONCAT(fname, ' ', lname) = '%s'"
                    % player.replace("'", "\\'"))[0][0]
            except (OSError, IndexError):
                if player == 'Mike James':
                    p_id = 1628455
                elif player == 'J.J. Redick':
                    p_id = 200755
                elif player == 'C.J. McCollum':
                    p_id = 203468

            player_path = base_path + player.replace(
                ' ', '_') + '(' + str(p_id) + ')/'
            # tweets a range of seasons (-1 is career, -2 is current season, -3 is 2 seasons previous, etc.)
            for i in range(max(0,
                               len(os.listdir(player_path)) - 1),
                           len(os.listdir(player_path)) - 0):
                chart = os.listdir(player_path)[i]
                # print chart
                tweet(player_path, chart, hashtags, p_id, thread)
Code Example #12
def process_champion(year):
    print "win world series"

    for _type in ('roster', 'projected'):
        # print '\t', _type
        champ_dict = {}
        for conf in ('AL', 'NL'):

            team_query = "SELECT team_abb, team_name, make_ws, year, games_played FROM __playoff_probabilities JOIN (SELECT team_abb, MAX(year) AS year, MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) t2 USING (team_abb, year, games_played) WHERE strength_type = '%s' AND LEFT(division,2) = '%s' AND year = %s" % (
                _type, conf, year)
            team_res = db.query(team_query)

            for team_row in team_res:
                team_abb, team_name, make_ws, year, games_played = team_row

                # print '\t\t', team_name

                oppn_qry = """SELECT team_abb, team_name, make_ws
                FROM __playoff_probabilities
                JOIN (SELECT team_abb, MAX(YEAR) AS 'year', MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) a USING (team_abb, YEAR, games_played)
                WHERE strength_type = '%s'
                AND LEFT(division,2) != '%s'
                AND year = %s;"""
                oppn_query = oppn_qry % (_type, conf, year)

                # raw_input(oppn_query)
                oppn_res = db.query(oppn_query)

                champ_prob = 0.0
                for oppn_row in oppn_res:
                    oppn_abb, oppn_name, oppn_ws = oppn_row

                    matchup_prob = float(make_ws) * float(oppn_ws)

                    win_game_prob = get_single_game_win_prob(
                        team_abb, oppn_abb, _type, year)
                    series_games = 7
                    win_series = get_series_prob(series_games=series_games,
                                                 series_wins=0,
                                                 series_losses=0,
                                                 team_winProb=win_game_prob)

                    champ_overall_prob = matchup_prob * win_series

                    champ_prob += champ_overall_prob

                overall_prob = champ_prob
                champ_dict[team_name] = [
                    overall_prob, 1.0, False, year, games_played
                ]

        col_name = 'win_ws'
        adjust_probabilities(champ_dict, col_name, 1.0, _type)
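
get_series_prob itself is not shown in these listings. Assuming it returns the probability of eventually winning a best-of-N series from the current score, given a constant per-game win probability, a minimal recursive sketch would be:

def get_series_prob(series_games, series_wins, series_losses, team_winProb):
    # Sketch only: the project's real implementation is not shown here.
    wins_needed = series_games // 2 + 1
    if series_wins >= wins_needed:
        return 1.0
    if series_losses >= wins_needed:
        return 0.0
    win_branch = get_series_prob(series_games, series_wins + 1,
                                 series_losses, team_winProb)
    loss_branch = get_series_prob(series_games, series_wins,
                                  series_losses + 1, team_winProb)
    return team_winProb * win_branch + (1.0 - team_winProb) * loss_branch
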
Code Example #13
def process(group_type, time_type):
    query = """SELECT %s_id, season_id, season_type, attempts, ROUND(sum_efg_plus/attempts,4) AS ShotSkillPlus
        FROM(
            SELECT %s_id, season_id, season_type, SUM(attempts*zone_efg_plus) AS sum_efg_plus
            FROM shots_%s_Relative_%s r
            WHERE shot_zone_area != 'all'
            AND shot_zone_basic != 'all'
            GROUP BY %s_id, season_id, season_type
        ) a
        JOIN(
            SELECT %s_id, season_id, season_type, attempts
            FROM shots_%s_Relative_%s r
            WHERE shot_zone_area = 'all'
            AND shot_zone_basic = 'all'
            GROUP BY %s_id, season_id, season_type          
        ) b USING (%s_id, season_id, season_type);
"""

    q = query % (group_type, group_type, group_type, time_type, group_type,
                 group_type, group_type, time_type, group_type, group_type)

    # raw_input(q)
    res = db.query(q)
    # raw_input(res)
    entries = []
    _id = '%s_id' % (group_type.lower())
    for row in res:
        # print row
        type_id, season_id, season_type, attempts, shotskillplus = row
        entry = {
            _id: type_id,
            "season_id": season_id,
            "season_type": season_type,
            "attempts": attempts,
            "ShotSkillPlus": shotskillplus
        }
        entries.append(entry)

    table = "shot_skill_plus_%s_%s" % (group_type, time_type)

    if time_type == "Career":
        db.query("TRUNCATE TABLE %s;" % (table))

    if entries != []:
        for i in range(0, len(entries), 1000):
            db.insertRowDict(entries[i:i + 1000],
                             table,
                             insertMany=True,
                             replace=True,
                             rid=0,
                             debug=1)
            db.conn.commit()
Code Example #14
File: zips_prep_FA.py Project: Connor-R/NSBL
def process(year):

    clear_year = "DELETE FROM zips_fangraphs_prep_FA_batters WHERE year = %s;" % (
        year)
    db.query(clear_year)
    db.conn.commit()
    batters(year)

    clear_year = "DELETE FROM zips_fangraphs_prep_FA_pitchers WHERE year = %s;" % (
        year)
    db.query(clear_year)
    db.conn.commit()
    pitchers(year)
Code Example #15
def check_car_dupes(result, current):
    _id = result['cl_id']
    _name = result['title']
    _price = result['list_price']
    check_ID = db.query("SELECT cl_id FROM cars_%s WHERE cl_id = %s" %
                        (current, _id))
    if check_ID != ():
        return True
    check_namePrice = db.query(
        "SELECT cl_id FROM cars_%s WHERE title = '%s' AND list_price = %s"
        % (current, _name, _price))
    if check_namePrice != ():
        return True
    return False
Code Example #16
def export_tables(year):
    for table_name in ("_draft_list", "_master_current", "_master_prospects"):
        print "\t exporting " + table_name + " to .xlsx"
        qry = "SELECT * FROM %s;" % table_name

        res = db.query(qry)

        file_name = "/Users/connordog/Dropbox/Desktop_Files/Baseball/NSBL/%s_%s.xlsx" % (table_name, year)

        workbook = Workbook(file_name)

        sheet = workbook.add_worksheet()

        col_names_qry = """SELECT `COLUMN_NAME` 
        FROM `INFORMATION_SCHEMA`.`COLUMNS` 
        WHERE `TABLE_SCHEMA`='mlb_prospects' 
        AND `TABLE_NAME`='%s';"""
        col_names_query = col_names_qry % (table_name)

        col_names = db.query(col_names_query)

        for col_num, col_name in enumerate(col_names):
            cell_format = workbook.add_format()
            cell_format.set_bold()
            sheet.write(0, col_num, col_name[0], cell_format)

        for i, row in enumerate(res):
            cell_format = workbook.add_format()
            cell_format.set_shrink()
            for j, col in enumerate(row):
                if type(col) in (str,):
                    col = "".join([l if ord(l) < 128 else "" for l in col])
                sheet.write(i+1, j, col, cell_format)

        if table_name == "_draft_list":
            sheet.freeze_panes(1,8)
            sheet.autofilter('A1:EN1')

        elif table_name == "_master_current":
            sheet.freeze_panes(1,16)
            sheet.autofilter('A1:EG1')

        elif table_name == "_master_prospects":
            sheet.freeze_panes(1,12)
            sheet.autofilter('A1:DY1')


        workbook.set_size(1800,1200)
        workbook.close()
Code Example #17
def update_prospect(year, team, fname, lname, category, value):

    print "\tupdate ", year, team, '\t', "{:<32}".format(
        fname + " " + lname), '\t', "{:<16}".format(category), '\t', value
    update_qry = """UPDATE minorleagueball_professional 
    SET %s = '%s'
    WHERE year = %s
    AND team = "%s"
    AND fname = "%s"
    AND lname = "%s";"""

    update_query = update_qry % (category, value, year, team, fname, lname)
    # raw_input(update_query)
    db.query(update_query)
    db.conn.commit()
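
Like most of these examples, update_prospect interpolates values into SQL with % formatting, which breaks on embedded quotes and is unsafe with untrusted input. The custom db wrapper's parameter support is not shown in these listings, so here is a sketch of the same update using the standard-library sqlite3 DB-API instead (the connection, table layout, and column whitelist below are assumptions for illustration):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""CREATE TABLE minorleagueball_professional
    (year INT, team TEXT, fname TEXT, lname TEXT, eta TEXT)""")

def update_prospect_safe(conn, year, team, fname, lname, category, value):
    # Column names cannot be bound as parameters, so check `category`
    # against a whitelist before formatting it into the statement.
    assert category in ("eta",)  # hypothetical column list
    query = ("UPDATE minorleagueball_professional SET %s = ? "
             "WHERE year = ? AND team = ? AND fname = ? AND lname = ?"
             % category)
    conn.execute(query, (value, year, team, fname, lname))
    conn.commit()
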
Code Example #18
def initiate():
    start_time = time()

    clear_ids = "UPDATE minorleagueball_professional SET prospect_id = 0;"
    db.query(clear_ids)
    db.conn.commit()

    process_primary_update()
    process_secondary_update()

    end_time = time()
    elapsed_time = float(end_time - start_time)
    print "\n\nminorleagueball_prospect_id_grade_updater.py"
    print "time elapsed (in seconds): " + str(elapsed_time)
    print "time elapsed (in minutes): " + str(elapsed_time / 60.0)
Code Example #19
def export_tv_show_csv():

    qry = """SELECT
    name, genre, seasons, episodes, 
    episode_length, episodes_per_season, approx_runtime_hours,
    peak, consistency, premise, plot, information_gain, desired_effects, wit, length, timelessness, adjustment, overall_grade
    FROM (SELECT name FROM tv_show_grades UNION SELECT name FROM tv_show_data) a
    LEFT JOIN tv_show_grades USING (name)
    LEFT JOIN tv_show_data USING (name)
    ORDER BY overall_grade DESC;"""

    res = db.query(qry)

    csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/personal_tvShows.csv"
    csv_file = open(csv_title, "wb")
    append_csv = csv.writer(csv_file)
    headers = [
        'name', 'genre', 'seasons', 'episodes', 'episode_length',
        'episodes_per_season', 'approx_runtime_hours', 'peak', 'consistency',
        'premise', 'plot', 'information_gain', 'desired_effects', 'wit',
        'length', 'timelessness', 'adjustment', 'overall_grade'
    ]
    append_csv.writerow(headers)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str, ):
                row[i] = '"' + "".join(
                    [l if ord(l) < 128 else "" for l in val]).replace(
                        "<o>", "").replace("<P>", "").replace("\n", "  ") + '"'
        append_csv.writerow(row)
Code Example #20
def process(author, topic, medium, keyword):

    if (author == '' and topic == '' and medium == '' and keyword == ''):
        sys.exit('\n\nplease specify an author, topic, medium, or keyword\n\n')

    quote_lookup = """SELECT 
    estimated_date, author, topic, medium, source, source_location, quote
    FROM quotes
    WHERE 1
    AND (author LIKE "%%%s%%" AND topic LIKE "%%%s%%" AND medium LIKE "%%%s%%" AND quote LIKE "%%%s%%")
    ;""" % (author, topic, medium, keyword)

    res = db.query(quote_lookup)

    for i, row in enumerate(res):
        _date, _author, _topic, _medium, _source, _sourceLocation, _quote = row

        print "\n-----------------------------\n"

        print "\t" + "Quote #" + str(i+1) + " of " + str(len(res)) + ":"

        print "\tTopic: " + str(_topic)

        src = ""
        if _source is None:
            src = _medium
        else:
            src = _source
        print "\t" + "On " + str(_date) + ", " + _author + " says via " + src + ":\n\"\"\""

        print _quote 

        print "\"\"\"\n"
Code Example #21
def comment_updater(boulder_name, area, sub_area, update_comment):
    qry = """SELECT *
    FROM boulders_tried
    WHERE boulder_name = "%s"
    AND area = "%s"
    AND sub_area = "%s"
    AND completed = "TRUE";"""

    query = qry % (boulder_name, area, sub_area)

    res = db.query(query)

    if len(res) != 1:
        print "\n\n\nERROR", boulder_name, "HAS LENGTH", str(len(res))
    else:
        entry = {}
        _date, est_time, boulder_name, area, sub_area, v_grade, est_attempts, est_minutes, return_interest, session_num, completed, first_comment = res[0]

        entry["est_date"] = _date
        entry["est_time"] = est_time
        entry["boulder_name"] = boulder_name
        entry["area"] = area
        entry["sub_area"] = sub_area
        entry["v_grade"] = v_grade
        entry["est_attempts"] = est_attempts
        entry["est_minutes"] = est_minutes
        entry["return_interest"] = return_interest
        entry["session_num"] = session_num
        entry["completed"] = "TRUE"
        entry["comment"] = update_comment

        db.insertRowDict(entry, 'boulders_tried', insertMany=False, replace=True, rid=0, debug=1)
        db.conn.commit()
Code Example #22
def process_returnInterest():
    csv_path = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/boulders_returnInterest.csv"
    csv_file = open(csv_path, "wb")
    append_csv = csv.writer(csv_file)
    csv_header = ["Estimated Date", "Estimated Time", "Boulder Name", "Area", "Sub Area", "V Grade", "Estimated Attempts", "Estimated Minutes", "Return Interest", "Comment"]
    append_csv.writerow(csv_header)
    
    qry = """SELECT 
    bt.est_date, bt.est_time,
    bt.boulder_name, bt.area, bt.sub_area,
    bt.v_grade, bt.est_attempts, bt.est_minutes, bt.return_interest, bt.comment
    FROM boulders_tried bt
    LEFT JOIN boulders_completed bc USING (boulder_name, area)
    JOIN (SELECT boulder_name, area, max(est_date) AS est_date FROM boulders_tried GROUP BY boulder_name, area) md USING (boulder_name, area, est_date)
    WHERE bc.ascent_date IS NULL
    ORDER BY est_date DESC, est_time DESC;"""

    res = db.query(qry)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str,unicode):
                row[i] = '"' + "".join([l if ord(l) < 128 else "" for l in val]).replace("<o>","").replace("<P>","").replace("\n","  ") + '"'
        append_csv.writerow(row)
Code Example #23
def generate_body(author, topic, medium, keyword, count, to_address):
    sub = 'Daily Quotes [%s]' % (str(date.today()))

    quote_lookup = """SELECT 
    QuoteID, estimated_date, author, topic, medium, source, source_location, quote
    FROM quotes
    WHERE 1
    AND (author LIKE "%%%s%%" AND topic LIKE "%%%s%%" AND medium LIKE "%%%s%%" AND quote LIKE "%%%s%%")
    ORDER BY rand()
    LIMIT %s
    ;""" % (author, topic, medium, keyword, count)
    res = db.query(quote_lookup)

    mesg = ''
    for i, row in enumerate(res):
        _id, _date, _author, _topic, _medium, _source, _sourceLocation, _quote = row

        mesg += "\n\t" + "Quote #" + str(i+1) + " (QuoteID " + str(_id) + ") of " + str(len(res)) + ":"

        mesg += "\n\tTopic: " + str(_topic)

        src = ""
        if _source is None:
            src = _medium
        else:
            src = _source
        mesg += "\n\t" + "On " + str(_date) + ", " + _author + " says via " + src + ":\n\"\"\""

        mesg += "\n" + _quote 

        mesg += "\n\"\"\"\n"

        mesg += "\n------------------------------------------------------------------------------------\n"

    email(sub, mesg, to_address)
Code Example #24
def query_listings(search_type, mesg, ids):

    qry = """SELECT cl_id, title, url, sub_site, has_image, has_map, list_price FROM cars_current 
WHERE 1
AND cl_id NOT IN (SELECT cl_id FROM _email_ids)
AND mackenzie_search = '%s'
ORDER BY has_image+has_map DESC, list_price ASC"""

    query = qry % (search_type)

    # raw_input(query)

    res = db.query(query)

    j = 0

    for row in res:
        cl_id, title, url, sub_site, has_image, has_map, list_price = row

        ids.append(cl_id)

        mesg += str(j + 1) + '. ' + title
        # mesg += '\n' + str(cl_id) + ' : ' + str(bool(has_image)) + ' image : ' + str(bool(has_map)) + ' map'
        mesg += '\n$' + str(list_price) + ' : ' + str(sub_site) + '\n'
        mesg += '\t' + url + '\n\n'
        j += 1

    return mesg
Code Example #25
def process_returnInterest():
    csv_path = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/boulders_returnInterest.csv"
    csv_file = open(csv_path, "wb")
    append_csv = csv.writer(csv_file)
    csv_header = [
        "Estimated Date", "Estimated Time", "Boulder Name", "Area", "Sub Area",
        "V Grade", "Estimated Attempts", "Estimated Minutes",
        "Return Interest", "Comment"
    ]
    append_csv.writerow(csv_header)

    qry = """SELECT 
    bt.est_date, bt.est_time,
    bt.boulder_name, bt.area, bt.sub_area,
    bt.v_grade, bt.est_attempts, bt.est_minutes, bt.return_interest, bt.comment
    FROM boulders_tried bt
    LEFT JOIN boulders_completed bc USING (boulder_name, area)
    JOIN (SELECT boulder_name, area, max(est_date) AS est_date FROM boulders_tried GROUP BY boulder_name, area) md USING (boulder_name, area, est_date)
    WHERE bc.ascent_date IS NULL
    ORDER BY est_date DESC, est_time DESC;"""

    res = db.query(qry)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str, unicode):
                row[i] = '"' + "".join(
                    [l if ord(l) < 128 else "" for l in val]).replace(
                        "<o>", "").replace("<P>", "").replace("\n", "  ") + '"'
        append_csv.writerow(row)
Code Example #26
def adjust_fg_names(full_name):
    """
    Splits a player's full name into first and last names and returns those values.
    It also adjusts a player's name if it has been listed non-ideally, so we can better match them to the professional_prospects table.
    """

    player_mapper = {}

    qry = """SELECT wrong_name
    , right_fname
    , right_lname
    FROM NSBL.name_mapper nm
    ;"""

    res = db.query(qry)
    for row in res:
        wrong, right_fname, right_lname = row
        player_mapper[wrong] = [right_fname, right_lname]

    if full_name in player_mapper:
        fname, lname = player_mapper.get(full_name)
        full_name = fname + " " + lname
        return full_name, fname, lname
    else:
        fname, lname = [
            full_name.split(" ")[0], " ".join(full_name.split(" ")[1:])
        ]
        return full_name, fname, lname
Code Example #27
def get_yesterdaysPlayers(days=1):
    player_list = {}

    qry = """SELECT 
    player_id, 
    CONCAT(fname, ' ', lname) as 'player_name', 
    LEFT(MIN(season_id),4) as 'start_year', 
    LEFT(MAX(season_id),4)+1 as 'end_year',
    COUNT(*) as shot_attempts
    FROM (SELECT player_id, season_id FROM shots WHERE season_type = 'Reg' AND game_date >= (CURDATE() - INTERVAL %s DAY)) yesterdays_players
    JOIN players USING (player_id)
    GROUP BY player_id
    ORDER BY shot_attempts DESC;"""

    query = qry % (days)
    # raw_input(query)

    res = db.query(query)
    if res == ((None, None, None, None, None),):
        return None

    for row in res:
        p_id, p_name, start_year, end_year, shot_attempts = row
        player_list[p_id] = [str(p_name), int(start_year), int(end_year)]

    return player_list
Code Example #28
def get_plist(backfill=False):
    p_list = {}

    query = """SELECT team_id, city, tname, start_year, end_year, end_year-GREATEST(1996,start_year) AS seasons_cnt
    FROM teams
    WHERE end_year >= 1997
    ORDER BY team_id ASC, end_year DESC"""
    res = db.query(query)

    for row in res:
        team_id, city, team_name, start_year, end_year, seasons_cnt = row

        team_search_name = city.replace(" ", "_") + "_" + team_name.replace(
            " ", "_")

        if backfill is True:
            if os.path.exists(os.getcwd() + '/shot_charts_team/' +
                              team_search_name + '(' + str(team_id) + ')'):
                continue

        # a filter for which teams to update
        p_list[city.replace(' ', '_') + '_' + team_name.replace(' ', '_') +
               '(' + str(team_id) +
               ')'] = [team_id, city, team_name, start_year, end_year]

    return p_list
Code Example #29
def process(zips_list, observed_list, hb):
    if hb == 'hitters':
        q = """SELECT YEAR, player_name,
        n.babip AS sim_babip, z.babip AS zips_babip
        FROM processed_compWAR_offensive n
        JOIN zips_WAR_hitters_comp z USING (YEAR, player_name)
        WHERE YEAR >= 2011
        AND YEAR < 2019
        AND n.pa > 400;
        """
    else:
        q = """SELECT YEAR, player_name,
        n.babip AS sim_babip, z.babip AS zips_babip
        FROM register_pitching_analytical n
        JOIN zips_WAR_pitchers_comp z USING (YEAR, player_name)
        WHERE YEAR >= 2011
        AND YEAR < 2019
        AND bip > 300;
        """

    res = db.query(q)

    for row in res:
        year, player_name, sim, zips = row

        zips_list.append(float(zips))
        observed_list.append(float(sim))
Code Example #30
def export_to_csv():


    qry = """SELECT
    show_name, genre, seasons, episodes, 
    episode_length, episodes_per_season, approx_runtime_hours,
    peak_grade, consistency_grade, adjustment, overall_grade
    FROM (SELECT SHOW_name FROM tv_show_grades UNION SELECT show_name FROM tv_show_data) a
    LEFT JOIN tv_show_grades USING (show_name)
    LEFT JOIN tv_show_data USING (show_name)
    ORDER BY overall_grade DESC;"""

    res = db.query(qry)

    csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/personal_tvShows.csv"
    csv_file = open(csv_title, "wb")
    append_csv = csv.writer(csv_file)
    headers = ['show_name', 'genre', 'estimated_seasons', 'estimated_episodes', 'episode_length', 'episodes_per_season', 'approx_runtime_hours', 'peak_grade', 'consistency_grade', 'adjustment', 'overall_grade']
    append_csv.writerow(headers)

    for row in res:
        row = list(row)
        for i, val in enumerate(row):
            if type(val) in (str,):
                row[i] = '"' + "".join([l if ord(l) < 128 else "" for l in val]).replace("<o>","").replace("<P>","").replace("\n","  ") + '"'
        append_csv.writerow(row)
Code Example #31
def check_for_updates(): ##### change me
    print "\n\tchecking for boulders to update"
    update_qry = """SELECT 
    euro_grade, soft_hard, flash, boulder_name, area, sub_area, fa, est_attempts, comment, stars, ascent_date, recommended
    FROM boulders_completed
    WHERE updated = "FALSE";"""

    update_res = db.query(update_qry)

    if update_res == ():
        print "\t\tNo boulders to update!"
    else:
        for i, row in enumerate(update_res):
            print "\n\nUpdate %s of %s" % (i+1, len(update_res))

            keys = ["grade", "soft_hard", "style", "name", "area", "sub area", "2ndGo", "FA", "comment", "stars", "date", "recommended"]

            _grade2, soft_hard, _flash, _name, _area, _subarea, _fa, attempts, _comment, _stars, _date, _recommended = row
            
            if _flash is None:
                _flash = "Redpoint"

            if attempts == 2:
                go2 = "Second GO"
            else:
                go2 = "None"

            vals = [_grade2, soft_hard, _flash, _name, _area, _subarea, go2, _fa, _comment, _stars, _date, _recommended]
            for k, v in zip(keys, vals):
                print ("\t"+k+":"), "\n", v

            # if (((i+1)%3 == 0) or ((i+1) == len(update_res))):
            raw_input("\n\nGo to 8a.nu to update!\n\n")
Code Example #32
def update_boulders():
    qry = """SELECT 
    ascent_date, boulder_name, area, sub_area, v_grade, bc.comment,
    TIMEDIFF(bc.final_time, SEC_TO_TIME(fin.final_min*60)) AS 'final_time', 
    fin.final_min,
    fin.final_att,
    bc.est_sessions,
    (bc.est_attempts - IFNULL(bt.est_attempts,0)) AS 'update_attempts',
    (bc.est_minutes - IFNULL(bt.est_minutes,0)) AS 'update_minutes',
    (bc.est_sessions - IFNULL(bt.est_sessions,0)) AS 'update_sessions',
    (1 - IFNULL(bt.sent,0)) AS 'update_completed'
    FROM boulders_completed bc
    LEFT JOIN(
        SELECT 
        boulder_name, area, sub_area, SUM(est_attempts) AS est_attempts, SUM(est_minutes) AS est_minutes, COUNT(*) AS est_sessions,
        SUM(IF(completed="TRUE",1,0)) AS sent
        FROM boulders_tried
        GROUP BY boulder_name, area, sub_area
    ) bt USING (boulder_name, area, sub_area)
    LEFT JOIN(
        SELECT boulder_name, area, sub_area, 
        bc2.est_minutes-IFNULL(bt2.fail_minutes,0) AS 'final_min', 
        bc2.est_attempts-IFNULL(bt2.fail_attempts,0) AS 'final_att'
        FROM boulders_completed bc2
        LEFT JOIN (
            SELECT boulder_name, area, sub_area, SUM(est_attempts) AS fail_attempts, SUM(est_minutes) AS fail_minutes FROM boulders_tried WHERE completed = 'FALSE' GROUP BY boulder_name, area, sub_area
        ) bt2 USING (boulder_name, area, sub_area)
    ) fin USING (boulder_name, area, sub_area)
    WHERE bc.updated = "FALSE"
    ORDER BY ascent_date DESC, update_sessions DESC;"""

    res = db.query(qry)

    for row in res:
        process_update(row)
Code Example #33
def update_boulders():
    qry = """SELECT 
    ascent_date, boulder_name, area, sub_area, v_grade, bc.comment,
    TIMEDIFF(bc.final_time, SEC_TO_TIME(fin.final_min*60)) AS 'final_time', 
    fin.final_min,
    fin.final_att,
    bc.est_sessions,
    (bc.est_attempts - IFNULL(bt.est_attempts,0)) AS 'update_attempts',
    (bc.est_minutes - IFNULL(bt.est_minutes,0)) AS 'update_minutes',
    (bc.est_sessions - IFNULL(bt.est_sessions,0)) AS 'update_sessions',
    (1 - IFNULL(bt.sent,0)) AS 'update_completed'
    FROM boulders_completed bc
    LEFT JOIN(
        SELECT 
        boulder_name, area, sub_area, SUM(est_attempts) AS est_attempts, SUM(est_minutes) AS est_minutes, COUNT(*) AS est_sessions,
        SUM(IF(completed="TRUE",1,0)) AS sent
        FROM boulders_tried
        GROUP BY boulder_name, area, sub_area
    ) bt USING (boulder_name, area, sub_area)
    LEFT JOIN(
        SELECT boulder_name, area, sub_area, 
        bc2.est_minutes-IFNULL(bt2.fail_minutes,0) AS 'final_min', 
        bc2.est_attempts-IFNULL(bt2.fail_attempts,0) AS 'final_att'
        FROM boulders_completed bc2
        LEFT JOIN (
            SELECT boulder_name, area, sub_area, SUM(est_attempts) AS fail_attempts, SUM(est_minutes) AS fail_minutes FROM boulders_tried WHERE completed = 'FALSE' GROUP BY boulder_name, area, sub_area
        ) bt2 USING (boulder_name, area, sub_area)
    ) fin USING (boulder_name, area, sub_area)
    WHERE bc.updated != "FALSE"
    ORDER BY ascent_date DESC, update_sessions DESC;"""

    res = db.query(qry)

    for row in res:
        process_update(row)
Code Example #34
def process_defense(year):
    entries = []
    qry = """SELECT 
    r.team_abb, SUM(defense), SUM(position_adj), SUM(dWAR)
    FROM register_batting_primary r
    JOIN processed_compWAR_offensive o USING (player_name, team_abb, YEAR)
    JOIN processed_WAR_hitters w USING (player_name, team_abb, YEAR)
    WHERE r.year = %s
    GROUP BY r.team_abb;"""

    query = qry % (year)
    res = db.query(query)

    for row in res:
        team_abb, defense, pos_adj, dWAR = row

        entry = {}
        entry["year"] = year
        entry["team_abb"] = team_abb
        entry["defense"] = defense
        entry["position_adj"] = pos_adj
        entry["dWAR"] = dWAR

        entries.append(entry)

    return entries
Code Example #35
def adjust_minorleagueball_name(full_name, year, team_abb):
    """
    Splits a player's minorleagueball full name into first and last names and returns those values.
    It also adjusts a player's name if it has been listed non-ideally, so we can better match them to the professional_prospects table.
    """

    search_str = full_name.replace(" ",
                                   "") + "_" + str(year) + "_" + str(team_abb)

    player_mapper = {}

    qry = """SELECT wrong_name
    , right_fname
    , right_lname
    FROM NSBL.name_mapper nm
    ;"""

    res = db.query(qry)
    for row in res:
        wrong, right_fname, right_lname = row
        player_mapper[wrong] = [right_fname, right_lname]

    if search_str in player_mapper:
        fname, lname = player_mapper.get(search_str)
        full_name = fname + " " + lname
        return full_name, fname, lname
    else:
        fname, lname = [
            full_name.replace("  ", " ").split(" ")[0],
            " ".join(full_name.split(" ")[1:])
        ]
        return full_name, fname, lname
Code Example #36
def initiate():
    start_time = time()

    print "\ndeleting backfilled tried boulders"
    db.query("DELETE FROM boulders_tried WHERE return_interest IS NULL AND completed = 'FALSE';")
    db.conn.commit()

    print "\nupdating tried boulders"
    update_boulders()

    print "\nupdating session numbers on tried boulders"
    update_sessions()

    end_time = time()
    elapsed_time = float(end_time - start_time)
    print "\n\ncompleted_to_tried.py"
    print "time elapsed (in seconds): " + str(elapsed_time)
    print "time elapsed (in minutes): " + str(elapsed_time/60.0)
Code Example #37
def update_sessions():
    qry = """SELECT boulder_name, area, sub_area
    FROM boulders_tried
    WHERE est_date > '0000-00-00'
    GROUP BY boulder_name, area, sub_area;"""

    res = db.query(qry)

    for row in res:
        boulder_name, area, sub_area = row

        ind_qry = """SELECT *
        FROM boulders_tried
        WHERE boulder_name = "%s"
        AND area = "%s"
        AND sub_area = "%s"
        ORDER BY est_date, est_time;"""

        ind_query = ind_qry % (boulder_name, area, sub_area)

        ind_res = db.query(ind_query)

        for cnt, ind_row in enumerate(ind_res):
            entry = {}
            _date, est_time, boulder_name, area, sub_area, v_grade, est_attempts, est_minutes, return_interest, session_num, completed, first_comment = ind_row

            entry["est_date"] = _date
            entry["est_time"] = est_time
            entry["boulder_name"] = boulder_name
            entry["area"] = area
            entry["sub_area"] = sub_area
            entry["v_grade"] = v_grade
            entry["est_attempts"] = est_attempts
            entry["est_minutes"] = est_minutes
            entry["return_interest"] = return_interest
            entry["session_num"] = cnt+1
            entry["completed"] = completed
            entry["comment"] = first_comment

            db.insertRowDict(entry, 'boulders_tried', insertMany=False, replace=True, rid=0, debug=1)
            db.conn.commit()
Code Example #38
def update_grades():
    qry = "SELECT * FROM tv_show_grades;"
    res = db.query(qry)

    for row in res:
        show_name, genre, episode_length, peak, consistency, adj, runtime, grade = row

        grade = (float(peak)*3 + float(consistency)*2)/5 + max(adj,0)

        entry = {"show_name":show_name, "genre":genre, "episode_length":episode_length, "peak_grade":peak, "consistency_grade":consistency, "adjustment":adj, "approx_runtime_hours":runtime, "overall_grade":grade}

        db.insertRowDict(entry, 'tv_show_grades', insertMany=False, replace=True, rid=0, debug=1)
        db.conn.commit()
Code Example #39
def update_rankings():
    qry = """SELECT * 
    FROM tv_show_data;"""

    res = db.query(qry)

    for row in res:
        entry = {}
        show_name, seasons, episodes, eps_per_season = row

        row_qry = """SELECT 
        show_name, genre, episode_length, peak_grade, consistency_grade, adjustment, overall_grade
        FROM tv_show_grades
        WHERE show_name = "%s";"""

        row_query = row_qry % (show_name)

        try:
            foo, genre, ep_len, peak, consistency, adj, grade = db.query(row_query)[0]
            runtime_hrs = float(episodes*ep_len)/60.0
        except (IndexError, TypeError):
            update_entry = {"show_name":show_name}
            db.insertRowDict(update_entry, 'tv_show_grades', insertMany=False, replace=True, rid=0, debug=1)
            db.conn.commit()
            ep_len, genre, peak, consistency, adj, runtime_hrs, grade = 0,0,0,0,0,0,0

        entry['show_name'] = show_name
        entry['genre'] = genre
        entry['episode_length'] = ep_len
        entry['peak_grade'] = peak
        entry['consistency_grade'] = consistency
        entry['adjustment'] = adj
        entry['approx_runtime_hours'] = runtime_hrs
        entry['overall_grade'] = grade

        db.insertRowDict(entry, 'tv_show_grades', insertMany=False, replace=True, rid=0, debug=1)
        db.conn.commit()
Code Example #40
def update_podcast_grades():
    qry = "SELECT * FROM podcasts;"
    res = db.query(qry)

    for row in res:
        podcast_name, genre, peak, consistency, adj, overall = row

        try:
            grade = (float(peak)*3 + float(consistency)*2)/5 + max(adj,0)
        except TypeError:
            grade = 0

        entry = {"podcast_name":podcast_name, "genre":genre, "peak_grade":peak, "consistency_grade":consistency, "adjustment":adj, "overall_grade":grade}

        db.insertRowDict(entry, 'podcasts', insertMany=False, replace=True, rid=0, debug=1)
        db.conn.commit()
Code Example #41
def check_for_un_updated():
    print "\n\tchecking for boulders to delete (or add)"
    qry = """SELECT ascent_date, boulder_name, area, sub_area, v_grade, euro_grade, updated
    FROM boulders_completed
    WHERE updated = 'FALSE';"""

    res = db.query(qry)

    if res == ():
        print "\t\tNo boulders to update!"
    else:
        for i, row in enumerate(res):
            print "\nUpdate %s of %s" % (i+1, len(res))

            keys = ["date", "name", "area", "sub area", "hueco", "euro", "updated"]

            for k, v in zip(keys, row):
                print ("\t"+k+":"), "\n", v
        raw_input("\n\nUpdate these problems in the database!\n\n")
Code Example #42
def process_completed():
    csv_path = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/boulders_completed.csv"
    csv_file = open(csv_path, "wb")
    append_csv = csv.writer(csv_file)
    csv_header = ["Date", "Boulder Name", "Area", "Sub Area", "V Grade", "Euro Grade", "8a.nu Points", "Flash", "Soft/Hard", "Stars (1-3)", "FA", "Recommended", "Estimated Final Time", "Estimated Attempts", "Estimated Minutes", "Estimated Session #", "Comment", "Updated"]
    append_csv.writerow(csv_header)

    qry = "SELECT * FROM boulders_completed ORDER BY ascent_date DESC;"

    res = db.query(qry)

    for row in res:
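        # The last two columns are the free-text comment and the "updated"
        # flag; keep only the comment text after the embedded
        # "Total_Duration {...}." metadata block (see Code Example #43),
        # strip the "*Bounty Extra Soft*." marker, then re-append both.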
        upd = row[-1]
        comm = "".join(row[-2].split("}.")[1:]).replace("*Bounty Extra Soft*.","").strip()
        row = row[:-2]
        row = list(row)
        row.append(comm)
        row.append(upd)
        for i, val in enumerate(row):
            if type(val) in (str,unicode):
                row[i] = '"' + "".join([l if ord(l) < 128 else "" for l in val]).replace("<o>","").replace("<P>","").replace("\n","  ") + '"'
        append_csv.writerow(row)
Code Example #43
def process_8a(url, table_name):
    html = requests.get(url, headers=headers)
    soup = BeautifulSoup(html.content, "lxml")
    print "\t\tgrabbed html"

    if table_name is not None:
        qry = "UPDATE %s SET updated = 'FALSE';" % table_name
        db.query(qry)
        db.conn.commit()

    ascent_data = []

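    # The ascent rows sit between the HTML comments "Ascents" and
    # "List Options"; collect every sibling node between those two markers.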
    for comment in soup.find_all(string=lambda text:isinstance(text, Comment)):
        if comment.strip() == "Ascents":
            next_node = comment.next_sibling

            while next_node and next_node.next_sibling:
                ascent_data.append(next_node)
                next_node = next_node.next_sibling

                if not next_node.name and next_node.strip() == "List Options":
                    break

    for item in ascent_data:
        if str(item).strip() != "":
            ascents_info = item
            break

    ascents = ascents_info.find_all("tr")

    for i, ascent in enumerate(ascents):
        row = []

        ascent_cells = ascent.findAll("td")

        if len(ascent_cells) == 9:
            entry = {}
            ascent_date = ascent_cells[0]
            grade = ascent_cells[1]
            flash = ascent_cells[2]
            boulder_name = ascent_cells[3]
            recommended = ascent_cells[4]
            areas = ascent_cells[5].getText()
            soft_hard_fa = ascent_cells[6].getText()
            comment = ascent_cells[7]
            stars = len(ascent_cells[8].getText())

            for span_tag in boulder_name.find("span"):
                span_tag.replace_with("")
            boulder_name = boulder_name.getText().strip()
            if boulder_name[0] == "*":
                boulder_name = boulder_name[1:]

            # print str(i-1) + " of " + str(len(ascents)-2) + ": " + boulder_name

            try:
                area = areas.split("/")[0].strip()
                sub_area = areas.split("/")[1].strip()
            except IndexError:
                area = areas.strip()
                sub_area = areas.strip()

            for span_tag in ascent_date.find("span"):
                span_tag.replace_with("")
            ascent_date = ascent_date.getText().strip()
            ascent_date = datetime.strptime(ascent_date, "%y-%m-%d").date()

            grade_JS = grade.getText()
            grade_qry = """SELECT font, hueco, 8a_points
            FROM boulders_grades
            WHERE 8a_javascript = "%s";"""
            grade_query = grade_qry % (grade_JS)
            euro_grade, v_grade, pts_base = db.query(grade_query)[0]

            if flash.find("img")["src"] == "/scorecard/images/56f871c6548ae32aaa78672c1996df7f.gif":
                flash = "FLASH"
            elif flash.find("img")["src"] == "/scorecard/images/e37046f07ac72e84f91d7f29f8455b58.gif":
                flash = "ONSIGHT"
            else:
                flash = None

            if "Soft" in soft_hard_fa:
                soft_hard = "SOFT"
            elif "Hard" in soft_hard_fa:
                soft_hard = "HARD"
            else:
                soft_hard = ""

            if "FA" in soft_hard_fa:
                fa = "FA"
            else:
                fa = None

            if flash == "FLASH":
                pts = pts_base+50
            elif flash == "ONSIGHT":
                pts = pts_base+100
            else:
                pts = pts_base

            if fa == "FA":
                pts += 20

            if recommended.find("img")["src"] == "/scorecard/images/UserRecommended_1.gif":
                recommended = "RECOMMENDED"
            else:
                recommended = None

            for span_tag in comment.find("span"):
                span_tag.replace_with("")
            comment = comment.getText().strip().replace("\n", "")

            if "Total_Duration" in comment:
                duration_dict = comment.split("Total_Duration")[1].split("}")[0].replace("=","").strip()+"}"
                try:
                    duration_dict = ast.literal_eval(duration_dict)
                    final_time = duration_dict.get('Final Time')
                    attempts = duration_dict.get('Attempts')
                    minutes = duration_dict.get('Minutes')
                    session = duration_dict.get('Sessions')
                except SyntaxError:
                    print '\nERROR:', boulder_name, '\n', duration_dict, '\n----------------\n'
                    final_time = None
                    attempts = 0
                    minutes = 0
                    session = 0
            else:
                final_time = None
                attempts = None
                minutes = None
                session = None

            if "*Bounty Extra Soft*." in comment:
                soft_hard = "BOUNTY EXTRA SOFT"


            if attempts == 2:
                pts += 2

            updated = "TRUE"

            entry_columns = ["ascent_date", "boulder_name", "area", "sub_area", "v_grade", "euro_grade", "8a_pts", "flash", "soft_hard", "stars", "fa", "recommended", "final_time", "est_attempts", "est_minutes", "est_sessions", "comment", "updated"]
            row = [ascent_date, boulder_name, area, sub_area, v_grade, euro_grade, pts, flash, soft_hard, stars, fa, recommended, final_time, attempts, minutes, session, comment, updated]

            for j, item in enumerate(row):
                if type(item) in (str, unicode) and item != '':
                    row[j] = "".join([k if ord(k) < 128 else "" for k in item])

            entry = {} 
            for i,j in zip(entry_columns, row):
                entry[i] = j

            if table_name is not None:
                db.insertRowDict(entry, table_name, insertMany=False, replace=True, rid=0, debug=1)
                db.conn.commit()
Code Example #44
from py_db import db
import bs4
import time
import datetime
import csv
import os

db = db('personal')

base_path = os.getcwd()

f = open('/Users/connordog/Dropbox/__TempFiles/bookmarks.html','r')
soup = bs4.BeautifulSoup(f.read(), 'html.parser')
f.close()

db.query("Truncate bookmarks;")
db.conn.commit()

categories = [
    'Climbing', 'Multi-Sport Articles', 'Multi-Sport Sites', 'Soccer', 
    'Bash', 'General Programming', 'Git', 'LaTeX', 'Python', 'SQL', 'Ubuntu',
    'Bayesian Articles', 'Data Science Articles', 'Data Science Documentation', 
    'Decision Making and Psychology', 'Individual Profiles', 'Lifestyle and Self-Improvement',
    ]

for sport in ('MLB', 'NHL', 'NBA', 'NFL'):
    for desc in ('Coaching and Player Development', 'Gameplay and Analysis', 'General Articles', 'General Sites', 'Metric Descriptions', 'Profiles', 'Sport Science'):
        cat_label = sport + ' ' + desc
        categories.append(cat_label)
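
# Sketch of a plausible continuation: the snippet stops after building the
# category list, so walking the Netscape-format bookmarks file is assumed here.
# In that format each folder is an <h3> heading followed by a <dl> of <a> links;
# the 'category'/'title'/'url' column names are illustrative, not confirmed.
for h3 in soup.find_all('h3'):
    category = h3.getText().strip()
    if category not in categories:
        continue
    dl = h3.find_next('dl')  # the folder's link list follows its heading
    if dl is None:
        continue
    for a in dl.find_all('a'):
        entry = {'category': category,
                 'title': a.getText().strip(),
                 'url': a.get('href')}
        db.insertRowDict(entry, 'bookmarks', insertMany=False, replace=True, rid=0)
db.conn.commit()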

Code example #45
from datetime import datetime  # needed for the est_time parsing below

def initiate():
    entry = {}
    print "\nUpdate Attempted Boulder\n"

    last_qry = "SELECT boulder_name, area, sub_area, v_grade, est_attempts, est_minutes, est_date FROM boulders_tried ORDER BY est_date DESC, est_time DESC LIMIT 1;"

    last_entry = db.query(last_qry)[0]
        
    last_name, last_area, last_sub, last_grade, last_attempts, last_minutes, last_date = last_entry

    entry['boulder_name'] = last_name
    entry['area'] = last_area
    entry['sub_area'] = last_sub
    entry['v_grade'] = last_grade
    entry['est_attempts'] = last_attempts
    entry['est_minutes'] = last_minutes
    entry['comment'] = ""
    entry['est_date'] = last_date
    entry['est_time'] = datetime.strptime('00:00:00','%H:%M:%S').time()



    # step through the interactive prompts; each process_* helper returns the
    # next step index plus parallel lists of entry keys and values to record
    i = 1
    while i < 11:
        if 1 <= i <= 7:
            i, cats, entry_vals = process_basic(i, entry)
        elif 8 <= i <= 9:
            i, cats, entry_vals = process_time(i)
        elif i == 10:
            i, cats, entry_vals = process_complete(i, entry)

        for cat, val in zip(cats, entry_vals):
            if cat is not None:
                entry[cat] = val

    print "\n"

    if entry.get('sub_area') is None:
        sa = ""
    else:
        sa = "\nAND sub_area = '%s'" % (entry.get('sub_area'))

    prev_qry = """SELECT est_date, est_time
    FROM boulders_tried
    WHERE boulder_name = "%s"
    AND area = "%s"%s
    AND (est_date < "%s"
        OR
        (est_date = "%s" AND est_time < "%s")
    );"""

    prev_query = prev_qry % (entry.get('boulder_name'), entry.get('area'), sa, entry.get('est_date'), entry.get('est_date'), entry.get('est_time'))

    prev_cnt = db.query(prev_query)

    # one session per earlier row for this boulder; this entry is the next one
    entry["session_num"] = len(prev_cnt) + 1

    db.insertRowDict(entry, 'boulders_tried', insertMany=False, replace=True, rid=0, debug=1)
    db.conn.commit()
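
# The process_* helpers above are defined elsewhere in this file; their contract
# is that each one prompts for a field and returns the next step index plus
# parallel lists of entry keys and values. A hypothetical sketch of one such
# helper (the field names for steps 1-7 are illustrative, not the original code):
def process_basic_sketch(i, entry):
    fields = [None, 'boulder_name', 'area', 'sub_area', 'v_grade',
              'est_attempts', 'est_minutes', 'comment']
    # keep the previous value on empty input
    val = raw_input("%s [%s]: " % (fields[i], entry.get(fields[i]))) or entry.get(fields[i])
    return i + 1, [fields[i]], [val]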
Code example #46
from datetime import datetime, timedelta  # needed for the final_time arithmetic below

def enter_completed(entry2):

    entry2['ascent_date'] = entry2.get('est_date')

    # anchor est_time on a dummy date so timedelta arithmetic works, then add this session's minutes
    prev_time = entry2.get("est_time")
    prev_dtime = datetime(100, 1, 1, prev_time.hour, prev_time.minute, prev_time.second)
    final_time = prev_dtime + timedelta(minutes=int(entry2.get("est_minutes")))
    entry2["final_time"] = final_time.time()

    grade_query = "SELECT MAX(font) AS font, MAX(8a_points) AS 8a_points FROM boulders_grades WHERE hueco = '%s' GROUP BY hueco" % (entry2.get("v_grade"))
    grade_res = db.query(grade_query)
    if len(grade_res) != 1:
        print "\n\n\nERROR", str(entry2.get("v_grade")), "HAS LENGTH", str(len(grade_res))
    else:
        euro_grade, pts_base = grade_res[0]

    entry2['euro_grade'] = euro_grade
    pts = pts_base

    previous_qry = """SELECT 
    SUM(est_attempts) AS attempts,
    SUM(est_minutes) AS minutes,
    COUNT(*) AS sessions
    FROM boulders_tried
    WHERE boulder_name = "%s"
    AND area = "%s"
    AND sub_area = "%s"
    AND completed != "TRUE"
    GROUP BY boulder_name, area;"""
    previous_query = previous_qry % (entry2.get("boulder_name"), entry2.get("area"), entry2.get("sub_area"))

    previous_res = db.query(previous_query)
    if len(previous_res) > 1:
        print "\n\n\nERROR", str(entry2.get("boulder_name")), "HAS LENGTH", str(len(previous_res))
    elif len(previous_res) == 0:
        prev_att, prev_min, prev_sess = [0,0,0]
    else:
        prev_att, prev_min, prev_sess = previous_res[0]

    est_attempts = int(entry2.get("est_attempts")) + prev_att
    entry2["est_attempts"] = est_attempts

    flash = None
    if est_attempts == 1:
        flash = "FLASH"
        pts += 50
    entry2["flash"] = flash

    if est_attempts == 2:
        pts += 2

    if entry2.get("FA") == "FA":
        pts += 20
        

    entry2["8a_pts"] = pts

    est_minutes = int(entry2.get("est_minutes")) + prev_min
    entry2["est_minutes"] = est_minutes

    est_sessions = 1 + int(prev_sess)
    entry2["est_sessions"] = est_sessions

    comment = "Total_Duration={'Final Time':'%02d:%02d', 'Attempts':%d, 'Minutes':%d, 'Sessions':%d}. %s" % (
        final_time.hour, final_time.minute, est_attempts, est_minutes, est_sessions, entry2.get("comment"))
    entry2['comment'] = comment

    updated = "FALSE"

    del entry2['est_time']
    del entry2['est_date']

    db.insertRowDict(entry2, 'boulders_completed', insertMany=False, replace=True, rid=0, debug=1)
    db.conn.commit()
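
# The point adjustments in enter_completed() mirror the scorecard scraper
# earlier in this document: +100 onsight, +50 flash, +2 second go, +20 first
# ascent. A small helper consolidating those rules (hypothetical, not in the
# original code):
def score_ascent(pts_base, attempts, onsight=False, fa=False):
    pts = pts_base
    if onsight:
        pts += 100
    elif attempts == 1:
        pts += 50   # flash
    elif attempts == 2:
        pts += 2    # second-go bonus
    if fa:
        pts += 20   # first ascent
    return pts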
Code example #47
def process_breakdown():
    csv_path = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/connor-r.github.io/csvs/boulders_yearlyBreakdown.csv"
    csv_file = open(csv_path, "wb")
    append_csv = csv.writer(csv_file)
    csv_header = ["Year", "8a Top 10 Points", "Climbing Days", "Completed Problems", "Tried Problems", "Completed per Day", "Tried per Day", "Success Rate"]
    append_csv.writerow(csv_header)
    
    qry = """SELECT 
    year AS 'Year', pts AS '8a Top 10 Points', 
    days AS 'Climbing Days', 
    completed_cnt AS 'Completed Problems', 
    tried_cnt AS 'Tried Problems', 
    ROUND(completed_cnt/days, 1) AS 'Completed per Day',
    ROUND(tried_cnt/days, 1) AS 'Tried per Day',
    ROUND(completed_cnt/tried_cnt,3) AS 'Success Rate'
    FROM(
        SELECT 'All Time' AS 'Year', SUM(8a_pts) AS 'pts'
        FROM(
            SELECT *
            FROM boulders_completed
            ORDER BY 8a_pts DESC
            LIMIT 10
        ) a
        UNION ALL
        SELECT 'Last Calendar Year' AS 'Year', SUM(8a_pts) AS 'pts'
        FROM(
            SELECT *
            FROM boulders_completed
            WHERE ascent_date > DATE_ADD(curdate(), INTERVAL -1 YEAR)
            ORDER BY 8a_pts DESC
            LIMIT 10
        ) b
        UNION ALL
        SELECT c_year AS 'Year', SUM(8a_pts) AS 'pts'
        FROM(
            SELECT YEAR(ascent_date) AS 'c_year', 8a_pts, @year, @auto,
            IF(@year=(@year:=YEAR(ascent_date)), @auto:=@auto+1, @auto:=1) indx 
            FROM boulders_completed, (SELECT @year:=0, @auto:=1) a
            ORDER BY YEAR(ascent_date), 8a_pts DESC
        ) c
        WHERE indx <= 10
        GROUP BY c_year DESC
    ) pts
    JOIN(
        SELECT 
        'All Time' AS 'Year', COUNT(DISTINCT est_date) AS days
        FROM boulders_tried
        WHERE YEAR(est_date) != 0
        UNION ALL
        SELECT 
        'Last Calendar Year' AS 'Year', COUNT(DISTINCT est_date) AS days
        FROM boulders_tried
        WHERE YEAR(est_date) != 0
        AND est_date > DATE_ADD(curdate(), INTERVAL -1 YEAR)
        UNION ALL
        SELECT 
        YEAR(est_date) AS 'Year', COUNT(DISTINCT est_date) AS days
        FROM boulders_tried
        WHERE YEAR(est_date) != 0
        GROUP BY YEAR(est_date)
        ORDER BY YEAR DESC
    ) days USING (YEAR)
    JOIN(
        SELECT 'All Time' AS 'Year', COUNT(*) AS completed_cnt
        FROM boulders_completed
        UNION ALL
        SELECT 'Last Calendar Year' AS 'Year', COUNT(*) AS completed_cnt
        FROM boulders_completed
        WHERE ascent_date > DATE_ADD(curdate(), INTERVAL -1 YEAR)
        UNION ALL
        SELECT 
        YEAR(ascent_date) AS 'Year', COUNT(*) AS completed_cnt
        FROM boulders_completed
        GROUP BY YEAR(ascent_date)
        ORDER BY YEAR DESC
    ) cnt USING (YEAR)
    JOIN(
        SELECT 'All Time' AS 'Year', COUNT(*) AS tried_cnt
        FROM boulders_tried
        UNION ALL
        SELECT 'Last Calendar Year' AS 'Year', COUNT(*) AS tried_cnt
        FROM boulders_tried
        WHERE est_date > DATE_ADD(curdate(), INTERVAL -1 YEAR)
        UNION ALL
        SELECT 
        YEAR(est_date) AS 'Year', COUNT(*) AS tried_cnt
        FROM boulders_tried
        GROUP BY YEAR(est_date)
        ORDER BY YEAR DESC
    ) tried USING (YEAR)
    ORDER BY YEAR='All Time' DESC, YEAR='Last Calendar Year' DESC, YEAR DESC;"""

    res = db.query(qry)

    for row in res:
        row = list(row)
        append_csv.writerow(row)
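
# The @year/@auto user variables in the pts subquery emulate a per-year top-10
# window, which the MySQL versions this was written for lacked. On MySQL 8+ the
# same ranking could be expressed with a window function (a sketch, not the
# original query):
top10_qry = """SELECT c_year, SUM(8a_pts) AS pts
FROM (
    SELECT YEAR(ascent_date) AS c_year, 8a_pts,
           ROW_NUMBER() OVER (PARTITION BY YEAR(ascent_date)
                              ORDER BY 8a_pts DESC) AS indx
    FROM boulders_completed
) ranked
WHERE indx <= 10
GROUP BY c_year;"""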