Example #1
0
def get_lineup_info(teampg_url, n_combo):
    '''
    For a team, gets lineup-by-lineup statistics for each individual combination of
    n_combo (an integer from 2 to 5) players

    Given:
        teampg_url, a string giving the url of the Basketball-Reference page for a team for a given season
        n_combo, an integer specifying the level of lineup combination (2, 3, 4, or 5 players)
    Return:
        df, a dataframe of the team's statistics for each possible n_combo lineup of players
    '''

    # Parse the team/season's lineup page
    main_url = teampg_url + '/lineups'
    soup = get_soup(main_url)

    # Find the table with all n_combo-man lineups
    tbl_string = 'lineups_' + str(n_combo) + '-man_'
    tbl = soup.find('table', {'id': tbl_string})

    # Map each lineup combination (row key) to its list of statistics from the
    # table, then convert to a dataframe. The last row is excluded, matching the
    # original behavior (it is a footer, not a lineup row).
    d = {}
    rows = tbl.find_all('tr')
    for row in rows[:-1]:
        items = row.find_all('td')
        if not items:
            # Header/separator rows contain only <th> cells; skip them
            # explicitly instead of swallowing the IndexError with a bare except.
            continue
        comb = items[0].text
        d[comb] = [j.text.strip() for j in items[1:]]
    df = pd.DataFrame.from_dict(d, orient='index')
    # Column labels come from the second header row; the first two <th> cells
    # (rank and lineup label) are not statistic columns, so drop them.
    df.columns = [k.text for k in rows[1].find_all('th')][2:]
    return df
Example #2
0
def get_season_teams(season):
    '''
    Returns the Basketball-Reference page urls for each team that played during a specified season

    Given:
        season, the end year of a given NBA regular season, either as an int
        (e.g. 2020) or as a hyphenated string (e.g. '2019-2020'); only the
        final year component is used to build the url
    Return:
        d, a dictionary of each team and the url of the team page for the given season
    '''

    # Resolve the season's end year. The original docstring promised int
    # support but the code required a hyphenated string; str() + [-1] accepts
    # both forms backward-compatibly.
    year = str(season).split('-')[-1]
    szn_url = 'https://www.basketball-reference.com/leagues/NBA_' + year + '.html'
    soup = get_soup(szn_url)

    # Find a table listing all teams
    tbl = soup.find('table', {'id': 'team-stats-per_game'})

    # Fill dictionary of all team names (keys) and team pages (values) for the
    # season. The final row is excluded, matching the original behavior.
    d = {}
    for row in tbl.find_all('tr')[:-1]:
        items = row.find_all('td')
        if not items:
            # Header rows contain only <th> cells; skip them explicitly
            # instead of relying on a bare except to swallow the IndexError.
            continue
        link = row.find('a')
        if link is None:
            # Rows without a team link (e.g. summary rows) carry no url.
            continue
        # Strip the '*' Basketball-Reference uses to mark playoff teams.
        team = items[0].text.replace('*', '')
        d[team] = link.get('href')
    return d
Example #3
0
from bokeh.models import Div

# Extract the visible text of each cell in a table row, dropping newlines.
extract_contents = lambda row: [x.text.replace('\n', '') for x in row]

# Indian Ministry of Health & Family Welfare COVID-19 statistics page.
URL = 'https://www.mohfw.gov.in/'
# Column labels used for the scraped table.
SHORT_HEADERS = [
    'SNo', 'State', 'Indian-Confirmed(Including Foreign Confirmed)', 'Cured',
    'Death'
]

# Fetch and parse the page.
response = requests.get(URL).content
soup = BeautifulSoup(response, 'html.parser')
# "Last updated" banner text shown on the page.
updatedon = soup.find('div', class_="status-update").find('h2').text
# Header cells of the first <tr> on the page.
header = extract_contents(soup.tr.find_all('th'))

# Collect every data row with exactly 5 cells; the final totals row has only
# 4 cells (no serial number), so pad it with a leading empty string.
stats = []
all_rows = soup.find_all('tr')
for row in all_rows:
    stat = extract_contents(row.find_all('td'))
    if stat:
        if len(stat) == 4:
            # last row
            stat = ['', *stat]
            stats.append(stat)
        elif len(stat) == 5:
            stats.append(stat)
# Fill in the totals row's serial number and give it a readable label.
stats[-1][0] = len(stats)
stats[-1][1] = "Total Cases"

# State/region names (column 1) for chart axis labels.
objects = []
for row in stats:
    objects.append(row[1])
y_pos = np.arange(len(objects))

# Per-state values for the chart; the totals row is excluded below.
# NOTE(review): loop body continues beyond this chunk — not visible here.
performance = []
for row in stats[:len(stats) - 1]: