Example #1
0
from app.sites import Site, Scraper
from app.utils import make_request, strip, TableParser
from data.data import nhl


def betdsi_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    betdsi.eu NHL lines page.

    Each game is a two-row table: row 0 holds the first team and its
    moneyline, row 1 the second.
    """
    tp = TableParser()
    tp.feed(strip(page))
    tables = tp.get_tables()

    # Get rid of garbage rows: real game tables have exactly two rows.
    tables = [t for t in tables if len(t) == 2]

    # Find team names and moneylines
    pairs = []
    for table in tables:
        name1 = table[0][1].strip()
        # FIX: name2 was the only field not stripped, leaving stray
        # whitespace in the second team's name.
        name2 = table[1][0].strip()
        moneyline1 = table[0][-1].strip()
        moneyline2 = table[1][-1].strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the betdsi.eu NHL moneyline scraper.
betdsi = Site("betdsi.eu", {})
betdsi.scrapers[nhl] = Scraper(
    url="http://www.betdsi.eu/hockey-betting",
    fetch_fn=make_request,
    extract_fn=betdsi_nhl_extractor,
)
Example #2
0
    tp.feed(page)
    tables = tp.get_tables()

    # Get rid of garbage lines in the table
    tables = tables[0][1:]

    # Find team names and moneylines
    pairs = []
    for i in range(len(tables)/2):
        name1 = tables[i*2][2].strip().split(" ")
        name1 = name1[0] if len(name1) == 1 else " ".join(name1[1:])

        name2 = tables[i*2+1][1].strip().split(" ")
        name2 = name2[0] if len(name2) == 1 else " ".join(name2[1:])

        moneyline1 = str(tables[i*2][-1]).strip()
        moneyline2 = str(tables[i*2+1][-1]).strip()

        if moneyline1 == '0' or moneyline2 == '0':
            continue

        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the mybookie.ag NHL moneyline scraper.
mybookie = Site("mybookie.ag", {})
mybookie.scrapers[nhl] = Scraper(
    url="http://mybookie.ag/sportsbook/nhl-betting-lines/",
    fetch_fn=make_request,
    extract_fn=mybookie_nhl_extractor,
)
Example #3
0
from app.sites import Site, Scraper
from app.utils import make_request, strip, TableParser
from data.data import nhl


def bodog_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sports.bodog.eu NHL lines page.

    Consecutive data rows form one game: even-indexed row is the first
    team, the following odd-indexed row is the second.
    """
    tp = TableParser()
    tp.feed(strip(page))
    tables = tp.get_tables()

    # Get rid of garbage rows: keep only wide data rows, drop the header.
    rows = [r for t in tables for r in t if len(r) > 3][1:]

    # Find team names and moneylines
    pairs = []
    # FIX: use // so integer division also works under Python 3, where
    # plain / yields a float and breaks range().
    for i in range(len(rows) // 2):
        name1 = rows[i * 2][2].strip()
        name2 = rows[i * 2 + 1][1].strip()
        moneyline1 = rows[i * 2][4].strip()
        moneyline2 = rows[i * 2 + 1][3].strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the sports.bodog.eu NHL moneyline scraper.
bodog = Site("sports.bodog.eu", {})
bodog.scrapers[nhl] = Scraper(
    url="http://sports.bodog.eu/sports-betting/nhl-hockey-lines.jsp",
    fetch_fn=make_request,
    extract_fn=bodog_nhl_extractor,
)
Example #4
0
from app.utils import make_request, strip, TableParser
from data.data import nhl


def bovada_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    bovada.lv NHL lines page.

    Consecutive cleaned tables form one game: even index is the first
    team, the following odd index is the second.
    """
    tp = TableParser()
    tp.feed(strip(page))
    tables = tp.get_tables()

    # Get rid of garbage lines in the table; for each remaining table
    # keep only its widest row.
    tables = tables[1:]
    for i, t in enumerate(tables):
        tables[i] = max(t, key=len)  # len directly; lambda was redundant

    # Find the team names and moneylines
    pairs = []
    # FIX: // keeps integer division on Python 3 (plain / returns a
    # float there and breaks range()).
    for i in range(len(tables) // 2):
        name1 = tables[i * 2][2].strip()
        name2 = tables[i * 2 + 1][1].strip()
        moneyline1 = tables[i * 2][4].strip()
        moneyline2 = tables[i * 2 + 1][3].strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the bovada.lv NHL moneyline scraper.
bovada = Site("bovada.lv", {})
bovada.scrapers[nhl] = Scraper(
    url="http://sports.bovada.lv/sports-betting/nhl-hockey-lines.jsp",
    fetch_fn=make_request,
    extract_fn=bovada_nhl_extractor,
)
Example #5
0
import re

from app.sites import Site, Scraper
from app.utils import make_request
from data.data import nhl


def sportsbook_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sportsbook.ag NHL lines page.

    Returns [] when the team and moneyline counts disagree (layout
    change or partially rendered page).
    """
    # Collapse whitespace so the regexes can match across line breaks.
    page_stripped = re.sub("\t|\r\n|\n", "", page)
    teams = re.findall('<span class="team" .+?>(.+?)</span>', page_stripped)
    moneylines = re.findall(' ate="ML[AH]"><div class="market">' + "(.+?)</div></a>", page_stripped)

    if len(teams) != len(moneylines):
        return []

    # Entries alternate by team; group consecutive entries into games.
    # FIX: // keeps integer division on Python 3, where plain / yields
    # a float and breaks range().
    pairs = [
        ((teams[i * 2], moneylines[i * 2]), (teams[i * 2 + 1], moneylines[i * 2 + 1])) for i in range(len(teams) // 2)
    ]
    return pairs


# Register the sportsbook.ag NHL moneyline scraper.
sportsbook = Site("sportsbook.ag", {})
sportsbook.scrapers[nhl] = Scraper(
    url="https://www.sportsbook.ag/sbk/sportsbook4/nhl-betting/nhl-game-lines.sbk",
    fetch_fn=make_request,
    extract_fn=sportsbook_nhl_extractor,
)
Example #6
0
from app.sites import Site, Scraper
from app.utils import make_request, TableParser
from data.data import nhl


def topbet_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    topbet.eu NHL sportsbook page."""
    parser = TableParser()
    parser.feed(page)
    all_tables = parser.get_tables()

    # Keep only real game tables: exactly three rows with a 0/0 header
    # pair in the first row.
    games = [t for t in all_tables if len(t) == 3 and t[0][0] == t[0][1] == 0]

    # Rows 1 and 2 carry the two teams: column 1 is the name, column 5
    # the moneyline.
    pairs = []
    for game in games:
        first, second = game[1], game[2]
        pairs.append(
            ((first[1].strip(), first[5].strip()),
             (second[1].strip(), second[5].strip())))
    return pairs


# Register the topbet.eu NHL moneyline scraper.
topbet = Site("topbet.eu", {})
topbet.scrapers[nhl] = Scraper(
    url="http://topbet.eu/sportsbook/nhl",
    fetch_fn=make_request,
    extract_fn=topbet_nhl_extractor,
)
Example #7
0
import re

from app.sites import Site, Scraper
from app.utils import make_request
from data.data import nhl


def sportsbook_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sportsbook.ag NHL lines page.

    Returns [] when the team and moneyline counts disagree (layout
    change or partially rendered page).
    """
    # Collapse whitespace so the regexes can match across line breaks.
    page_stripped = re.sub("\t|\r\n|\n", "", page)
    teams = re.findall("<span class=\"team\" .+?>(.+?)</span>", page_stripped)
    moneylines = re.findall(
        " ate=\"ML[AH]\"><div class=\"market\">" + "(.+?)</div></a>",
        page_stripped)

    if len(teams) != len(moneylines):
        return []

    # Entries alternate by team; group consecutive entries into games.
    # FIX: // keeps integer division on Python 3, where plain / yields
    # a float and breaks range().
    pairs = [((teams[i * 2], moneylines[i * 2]), (teams[i * 2 + 1],
                                                  moneylines[i * 2 + 1]))
             for i in range(len(teams) // 2)]
    return pairs


# Register the sportsbook.ag NHL moneyline scraper.
sportsbook = Site("sportsbook.ag", {})
sportsbook.scrapers[nhl] = Scraper(
    url="https://www.sportsbook.ag/sbk/sportsbook4/nhl-betting/nhl-game-lines.sbk",
    fetch_fn=make_request,
    extract_fn=sportsbook_nhl_extractor,
)
Example #8
0
    finally:
        driver.close()
    return source


def sportsbetting_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sportsbetting.ag sportsbook page.

    Consecutive wide rows form one game: even-indexed row is the first
    team, the following odd-indexed row is the second.
    """
    tp = TableParser()
    tp.feed(page)
    tables = tp.get_tables()

    # Clean up tables: take the fourth table minus its two header rows,
    # then keep only wide data rows.
    tables = tables[3][2:]
    tables = [r for r in tables if len(r) > 20]

    # Extract names/lines
    pairs = []
    # FIX: // keeps integer division on Python 3 (plain / returns a
    # float there and breaks range()).
    for i in range(len(tables) // 2):
        name1 = tables[i * 2][2].strip()
        name2 = tables[i * 2 + 1][1].strip()
        moneyline1 = str(tables[i * 2][9]).strip()
        moneyline2 = str(tables[i * 2 + 1][8]).strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the sportsbetting.ag NHL moneyline scraper (uses its own
# Selenium-based fetcher rather than make_request).
sportsbetting = Site("sportsbetting.ag", {})
sportsbetting.scrapers[nhl] = Scraper(
    url="http://www.sportsbetting.ag/sportsbook",
    fetch_fn=sportsbetting_nhl_fetcher,
    extract_fn=sportsbetting_nhl_extractor,
)
Example #9
0
from app.sites import Site, Scraper
from app.utils import make_request, strip, TableParser
from data.data import nhl


def betdsi_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    betdsi.eu NHL lines page.

    Each game is a two-row table: row 0 holds the first team and its
    moneyline, row 1 the second.
    """
    tp = TableParser()
    tp.feed(strip(page))
    tables = tp.get_tables()

    # Get rid of garbage rows: real game tables have exactly two rows.
    tables = [t for t in tables if len(t) == 2]

    # Find team names and moneylines
    pairs = []
    for table in tables:
        name1 = table[0][1].strip()
        # FIX: name2 was the only field not stripped, leaving stray
        # whitespace in the second team's name.
        name2 = table[1][0].strip()
        moneyline1 = table[0][-1].strip()
        moneyline2 = table[1][-1].strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the betdsi.eu NHL moneyline scraper.
betdsi = Site("betdsi.eu", {})
betdsi.scrapers[nhl] = Scraper(
    url="http://www.betdsi.eu/hockey-betting",
    fetch_fn=make_request,
    extract_fn=betdsi_nhl_extractor,
)
Example #10
0
    finally:
        driver.close()
    return source


def sportsbetting_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sportsbetting.ag sportsbook page.

    Consecutive wide rows form one game: even-indexed row is the first
    team, the following odd-indexed row is the second.
    """
    tp = TableParser()
    tp.feed(page)
    tables = tp.get_tables()

    # Clean up tables: take the fourth table minus its two header rows,
    # then keep only wide data rows.
    tables = tables[3][2:]
    tables = [r for r in tables if len(r) > 20]

    # Extract names/lines
    pairs = []
    # FIX: // keeps integer division on Python 3 (plain / returns a
    # float there and breaks range()).
    for i in range(len(tables) // 2):
        name1 = tables[i * 2][2].strip()
        name2 = tables[i * 2 + 1][1].strip()
        moneyline1 = str(tables[i * 2][9]).strip()
        moneyline2 = str(tables[i * 2 + 1][8]).strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the sportsbetting.ag NHL moneyline scraper (uses its own
# Selenium-based fetcher rather than make_request).
sportsbetting = Site("sportsbetting.ag", {})
sportsbetting.scrapers[nhl] = Scraper(
    url="http://www.sportsbetting.ag/sportsbook",
    fetch_fn=sportsbetting_nhl_fetcher,
    extract_fn=sportsbetting_nhl_extractor,
)
Example #11
0
import re

from app.sites import Site, Scraper
from app.utils import make_request
from data.data import nhl


def sportsinteraction_nhl_extractor(page):
    """Extract ((team, moneyline), (team, moneyline)) pairs from a
    sportsinteraction.com NHL lines page.

    Rows whose handicap cell is blank (&nbsp;) carry the straight
    moneyline; consecutive moneyline rows form one game.
    """
    # pull rows with regular moneylines out of page; collapse whitespace
    # so the multi-span pattern can match across line breaks.
    page_stripped = re.sub("\t|\r\n|\n", "", page)
    row_pattern = "<span class=\"name\">(.+?)</span>" + \
                  "<span class=\"handicap\">(.+?)</span>" + \
                  "<span class=\"price\">(.+?)</span>"
    rows = re.findall(row_pattern, page_stripped)
    moneyline_rows = [(r[0], r[2]) for r in rows if r[1].strip() == "&nbsp;"]

    # Find team names and moneylines
    pairs = []
    # FIX: // keeps integer division on Python 3, where plain / yields
    # a float and breaks range().
    for i in range(len(moneyline_rows) // 2):
        name1 = moneyline_rows[i * 2][0].strip()
        name2 = moneyline_rows[i * 2 + 1][0].strip()
        moneyline1 = moneyline_rows[i * 2][1].strip()
        moneyline2 = moneyline_rows[i * 2 + 1][1].strip()
        pairs.append(((name1, moneyline1), (name2, moneyline2)))
    return pairs


# Register the sportsinteraction.com NHL moneyline scraper.
sportsinteraction = Site("sportsinteraction.com", {})
sportsinteraction.scrapers[nhl] = Scraper(
    url="http://www.sportsinteraction.com/hockey/nhl-betting-lines/",
    fetch_fn=make_request,
    extract_fn=sportsinteraction_nhl_extractor,
)