Example #1
0
def write_txt_from_challonge(url, file):
    """Append the match results from a Challonge bracket URL to a text file.

    url: a string; the URL for a Challonge bracket
         (e.g. 'http://apex2015melee.challonge.com/singles').
    file: a string; the name of the file to write (normalized by sf.add_txt).

    Writes one parsed match per line; matches with a missing player or a
    negative (disqualification) score are skipped.
    """
    # Don't shadow the `file` parameter: keep the resolved path and the
    # open file object in their own names.
    out_path = sf.add_txt(file)
    # Strip the in-progress / edit page chrome, then split the page text
    # into one chunk per match.
    webpage = getHTML(url)[0].replace('Mark as In Progress\n\n\nUnmark as In Progress\n\n\n\n', '') \
        .replace('\n\n\n\n\n\nEdit\n\n\nReopen', '').split('\n\n\n\nMatch Details\n\n\n\n\n\n\n')[1:]

    parsed_matches = []

    for chunk in webpage:
        lines = chunk.splitlines()
        # Lines 2 and 7 hold the player names; skip byes / unplayed matches.
        if lines[2] == "" or lines[7] == "":
            continue
        # Lines 24 and 27 hold the scores.  A negative score marks a
        # disqualification, which we skip.  Absent or non-numeric scores are
        # tolerated (some brackets omit them) — but only those two specific
        # failures, not every exception as the old bare `except:` did.
        disqualified = False
        for score_idx in (24, 27):
            try:
                if int(lines[score_idx]) < 0:
                    disqualified = True
                    break
            except (ValueError, IndexError):
                pass  # score missing or not a number: keep the match
        if disqualified:
            continue

        raw = lines[2] + "," + lines[24] + "," + lines[7] + "," + lines[27]
        stripped = sf.strip_match(raw)
        if stripped is not None:
            parsed = sf.parse_match(stripped)  # parse once, reuse the result
            if parsed != "":
                parsed_matches.append(parsed + "\n")

    # utf8 for non-ASCII player names; consistent with the Liquipedia writer.
    with open(out_path, 'a', encoding="utf8") as out:
        out.writelines(parsed_matches)
def write_txt_from_challonge(url, file):
    """Append the match results from a Challonge bracket URL to a text file.

    url: a string; the URL for a Challonge bracket
         (e.g. 'http://apex2015melee.challonge.com/singles').
    file: a string; the name of the file to write (normalized by sf.add_txt).

    Writes one parsed match per line; matches with a missing player or a
    negative (disqualification) score are skipped.
    """
    # Don't shadow the `file` parameter: keep the resolved path and the
    # open file object in their own names.
    out_path = sf.add_txt(file)
    # Strip the in-progress / edit page chrome, then split the page text
    # into one chunk per match.
    webpage = getHTML(url)[0].replace('Mark as In Progress\n\n\nUnmark as In Progress\n\n\n\n', '') \
        .replace('\n\n\n\n\n\nEdit\n\n\nReopen', '').split('\n\n\n\nMatch Details\n\n\n\n\n\n\n')[1:]

    parsed_matches = []

    for chunk in webpage:
        lines = chunk.splitlines()
        # Lines 2 and 7 hold the player names; skip byes / unplayed matches.
        if lines[2] == "" or lines[7] == "":
            continue
        # Lines 24 and 27 hold the scores.  A negative score marks a
        # disqualification, which we skip.  Absent or non-numeric scores are
        # tolerated (some brackets omit them) — but only those two specific
        # failures, not every exception as the old bare `except:` did.
        disqualified = False
        for score_idx in (24, 27):
            try:
                if int(lines[score_idx]) < 0:
                    disqualified = True
                    break
            except (ValueError, IndexError):
                pass  # score missing or not a number: keep the match
        if disqualified:
            continue

        raw = lines[2] + "," + lines[24] + "," + lines[7] + "," + lines[27]
        stripped = sf.strip_match(raw)
        if stripped is not None:
            parsed = sf.parse_match(stripped)  # parse once, reuse the result
            if parsed != "":
                parsed_matches.append(parsed + "\n")

    # utf8 for non-ASCII player names; consistent with the Liquipedia writer.
    with open(out_path, 'a', encoding="utf8") as out:
        out.writelines(parsed_matches)
Example #3
0
def write_txt_from_liquipedia(url, filename):
    """Append parsed match data from a Liquipedia bracket URL to *filename*.

    url: a string; a Liquipedia page URL (normalized by format_liquipedia_url).
    filename: a string; path of the text file to append to.

    Writes one parsed match per line (returns None, despite what the old
    docstring said).
    """
    url = format_liquipedia_url(url)

    try:
        # Pin the parser explicitly so BeautifulSoup doesn't guess (and warn)
        # based on whichever parser library happens to be installed.
        soup = BeautifulSoup(requests.get(url).content, "html.parser")
    except http.client.IncompleteRead as e:
        # The server truncated the response; parse whatever we did receive.
        soup = BeautifulSoup(e.partial, "html.parser")

    match_data = str(soup.find("textarea"))

    # Bracket lines look like "|r1m1..." / "|l2m3..."; consecutive lines that
    # belong to the same match are joined into one logical line.
    # Raw strings avoid invalid-escape warnings; compile once, not per line.
    match_prefix = re.compile(r'^\|[rl]\d+m\d+')
    matches = ""
    prev_line_start = "xxxx"  # sentinel that no real line starts with
    for line in match_data.split("\n"):
        if match_prefix.match(line):
            if line.startswith(prev_line_start):
                matches += " " + line
            else:
                matches += "\n" + line
                prev_line_start = re.sub(r'^(\|[rl]\d+m\d+).*', r'\1', line)

    parsed_matches = []
    for line in matches.split("\n"):
        stripped_line = sf.strip_match(line)
        if stripped_line is not None and sf.match_played(url, line):
            parsed_match = sf.parse_match(stripped_line)
            if parsed_match != "":
                parsed_matches.append(parsed_match + "\n")

    with open(filename, 'a', encoding="utf8") as file:
        file.writelines(parsed_matches)
def write_txt_from_liquipedia(url, filename):
    """Append parsed match data from a Liquipedia bracket URL to *filename*.

    url: a string; a Liquipedia page URL (normalized by format_liquipedia_url).
    filename: a string; path of the text file to append to.

    Writes one parsed match per line (returns None, despite what the old
    docstring said).
    """
    url = format_liquipedia_url(url)

    try:
        # Pin the parser explicitly so BeautifulSoup doesn't guess (and warn)
        # based on whichever parser library happens to be installed.
        soup = BeautifulSoup(requests.get(url).content, "html.parser")
    except http.client.IncompleteRead as e:
        # The server truncated the response; parse whatever we did receive.
        soup = BeautifulSoup(e.partial, "html.parser")

    match_data = str(soup.find("textarea"))

    # Bracket lines look like "|r1m1..." / "|l2m3..."; consecutive lines that
    # belong to the same match are joined into one logical line.
    # Raw strings avoid invalid-escape warnings; compile once, not per line.
    match_prefix = re.compile(r'^\|[rl]\d+m\d+')
    matches = ""
    prev_line_start = "xxxx"  # sentinel that no real line starts with
    for line in match_data.split("\n"):
        if match_prefix.match(line):
            if line.startswith(prev_line_start):
                matches += " " + line
            else:
                matches += "\n" + line
                prev_line_start = re.sub(r'^(\|[rl]\d+m\d+).*', r'\1', line)

    parsed_matches = []
    for line in matches.split("\n"):
        stripped_line = sf.strip_match(line)
        if stripped_line is not None and sf.match_played(url, line):
            parsed_match = sf.parse_match(stripped_line)
            if parsed_match != "":
                parsed_matches.append(parsed_match + "\n")

    with open(filename, 'a', encoding="utf8") as file:
        file.writelines(parsed_matches)