Example #1
def search_game(query, rand=None, found=None):
    game_url = twitch_api_url + 'search/games?type=suggest&live=true&q=' + query.replace(
        ' ', '%20')
    results = json.load(http.open(game_url))

    if not results['games']:
        return 'No livestreams found for games matching ' + query
    game_name = results['games'][0]['name']
    search_url = twitch_api_url + 'search/streams?limit=1&q=' + game_name.replace(
        ' ', '%20')
    search_results = json.load(http.open(search_url))

    total = search_results['_total']
    if not rand:
        offset = 0
    while not found:
        if rand:
            offset = random.randint(0, (total - 1))
        result_url = search_url + '&offset=' + str(offset)
        result = json.load(http.open(result_url))['streams'][0]
        print(result['game'] + ' == ' + game_name)
        if result['game'] == game_name:
            found = True
        elif not rand:
            offset += 1

    output = ('[LIVE] ' + result['channel']['status'] + ' | Playing: ' + game_name +
              ' | ' + str(result['viewers']) + ' viewers | ' + result['channel']['url'])
    return output.replace('&#39;', "'")  # un-escape HTML apostrophes in the stream title
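
These snippets look like plugins from skybot/CloudBot-style IRC bots: each relies on module-level names defined elsewhere in the plugin, such as twitch_api_url, a thin http wrapper around urllib, plus json and random imports. A minimal sketch of that assumed context (the base URL and the wrapper shape are inferred from usage, not taken from the original source):

import json
import random

try:
    from urllib2 import urlopen         # Python 2
except ImportError:
    from urllib.request import urlopen  # Python 3

twitch_api_url = 'https://api.twitch.tv/kraken/'  # assumed base of the long-retired v3 API

class http(object):
    """Minimal stand-in for the bot's http helper module."""
    @staticmethod
    def open(url):
        return urlopen(url)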
Example #2
def goatkcd(inp):
    '.goatkcd/.xkcd [#] [sfw] -- returns a goatkcd comic with specified number (if exists), random otherwise.'
    key = inp
    strips = 'strips'
    nsfw_tag = '(NSFW) '

    if inp.endswith("sfw"):
        key = key[:-4].strip()
        strips += '_sfw'  #change url used from /strips to /strips_sfw
        nsfw_tag = ''  #remove nsfw tag

    if not key:  # no comic number specified, use a random one
        key = 'random'
    elif key.isalpha():  # a valid comic number contains no letters
        return '.goatkcd/.xkcd [#] [sfw] -- returns a goatkcd comic with specified number (if exists), random otherwise.'

    if key != 'random':  # check that the requested comic number actually exists
        try:
            http.open(goatkcd_url + strips + '/' + key + '.jpg').getcode()
        except http.HTTPError:
            key = 'random'  # fall back to a random comic if it doesn't

    base_url = http.open(goatkcd_url + key).geturl()  # resolves 'random' to a concrete comic URL
    img_id = base_url[len(goatkcd_url):].strip('/')  # slice off the site prefix to get just the comic number
    img_url = goatkcd_url + strips + '/' + img_id + '.jpg'  # combine into the full image URL

    return nsfw_tag + img_url
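
The original used base_url.strip(goatkcd_url) to remove the site prefix, but str.strip() removes any of the given *characters* from both ends, not a prefix; it only happens to work here because comic ids are numeric. A quick demonstration, assuming goatkcd_url is something like 'http://goatkcd.com/':

# str.strip() treats its argument as a set of characters, not a prefix:
url = 'http://goatkcd.com/123'
print(url.strip('http://goatkcd.com/'))                      # '123' -- works only by luck
print('http://goatkcd.com/ah'.strip('http://goatkcd.com/'))  # '' -- 'a' and 'h' are both in the set

# slicing off the known prefix is the reliable way:
prefix = 'http://goatkcd.com/'
print(url[len(prefix):] if url.startswith(prefix) else url)  # '123'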
Example #3
def search_user(query):
    channel_url = twitch_api_url + 'channels/' + query
    results = json.load(http.open(channel_url))
    try:
        return results['message']  # error responses carry a 'message' field
    except KeyError:
        pass

    search_url = twitch_api_url + 'search/streams?limit=100&q=' + results['display_name']
    print(search_url)
    search_results = json.load(http.open(search_url))

    for stream in search_results.get('streams', []):
        print(stream['channel']['url'] + ' = ' + results['url'])
        if stream['channel']['url'] == results['url']:
            output = ('[LIVE] ' + results['status'] + ' | Playing: ' + stream['game'] +
                      ' | ' + str(stream['viewers']) + ' viewers | ' + results['url'])
            return output.replace('&#39;', "'")  # un-escape HTML apostrophes

    return '[OFF] ' + results['url']
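
Several of these plugins build query strings by hand, either query.replace(' ', '%20') or raw concatenation as in search_user above, which breaks on characters like &. A hedged sketch of the safer route with the standard library (the bot's own http.quote_plus, used in the validate examples below, presumably wraps the same function):

try:
    from urllib import quote_plus        # Python 2
except ImportError:
    from urllib.parse import quote_plus  # Python 3

def build_search_url(base, query, limit=100):
    # percent-encodes every reserved character, not just spaces
    return '{0}search/streams?limit={1}&q={2}'.format(base, limit, quote_plus(query))

print(build_search_url('https://api.twitch.tv/kraken/', 'Dota 2 & chill'))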
Example #4
def search_rand(found=None):
    games_url = twitch_api_url + 'games/top?limit=1'
    games = json.load(http.open(games_url))
    total = games['_total']

    offset = random.randint(0, total - 1)
    rand_game_url = games_url + '&offset=' + str(offset)
    rand_game = json.load(http.open(rand_game_url))['top'][0]['game']

    return search_game(rand_game['name'], True)  # rand flag set: pick a random matching stream
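
search_rand samples a paginated API with a standard trick: request a single record to read the reported '_total', then re-request with a random offset. A generic sketch of the pattern (fetch_page is a hypothetical helper):

import random

def random_record(fetch_page):
    """Pick one random record from a paginated API.

    fetch_page(offset) is a hypothetical callable returning
    (total_count, record_at_offset) for a limit=1 request.
    """
    total, _ = fetch_page(0)            # first call only reads the total
    if total <= 0:
        return None
    offset = random.randint(0, total - 1)
    return fetch_page(offset)[1]        # second call fetches the random record

Note that the total can shift between the two requests, which is why search_game above keeps retrying in a loop until the fetched stream actually matches the game.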
Example #5
def sndsc_url(match, bot=None):
    api_key = bot.config.get("api_keys", {}).get("soundcloud")
    if not api_key:
        print "Error: no api key set"
        return None
    url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
          match.group(4).split(' ')[0]
    return soundcloud(http.open(url).url, api_key)
Example #6
def search_general(query):
    search_url = twitch_api_url + 'search/streams?limit=1&q=' + query.replace(' ', '%20')
    try:
        result = json.load(http.open(search_url))['streams'][0]
    except (IndexError, KeyError):
        return 'No results found.'

    output = ('[LIVE] ' + result['channel']['status'] + ' | Playing: ' + result['game'] +
              ' | ' + str(result['viewers']) + ' viewers | ' + result['channel']['url'])
    return output.replace('&#39;', "'")  # un-escape HTML apostrophes
Example #7
def validate(inp):
    "validate <url> -- Runs url through the w3c markup validator."

    if not inp.startswith('http://'):
        inp = 'http://' + inp

    url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        errorcount = info['x-w3c-validator-errors']
        warningcount = info['x-w3c-validator-warnings']
        return "%s was found to be %s with %s errors and %s warnings." \
                " see: %s" % (inp, status, errorcount, warningcount, url)
Example #8
def validate(inp):
    """validate <url> -- Runs url through the w3c markup validator."""

    if not inp.startswith('http://'):
        inp = 'http://' + inp

    url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        error_count = info['x-w3c-validator-errors']
        warning_count = info['x-w3c-validator-warnings']
        return "{} was found to be {} with {} errors and {} warnings." \
               " see: {}".format(inp, status, error_count, warning_count, url)
Example #9
def ghissues(inp):
    """ghissues username/repo [number] - Get specified issue summary, or open issue count"""
    args = inp.split(" ")
    try:
        if args[0] in shortcuts:
            repo = shortcuts[args[0]]
        else:
            repo = args[0]
        url = "https://api.github.com/repos/{}/issues".format(repo)
    except IndexError:
        return "Invalid syntax. .github issues username/repo [number]"
    try:
        url += "/{}".format(args[1])
        number = True
    except IndexError:
        number = False
    try:
        data = json.loads(http.open(url).read())
        print(url)
        if not number:
            try:
                data = data[0]  # no issue number given: summarize the newest open issue
            except IndexError:
                print(data)
                return "Repo has no open issues"
    except ValueError:
        return "Invalid data returned. Check arguments (.github issues username/repo [number])"
    number = data["number"]
    if data["state"] == "open":
        state = u"\x033\x02OPEN\x02\x0f"
    else:
        state = u"\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"])
    user = data["user"]["login"]
    title = data["title"]
    summary = truncate(data["body"])
    gitiourl = gitio(data["html_url"])
    if "Failed to get URL" in gitiourl:
        gitiourl = gitio(data["html_url"] + " " + repo.split("/")[1] + str(number))
    # build the reply once all the fields exist
    if summary == "":
        return "Issue: #{} ({}) by {}: {} {}".format(number, state, user, title, gitiourl)
    else:
        return "Issue: #{} ({}) by {}: {} | {} {}".format(number, state, user, title, summary, gitiourl)
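
ghissues leans on helpers defined elsewhere in the plugin: truncate (squeezes the issue body onto one IRC line), gitio (shortens the GitHub URL), and shortcuts, presumably a small alias dict such as {'cloudbot': 'ClouDev/CloudBot'}. Minimal sketches of what the two helpers might look like; the git.io service has since been retired, so this is illustrative only:

import requests

def truncate(text, length=100):
    # hypothetical: collapse whitespace and cap the body at one IRC-friendly line
    text = ' '.join((text or '').split())
    return text if len(text) <= length else text[:length - 3] + '...'

def gitio(url, code=None):
    # hypothetical wrapper for the retired git.io shortener; on success the
    # short link came back in the Location header
    resp = requests.post('https://git.io', data={'url': url, 'code': code})
    return resp.headers.get('Location', 'Failed to get URL')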
Example #10
def dinner(inp):
    """dinner - WTF IS FOR DINNER"""
    try:
        page = http.open(random_url)
    except (http.HTTPError, http.URLError) as e:
        return "Could not get recipe: {}".format(e)
    url = page.geturl()

    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    name = data["name"].strip()
    text = random.choice(phrases).format(name)

    return u"{} - {}".format(text, web.try_isgd(url))
Example #11
def title(inp):
    "title <url> -- gets the title of a web page"
    url = urlnorm.normalize(inp.encode('utf-8'), assume_scheme="http")

    try:
        page = http.open(url)
        real_url = page.geturl()
        soup = BeautifulSoup(page.read())
    except (http.HTTPError, http.URLError):
        return "Could not fetch page."

    title_tag = soup.find('title')
    if not title_tag or not title_tag.contents:
        return "Could not find title."
    title = title_tag.contents[0]

    return u"{} [{}]".format(title, real_url)
Example #12
def dinner():
    """dinner - WTF IS FOR DINNER"""
    try:
        page = http.open(random_url)
    except (http.HTTPError, http.URLError) as e:
        return "Could not get recipe: {}".format(e)
    url = page.geturl()

    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    name = data["name"].strip().upper()
    text = random.choice(phrases).format(name)

    if censor:
        text = text.replace("FUCK", "F**K")  # mask the expletive for polite channels

    return "{} - {}".format(text, web.try_isgd(url))
Example #13
def recipe(inp):
    """recipe [term] - Gets a recipe for [term], or ets a random recipe if [term] is not provided"""
    if inp:
        # get the recipe URL by searching
        try:
            search = http.get_soup(search_url, query=inp.strip())
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)

        # find the list of results
        result_list = search.find('div', {'class': 'found_results'})

        if result_list:
            results = result_list.find_all('div', {'class': 'recipe_result'})
        else:
            return "No results"

        # pick a random front page result
        result = random.choice(results)

        # extract the URL from the result
        url = base_url + result.find('div', {
            'class': 'image-wrapper'
        }).find('a')['href']

    else:
        # get a random recipe URL
        try:
            page = http.open(random_url)
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        url = page.geturl()

    # use get_data() to get the recipe info from the URL
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    name = data["name"].strip()
    return u"Try eating \x02{}!\x02 - {}".format(name, web.try_isgd(url))
Example #14
def fill_schedule(db=None):
    schedule_url = "http://espn.go.com/racing/schedule"
    page = http.open(schedule_url)
    soup = BeautifulSoup(page)
    table = soup.find('table', class_='tablehead')
    trs = table.find_all('tr')
    for tr in trs:
        if tr.attrs['class'][0] == "stathead" or tr.attrs['class'][0] == "colhead":
            continue
        tds = tr.find_all('td')
        timestamp_br = tds[0].find('br')
        timestamp_br.extract()
        timestamp = tds[0].contents
        timestamp = timestamp[0] + " " + timestamp[1]
        timestamp = timestamp.replace('&nbsp;', ' ').replace('Noon', '12:00 PM').replace(u'\xa0', ' ').replace(' ET', '') + ' 2014'
        timestamp = time.strptime(timestamp, "%a, %b %d %I:%M %p %Y")
        timestamp = time.mktime(timestamp)
        name = tds[1].find('b').find(text=True)
        name_b = tds[1].find('b')
        name_b.extract()
        speedway = tds[1].find(text=True).replace('<br>', '')
        network = tds[2].find(text=True)
        if network is None:
            if tds[2].find('a').attrs['href'] == "http://espn.go.com/espntv/onair/index":
                network = "ESPN"
            elif tds[2].find('a').attrs['href'] == "http://espn.go.com/abcsports/":
                network = "ABC"
        else:
            network = str(network).strip()

        data = {
            "timestamp": timestamp,
            "name": name,
            "speedway": speedway,
            "network": network
        }
        upsert_raceschedule(db, data)
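
fill_schedule normalizes ESPN's human-readable listings (non-breaking spaces, 'Noon', the ' ET' suffix) and pins the missing year before handing the string to time.strptime. A quick check that the format string matches the normalized text:

import time

ts = time.strptime('Sun, Feb 23 1:00 PM 2014', '%a, %b %d %I:%M %p %Y')
print(time.mktime(ts))  # seconds since the epoch, interpreted in local time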
Example #15
def get_title(url):
    url = urlnorm.normalize(url.encode('utf-8'))
    url = url.decode('utf-8')
    # add http if it's missing
    if not url.startswith("http"):
        url = "http://" + url
    try:
        # fetch the page and pull the title out with the titler regex
        request = http.open(url)
        real_url = request.geturl()
        text = request.read()
        text = text.decode('utf8')
        match = titler.search(text)
        title = match.group(1)
    except Exception:
        return "Could not parse URL! Are you sure it's valid?"

    title = http.unescape(title)

    # if the url has been redirected, show us
    if real_url == url:
        return title
    else:
        return u"%s [%s]" % (title, real_url)
Example #16
def expand(inp, say=None):
    "expand <url> -- expands short URLs"
    try:
        return http.open(inp).url.strip()
    except http.URLError:
        return "Unable to expand"
Example #17
def tiny_url(inp, say=None):
    try:
        say(http.open(inp.group()).url.strip())
    except http.URLError:
        pass
Example #18
def tinyurl(match):
    try:
        return http.open(match.group()).url.strip()
    except http.URLError:
        pass
Example #19
def unshorten(match, say=None):
    try:
        return http.open(match.group(), get_method="HEAD").url.strip()
    except http.URLError:
        pass
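
The expanders in the last four examples are variations on one trick: open the short URL and read back the final address after redirects. unshorten is the most economical, issuing a HEAD request so no body is transferred. The same thing with requests:

import requests

def unshorten(url):
    try:
        # HEAD follows redirects without downloading the page body
        return requests.head(url, allow_redirects=True, timeout=10).url
    except requests.RequestException:
        return None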