Example #1
def google(inp):
    '''.g/.google <query> -- returns first google search result'''

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: %d: %s' %
                      (parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'no results found'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])

    if len(content) == 0:
        content = "No description available"
    else:
        content = http.html.fromstring(content).text_content()

    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)
    out = ' '.join(out.split())

    if len(out) > 300:
        out = out[:out.rfind(' ')] + '..."'

    return out
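
Most of the google() variants on this page call an api_get() helper that the plugin defines elsewhere. A minimal sketch, assuming the (long-deprecated) Google AJAX Search API endpoint that Examples #13 and #17 build by hand; the real helper's signature may differ:

import urllib

def api_get(kind, query):
    # Query the deprecated Google AJAX Search API; `kind` is the search
    # vertical ('web', 'images', ...) and `query` the raw search string.
    # `http` is the bot's HTTP helper module used throughout these examples.
    url = ('http://ajax.googleapis.com/ajax/services/search/%s'
           '?v=1.0&safe=off&q=%s') % (kind, urllib.quote(query))
    return http.get_json(url)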
Example #2
def google(inp):
    "google <query> -- Returns first google search result for <query>."

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: %d: %s' %
                      (parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = text.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = text.truncate_str(content, 150)

    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)

    return out
Example #3
def fmt(post):
    permalink = post['permalink']

    if 'fact_check_claim' in post:
        claim = post['fact_check_claim']
        if post['taxonomies'].get('fact_check_category') == ['Fake News']:
            status = 'Fake News'
        else:
            status = post['taxonomies']['fact_check_rating'][0]
    else:
        content = post['content']
        m = re.search(r"(?:Claim|Glurge|Legend|FACT CHECK): (.*)", content)
        if m:
            claim = m.group(1).strip()
        else:
            claim = content.split('\n')[0]
            print("???", claim)
        if claim == 'Claim':
            print("!!!", content)
        m = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED|CORRECTLY ATTRIBUTED|(?<=Status:).*", content)
        if m:
            status = m.group(0)
        else:
            status = '???'

    claim = re.sub(r"[\s\xa0]+", " ", http.unescape(claim)).strip()   # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", http.unescape(status)).title().strip()

    if len(claim) > 300:
        claim = claim[:300] + '...'

    return "Claim: {0} Status: {1} {2}".format(claim, status, permalink)
Example #4
def google(inp):
    "google <query> -- Returns first google search result for <query>."

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: %d: %s' % (
                      parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = text.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = text.truncate_str(content, 150)

    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)

    return out
Example #5
def goturlforscp(url, scpid, db_global, timemon, inten):
    scpid = url.replace("http://www.scp-wiki.net/scp-", "")  # get the SCP id as a string
    for data in badlet:
        if data in scpid:
            return "Didn't get an SCP page! Instead got: " + scpid
    urltitle = None
    if not scpid.endswith("-j"):  # joke SCPs have no numeric id or series entry
        iscpid = int(scpid)  # numeric id for range testing
        if 1 <= iscpid < 1000:  # series page to get our SCP's item name from
            urltitle = "http://www.scp-wiki.net/scp-series"
        elif 1000 <= iscpid < 2000:
            urltitle = "http://www.scp-wiki.net/scp-series-2"
    # regex for the data we want; parse() fetches the url and applies it
    titler = re.compile(r'Item #:(.+?)Object Class:(.+?)Special Containment Procedures:.*?Description:(.+?)</p>')
    title = parse(url, titler)
    if isinstance(title, tuple):  # the parser returns a tuple on success
        (itemid, classtype, desc) = title
        desc = http.unescape(desc).replace("  ", " ")
        classtype = http.unescape(classtype).replace(" ", "")
        itemid = http.unescape(itemid).replace(" ", "")
    else:  # on a parser error, blank out our other values
        (itemid, classtype, desc) = ("", "", "")
    if urltitle:  # not a joke SCP: look the item name up on the series page
        itemname = http.unescape(parse2(urltitle, re.compile('<li><a href="/scp-' + scpid + '">SCP-' + scpid + '</a> - (.+?)</li>')))
    else:
        itemname = ""  # joke SCPs have no item name we can fetch
    db_global.execute("insert or replace into scpwiki(itemname, itemid, classtype, desc, url, time) values (?, ?, ?, ?, ?, ?)",
                      (itemname, itemid, classtype, desc, url, timemon))
    db_global.commit()
    return ("Item Name:\x02\x1f" + itemname + "\x02\x1f Item #:\x02\x1f" + itemid + "\x1f\x02 Class:'\x02\x1f" + classtype + "\x1f\x02'. " + desc).replace("  ", " ")
Example #6
def google(inp):
    ".google <query> -- Returns first google search result for <query>."

    parsed = api_get("web", inp)
    if not 200 <= parsed["responseStatus"] < 300:
        raise IOError("error searching for pages: %d: %s" % (parsed["responseStatus"], ""))
    if not parsed["responseData"]["results"]:
        return "No results found."

    result = parsed["responseData"]["results"][0]

    title = http.unescape(result["titleNoFormatting"])
    content = http.unescape(result["content"])

    if len(content) == 0:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()

    out = '%s -- \x02%s\x02: "%s"' % (result["unescapedUrl"], title, content)

    out = " ".join(out.split())

    if len(out) > 300:
        out = out[: out.rfind(" ")] + "..."

    return out
Example #7
def google(inp):
    '''.g/.google <query> -- returns first google search result'''

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: %d: %s' % (
                parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'no results found'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])

    if len(content) == 0:
        content = "No description available"
    else:
        content = http.html.fromstring(content).text_content()

    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)
    out = ' '.join(out.split())

    if len(out) > 300:
        out = out[:out.rfind(' ')] + '..."'

    return out
Example #8
def hackernews(match):
    base_api = 'https://hacker-news.firebaseio.com/v0/item/'
    entry = http.get_json(base_api + match.group(1) + ".json")

    if entry['type'] == "story":
        entry['title'] = http.unescape(entry['title'])
        return u"{title} by {by} with {score} points and {descendants} comments ({url})".format(**entry)

    if entry['type'] == "comment":
        entry['text'] = http.unescape(entry['text'].replace('<p>', ' // '))
        return u'"{text}" -- {by}'.format(**entry)
Example #9
def hackernews(match):
    base_api = "https://hacker-news.firebaseio.com/v0/item/"
    entry = http.get_json(base_api + match.group(1) + ".json")

    if entry["type"] == "story":
        entry["title"] = http.unescape(entry["title"])
        return "{title} by {by} with {score} points and {descendants} comments ({url})".format(
            **entry)

    if entry["type"] == "comment":
        entry["text"] = http.unescape(entry["text"].replace("<p>", " // "))
        return '"{text}" -- {by}'.format(**entry)
Example #10
def hackernews(match):
    base_api = 'https://hacker-news.firebaseio.com/v0/item/'
    entry = http.get_json(base_api + match.group(1) + ".json")

    if entry['type'] == "story":
        entry['title'] = http.unescape(entry['title'])
        return u"{title} by {by} with {score} points and {descendants} comments ({url})".format(
            **entry)

    if entry['type'] == "comment":
        entry['text'] = http.unescape(entry['text'].replace('<p>', ' // '))
        return u'"{text}" -- {by}'.format(**entry)
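
The hackernews() variants receive a regex match from the bot's hook system. A plausible registration, modeled on the @hook.regex decorators visible in the twitter examples further down; the exact URL pattern is an assumption:

@hook.regex(r'https?://news\.ycombinator\.com/item\?id=(\d+)')
def hackernews_link(match):
    # group(1) is the numeric item id, matching match.group(1) above
    return hackernews(match)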
Example #11
def hitze(inp, say=None):
  hitzelist = [
    "ahahaaha",
    "lol",
    "heh",
    "omg.",
    "uugh",
    "why..",
    "lol pcgaming",
    "rip",
    "sperg",
    "omg hyle",
  ]

  subreddit = [
    "pics",
    "wtf",
    "cityporn",
    "gaming",
    "minecraftcirclejerk",
    "gifs",
    "nba",
  ]

  noSelf = False
  while not noSelf:
    jsonData = http.get_json('http://www.reddit.com/r/' + random.choice(subreddit) + '/.json')
    potentialURL = random.choice(jsonData['data']['children'])['data']['url']
    noSelf = 'reddit' not in potentialURL  # retry until we get an external (non-self) link

  say("<hitz> " + potentialURL + " " + http.unescape(random.choice(hitzelist)))
Example #12
def movietime(inp):
  ".movietime <query> -- return a random movie from r/fullmoviesonyoutube or search if you supply a query"
  if inp:
    jsonData = http.get_json('http://www.reddit.com/r/fullmoviesonyoutube/search.json?q={}&restrict_sr=on&sort=new&limit=1'.format(inp))
    try:
      movie = jsonData['data']['children'][0]['data']
    except IndexError:
      return 'no results'
  else:
    jsonData = http.get_json('http://www.reddit.com/r/fullmoviesonyoutube.json')
    movie = random.choice(jsonData['data']['children'])['data']
  return http.unescape(movie['title'] + ' ' + movie['url'])
Example #13
def page(inp,host): #gets our SCP url from the first google search result. this allows for queries instead of just SCP item ids.
    url = 'http://ajax.googleapis.com/ajax/services/search/web?q='+str("site:scp-wiki.net "+inp).replace(" ","%20")+'&v=1.0&safe=off&client=google-csbe&userip='+host
    parsed = http.get_json(url)
    #print(url)
    if parsed['responseStatus'] != 200:
        raise IOError('error searching for pages: %s' % (parsed['responseStatus']))
    if not parsed['responseData']['results']:
        return 'No results found.'
    result = parsed['responseData']['results'][0]
    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])
    if len(content) == 0:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
    out = '%s' % (result['unescapedUrl'])
    out = ' '.join(out.split())
    return out
Example #14
def google(inp,db=None,chan=None):
    """google <query> -- Returns first google search result for <query>."""
    trimlength = database.get(db,'channels','trimlength','chan',chan)
    if not trimlength: trimlength = 9999 

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]
    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])

    if not content: content = "No description available."
    else: content = http.html.fromstring(content.replace('\n', '')).text_content()

    return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
Example #15
def refresh_cache():
    """Gets a page of random FMLs and puts them into a dictionary"""
    page = http.get(urljoin(base_url, 'random'))
    soup = BeautifulSoup(page)

    for e in soup.findAll('div', {'class': 'post article'}):
        id = int(e['id'])
        text = ''.join(e.find('p').findAll(text=True))
        text = http.unescape(text)
        fml_cache[id] = text
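
refresh_cache() only fills fml_cache; the command that serves entries from it is not shown. A minimal hypothetical consumer, reusing the snippet's fml_cache and refresh_cache() assumptions:

import random

fml_cache = {}

@hook.command(autohelp=False)
def fml(inp):
    "fml -- Gets a random quote from fmylife.com."
    if not fml_cache:
        refresh_cache()
    id, text = random.choice(list(fml_cache.items()))
    del fml_cache[id]  # don't repeat an entry until the next refresh
    return '(#%d) %s' % (id, text)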
Example #16
def reddit_get(subreddit):
  if subreddit not in queue or len(queue[subreddit]['posts']) == 0 or time.time() - queue[subreddit]['time'] > 400:
    jsonData = http.get_json('http://www.reddit.com/r/' + subreddit + '/.json')
    queue[subreddit] = {
        'posts': [http.unescape(d['data']['title'].lower()) for d in jsonData['data']['children']],
        'time': time.time()
    }
  p = queue[subreddit]['posts']
  c = random.choice(p)
  p.remove(c)
  return c
Example #17
def google(inp,input=None):
    ".google <query> -- Returns first google search result for <query>."
    url = 'http://ajax.googleapis.com/ajax/services/search/web?q='+str(inp).replace(" ","%20")+'&v=1.0&safe=off&client=google-csbe&userip='+input.host
    parsed = http.get_json(url)
    #print(url)
    if parsed['responseStatus'] != 200:
        raise IOError('error searching for pages: %s' % (parsed['responseStatus']))
    if not parsed['responseData']['results']:
        return 'No results found.'
    result = parsed['responseData']['results'][0]
    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])
    if len(content) == 0:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)
    out = ' '.join(out.split())
    if len(out) > 300:
        out = out[:out.rfind(' ')] + '...'
    return out
Example #18
def twitter_status(match, bot=None):
  global token
  id = match.group(2)

  init(bot.config['api_keys']['twitter_key'], bot.config['api_keys']['twitter_secret'])
  twitter = Twython(bot.config['api_keys']['twitter_key'], access_token=token)
  result = twitter.show_status(id=id)

  at_name = result['user']['screen_name']
  full_name = result['user']['name']
  tweet_text = http.unescape(result['text'].replace('\n', ' '))

  return "\x02@" + at_name + " \x02(" + full_name + ") - " + tweet_text
Example #19
def google(text):
    """google <query> -- Returns first google search result for <query>."""

    parsed = api_get('web', text)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = formatting.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = formatting.truncate_str(content, 150)

    return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
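
formatting.truncate_str (text.truncate_str in the earlier examples) shortens a string at a word boundary. A sketch consistent with the hand-rolled truncation in Examples #1 and #17; the real helper may differ in detail:

def truncate_str(string, length):
    # Cut at the last space inside the limit and mark the cut,
    # mirroring out[:out.rfind(' ')] + '...' in the manual variants.
    if len(string) <= length:
        return string
    return string[:length].rsplit(' ', 1)[0] + '...'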
Example #20
def gse(inp):
    """gsearch <query> -- Returns first google search result for <query>."""

    if inp == "!!info":
        return "FurCode gsearch.py for RoboCop Classic.";

    eval = urllib.quote(inp)
    parsed = api_get(eval)

    result = parsed['items'][0]

    title = http.unescape(result['title'])
    title = text.truncate_str(title, 60)
    content = http.unescape(result['snippet'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = text.truncate_str(content, 150)

    return u'{} -- \x02{}\x02: "{}"'.format(result['link'], title, content)
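
Unlike the other google() variants, gse() reads parsed['items'], which matches the newer Google Custom Search JSON API. A hedged sketch of the api_get() it implies; API_KEY and CX_ID are placeholders, not values from the source:

API_KEY = '...'  # placeholder: Google API key
CX_ID = '...'    # placeholder: custom search engine id

def api_get(query):
    # Custom Search JSON API: responses carry 'items' with
    # 'title', 'snippet' and 'link', as gse() expects.
    url = ('https://www.googleapis.com/customsearch/v1'
           '?key=%s&cx=%s&q=%s') % (API_KEY, CX_ID, query)
    return http.get_json(url)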
Example #21
def google(inp, db=None, chan=None):
    """google <query> -- Returns first google search result for <query>."""
    trimlength = database.get(db, 'channels', 'trimlength', 'chan', chan)
    if not trimlength: trimlength = 9999

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}: {}'.format(
            parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]
    title = http.unescape(result['titleNoFormatting'])
    content = http.unescape(result['content'])

    if not content: content = "No description available."
    else:
        content = http.html.fromstring(content.replace('\n',
                                                       '')).text_content()

    return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title,
                                            content)
Example #22
def fmt(post):
    permalink = post["permalink"]

    if "fact_check_claim" in post:
        claim = post["fact_check_claim"]
        if post["taxonomies"].get("fact_check_category") == ["Fake News"]:
            status = "Fake News"
        else:
            status = post["taxonomies"]["fact_check_rating"][0]
    else:
        content = post["content"]
        m = re.search(r"(?:Claim|Glurge|Legend|FACT CHECK): (.*)", content)
        if m:
            claim = m.group(1).strip()
        else:
            claim = content.split("\n")[0]
            print("???", claim)
        if claim == "Claim":
            print("!!!", content)
        m = re.search(
            r"FALSE|TRUE|MIXTURE|UNDETERMINED|CORRECTLY ATTRIBUTED|(?<=Status:).*",
            content,
        )
        if m:
            status = m.group(0)
        else:
            status = "???"

    claim = re.sub(r"[\s\xa0]+", " ",
                   http.unescape(claim)).strip()  # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", http.unescape(status)).title().strip()

    if len(claim) > 300:
        claim = claim[:300] + "..."

    return "Claim: {0} Status: {1} {2}".format(claim, status, permalink)
Example #23
def google(text):
    """google <query> -- Returns first google search result for <query>."""

    parsed = api_get('web', text)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}: {}'.format(
            parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = formatting.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = formatting.truncate_str(content, 150)

    return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title,
                                           content)
Example #24
def urlparser(match, say=None):
    url = urlnorm.normalize(match.group().encode('utf-8'))
    if not url.startswith(("http://", "https://")):
        url = "http://" + url
    for x in ignored_urls:
        if x in url:
            return
    title = parse(url)
    if title == "fail":
        return
    title = http.unescape(title)
    realurl = http.get_url(url)
    if realurl == url:
        say("(Link) %s" % title)
    else:
        say("(Link) %s [%s]" % (title, realurl))
Example #25
def twitter_status(match, bot=None):
    global token
    result = {}

    if match.group("user").lower() == "realdonaldtrump":
        init(bot.config["api_keys"]["twitter_key"], bot.config["api_keys"]["twitter_secret"])
        twitter = Twython(bot.config["api_keys"]["twitter_key"], access_token=token)
        result = choice(twitter.get_user_timeline(screen_name="BadSonicFanArt"))
    else:
        id = match.group(2)

        init(bot.config["api_keys"]["twitter_key"], bot.config["api_keys"]["twitter_secret"])
        twitter = Twython(bot.config["api_keys"]["twitter_key"], access_token=token)
        result = twitter.show_status(id=id)

    at_name = result["user"]["screen_name"]
    full_name = result["user"]["name"]
    tweet_text = http.unescape(result["text"].replace("\n", " "))

    return "\x02@" + at_name + " \x02(" + full_name + ") - " + tweet_text
Example #26
def get_title(url):
    url = urlnorm.normalize(url.encode('utf-8'))
    url = url.decode('utf-8')
    # add http if it's missing
    if not url.startswith("http"):
        url = "http://" + url
    try:
        # get the title
        request = http.open(url)
        real_url = request.geturl()
        text = request.read()
        text = text.decode('utf8')
        match = titler.search(text)
        title = match.group(1)
    except Exception:
        return "Could not parse URL! Are you sure it's valid?"

    title = http.unescape(title)

    # if the url has been redirected, show us
    if real_url == url:
        return title
    else:
        return u"%s [%s]" % (title, real_url)
Example #27
        return 'error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    text = http.unescape(tweet["text"]).replace('\n', ' ')
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "\x02%s\x02: %s" % (screen_name, text)


@hook.api_key('twitter')
@hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)')
def show_tweet(match, api_key=None):
    return twitter(match.group(3), api_key)
Example #28
        return 'error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    text = http.unescape(tweet["text"]).replace('\n', ' ')
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "\x02%s\x02: %s" % (screen_name, text)


@hook.api_key('twitter')
@hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)')
def show_tweet(match, api_key=None):
    return twitter(match.group(3), api_key)
Example #29
def twitter(inp, api_key=None):
    """.twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> - Get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result."""
    if not isinstance(api_key, dict) or any(key not in api_key for key in
                                            ('consumer', 'consumer_secret', 'access', 'access_secret')):
        return "error: api keys not set"

    getting_id = False
    doing_search = False
    index_specified = False

    if re.match(r'^\d+$', inp):
        getting_id = True
        request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
    else:
        try:
            inp, index = re.split(r'\s+', inp, 1)
            index = int(index)
            index_specified = True
        except ValueError:
            index = 0
        if index < 0:
            index = 0
        if index >= 20:
            return 'error: only supports up to the 20th tweet'

        if re.match(r'^#', inp):
            doing_search = True
            request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(inp)
        else:
            request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s" % inp

    try:
        tweet = http.get_json(request_url, oauth=True, oauth_keys=api_key)
    except http.HTTPError as e:
        errors = {400: 'bad request (ratelimited?)',
                  401: 'unauthorized (private)',
                  403: 'forbidden',
                  404: 'invalid user/id',
                  500: 'twitter is broken',
                  502: 'twitter is down ("getting upgraded")',
                  503: 'twitter is overloaded (lol, RoR)',
                  410: 'twitter shut off api v1.'}
        if e.code == 404:
            return 'error: invalid ' + ['username', 'tweet id'][getting_id]
        if e.code in errors:
            return 'error: ' + errors[e.code]
        return 'error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    text = http.unescape(tweet["text"])
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "%s: %s [%s]" % (screen_name, text, time)
Example #30
def twitter(inp, api_key=None):
    ".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " "get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result"

    if not isinstance(api_key, dict) or any(
            key not in api_key for key in ("consumer", "consumer_secret",
                                           "access", "access_secret")):
        return "error: api keys not set"

    getting_id = False
    doing_search = False
    index_specified = False

    if re.match(r"^\d+$", inp):
        getting_id = True
        request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
    else:
        try:
            inp, index = re.split("\s+", inp, 1)
            index = int(index)
            index_specified = True
        except ValueError:
            index = 0
        if index < 0:
            index = 0
        if index >= 20:
            return "error: only supports up to the 20th tweet"

        if re.match(r"^#", inp):
            doing_search = True
            request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(
                inp)
        else:
            request_url = (
                "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s"
                % inp)

    try:
        tweet = http.get_json(request_url,
                              oauth=True,
                              oauth_keys=api_key,
                              tweet_mode="extended")
    except http.HTTPError as e:
        errors = {
            400: "bad request (ratelimited?)",
            401: "unauthorized",
            403: "forbidden",
            404: "invalid user/id",
            500: "twitter is broken",
            502: 'twitter is down ("getting upgraded")',
            503: "twitter is overloaded (lol, RoR)",
            410: "twitter shut off api v1.",
        }
        if e.code == 404:
            return "error: invalid " + ["username", "tweet id"][getting_id]
        if e.code in errors:
            return "error: " + errors[e.code]
        return "error: unknown %s" % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return "error: no results"

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return "error: not that many tweets found"

    if "retweeted_status" in tweet:
        rt = tweet["retweeted_status"]
        rt_text = http.unescape(rt["full_text"]).replace("\n", " ")
        text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
    else:
        text = http.unescape(tweet["full_text"]).replace("\n", " ")
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime("%Y-%m-%d %H:%M:%S",
                    strptime(time, "%a %b %d %H:%M:%S +0000 %Y"))

    return "%s \x02%s\x02: %s" % (time, screen_name, text)
Example #31
        return 'error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    text = http.unescape(tweet["text"])
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "%s \x02%s\x02: %s" % (time, screen_name, text)


@hook.api_key('twitter')
@hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)')
def show_tweet(match, api_key=None):
    return twitter(match.group(3), api_key)
Example #32
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    if 'retweeted_status' in tweet:
        rt = tweet["retweeted_status"]
        rt_text = http.unescape(rt["full_text"]).replace('\n', ' ')
        text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
    else:
        text = http.unescape(tweet["full_text"]).replace('\n', ' ')
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "%s \x02%s\x02: %s" % (time, screen_name, text)


@hook.api_key('twitter')
@hook.regex(r'https?://(mobile\.)?twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(?P<id>\d+)')
def show_tweet(match, api_key=None):
Example #33
                502: 'twitter is down ("getting upgraded")',
                503: 'twitter is overloaded (lol, RoR)',
                410: 'twitter shut off api v1.' }
        if e.code == 404:
            return 'error: invalid ' + ['username', 'tweet id'][getting_id]
        if e.code in errors:
            return 'error: ' + errors[e.code]
        return 'error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return 'error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'error: not that many tweets found'

    text = http.unescape(tweet["text"])
    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]

    time = strftime('%Y-%m-%d %H:%M:%S', strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))

    return "%s %s: %s" % (time, screen_name, text.replace('\n', ' '))
Example #34
    jsonData = http.get_json('http://www.reddit.com/r/' + random.choice(subreddit) + '/.json')
    potentialURL = random.choice(jsonData['data']['children'])['data']['url']
    noSelf = 'reddit' not in potentialURL  # retry until we get an external (non-self) link

  say("<hitz> " + potentialURL + " " + http.unescape(random.choice(hitzelist)))

@hook.command
def var(inp, say=None):
  subreddit =  [
    'Games',
  ]
  say('<Var> ' + reddit_get(random.choice(subreddit)))


@hook.command(autohelp=False)
def movietime(inp):
  ".movietime <query> -- return a random movie from r/fullmoviesonyoutube or search if you supply a query"
  if inp:
    jsonData = http.get_json('http://www.reddit.com/r/fullmoviesonyoutube/search.json?q={}&restrict_sr=on&sort=new&limit=1'.format(inp))
    try:
      movie = jsonData['data']['children'][0]['data']
    except IndexError:
      return 'no results'
  else:
    jsonData = http.get_json('http://www.reddit.com/r/fullmoviesonyoutube.json')
    movie = random.choice(jsonData['data']['children'])['data']
  return http.unescape(movie['title'] + ' ' + movie['url'])