コード例 #1
0
def cs(kenni, input):
    '''.cs <callsign> -- queries qth.com for call sign information'''
    cs = input.group(2).upper()
    try:
        link = "http://www.qth.com/callsign.php?cs=" + uc.decode(web.quote(cs))
    except Exception as e:
        print(e)
        return kenni.say('Failed to obtain data from qth.com')
    page = web.get(link)
    info = re_look.findall(page)
    more_info = re_more.findall(page)
    if info and more_info:
        info = info[0]
        # strip HTML tags from the name field
        # (dropped a dead `name = info[0]` that was immediately overwritten)
        name = re_tag.sub(' ', info[0]).strip()
        # address lines are <br>-separated; the first chunk repeats the name
        address = ', '.join(info[1].split('<br>')[1:]).strip()
        response = '(%s) ' % (web.quote(cs))
        response += 'Name: %s, Address: %s. '  # More information is available at: %s'
        response = response % (uc.decode(name), uc.decode(address))
        # (removed the `extra` dict: it was populated and never read)
        for each in more_info:
            temp = re_tag.sub('', each[1].strip()) or 'N/A'
            response += '%s: %s. ' % (each[0].strip(), temp)
        response += 'More information is available at: %s' % (link)
    else:
        response = 'No matches found.'
    kenni.say(response)
コード例 #2
0
ファイル: apy.py プロジェクト: sine-over-cosine/phenny
def apertium_perword(phenny, input):
    '''Perform APy's tagger, morph, translate, and biltrans functions on individual words.'''
    cmd, cmd_error = strict_check(r'(' + langRE + r')\s+\((.*)\)\s+(.*)',
                                  input.group(1), apertium_perword)
    valid_funcs = {'tagger', 'disambig', 'biltrans', 'translate', 'morph'}

    if cmd_error:
        phenny.say(cmd_error)
        return

    # every requested mode must be one we support
    requested = cmd.group(2).split(' ')
    if not set(requested).issubset(valid_funcs):
        raise GrumbleError(
            'The requested functions must be from the set {:s}.'.format(
                str(valid_funcs)))

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    url = '{:s}/perWord?lang={:s}&modes={:s}&q={:s}'.format(
        phenny.config.APy_url, web.quote(cmd.group(1)),
        '+'.join(requested), web.quote(cmd.group(3)))
    try:
        response = opener.open(url).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    # one result dict per input word, keyed by mode name
    for word in json.loads(response.decode('utf-8')):
        phenny.say(word['input'] + ':')
        for mode in requested:
            phenny.say('  {:9s}: {:s}'.format(mode, ' '.join(word[mode])))
コード例 #3
0
def apertium_generate(phenny, input):
    """Use Apertium APY's generate functionality"""
    lang, text = input.groups()

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    # .../generate?lang=<lang>&q=<text>
    url = APIanalyseURL + '/generate?lang=' + web.quote(lang)
    url += '&q=' + web.quote(text.strip())

    try:
        raw = opener.open(url).read()
    except urllib.error.HTTPError as error:
        # APy error bodies carry a JSON 'explanation' field when available
        raw = error.read()
        err = json.loads(raw.decode('utf-8'))
        if 'explanation' in err:
            phenny.say('The following error occurred: ' + err['explanation'])
        else:
            phenny.say('An error occurred: ' + str(error))
        return

    # result is a list of [generation, original] pairs
    pairs = json.loads(raw.decode('utf-8'))
    messages = [original + " → " + generation for generation, original in pairs]

    more.add_messages(input.nick, phenny,
                      "\n".join(messages),
                      break_up=lambda x, y: x.split('\n'))
コード例 #4
0
ファイル: apy.py プロジェクト: sine-over-cosine/phenny
def translate(phenny, translate_me, input_lang, output_lang='en'):
    """Translate text through an Apertium APy instance.

    Falls back to the public APy server when the bot config does not
    define APy_url. Raises GrumbleError when APy returns no translation.
    """
    opener = urllib.request.build_opener()
    opener.addheaders = headers

    input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
    translate_me = web.quote(translate_me)

    # only a missing config attribute should trigger the fallback;
    # the original bare `except:` would have hidden any other error too
    try:
        apy_url = phenny.config.APy_url
    except AttributeError:
        apy_url = 'http://apy.projectjj.com'

    try:
        response = opener.open(
            '{:s}/translate?q={:s}&langpair={:s}|{:s}'.format(
                apy_url, translate_me, input_lang, output_lang)).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    responseArray = json.loads(response.decode('utf-8'))

    if responseArray['responseData']['translatedText'] == []:
        raise GrumbleError(Apy_errorData)

    translated_text = responseArray['responseData']['translatedText']
    return translated_text
コード例 #5
0
def translate(text, input='auto', output='en'):
    """Translate *text* via Google's unofficial translate_a endpoint.

    Returns (translation, detected_language). When the output language
    ends with '-raw', returns the decoded payload and 'en-raw' instead.
    """
    raw = False
    if output.endswith('-raw'):
        output = output[:-4]
        raw = True

    input = web.quote(input)
    output = web.quote(output.encode('utf-8'))
    text = web.quote(text.encode('utf-8'))

    result = web.get('http://translate.google.com/translate_a/t?' +
                     ('client=t&hl=en&sl=%s&tl=%s&multires=1' %
                      (input, output)) +
                     ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text))

    # the endpoint emits pseudo-JSON with empty slots (",," and "[,")
    # that json.loads rejects; rewrite them into explicit nulls first
    while ',,' in result:
        result = result.replace(',,', ',null,')
    result = result.replace('[,', '[null,')
    data = json.loads(result)

    if raw:
        return str(data), 'en-raw'

    # narrowed from a bare except: only a short or oddly-shaped payload
    # should fall back to the unknown-language marker
    try:
        language = data[2]  # -2][0][0]
    except (IndexError, TypeError):
        language = '?'

    return ''.join(x[0] for x in data[0]), language
コード例 #6
0
ファイル: apy.py プロジェクト: sine-over-cosine/phenny
def apertium_generate(phenny, input):
    '''Use Apertium APy's generate functionality'''
    cmd, cmd_error = strict_check(r'(' + langRE + r')\s+(.*)', input.group(1),
                                  apertium_generate)

    if cmd_error:
        phenny.say(cmd_error)
        return

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    url = '{:s}/generate?lang={:s}&q={:s}'.format(
        phenny.config.APy_analyseURL, web.quote(cmd.group(1)),
        web.quote(cmd.group(2).strip()))
    try:
        response = opener.open(url).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    # APy returns [generation, original] pairs; show them as "orig → gen"
    messages = [original + '  →  ' + generation
                for generation, original in json.loads(response.decode('utf-8'))]

    more.add_messages(phenny, input.nick, messages)
コード例 #7
0
ファイル: apertium_translate.py プロジェクト: frankier/phenny
def apertium_generate(phenny, input):
    """Use Apertium APY's generate functionality"""
    lang, text = input.groups()

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    target = APIanalyseURL + '/generate?lang=' + web.quote(lang)
    target += '&q=' + web.quote(text.strip())

    try:
        body = opener.open(target).read()
    except urllib.error.HTTPError as error:
        # prefer APy's own 'explanation' when the error body carries one
        body = error.read()
        payload = json.loads(body.decode('utf-8'))
        if 'explanation' in payload:
            phenny.say('The following error occurred: ' + payload['explanation'])
        else:
            phenny.say('An error occurred: ' + str(error))
        return

    payload = json.loads(body.decode('utf-8'))
    lines = []
    for generation, original in payload:
        lines.append(original + " → " + generation)

    more.add_messages(input.nick, phenny,
                      "\n".join(lines),
                      break_up=lambda x, y: x.split('\n'))
コード例 #8
0
def urbandict(phenny, input):
    """.urb <word> - Search Urban Dictionary for a definition."""

    word = input.group(2)
    if not word:
        phenny.say(urbandict.__doc__.strip())
        return

    try:
        data = web.get(
            "http://api.urbandictionary.com/v0/define?term={0}".format(
                web.quote(word)))
        data = json.loads(data)
    # narrowed from a bare except, which would also swallow
    # KeyboardInterrupt/SystemExit
    except Exception:
        raise GrumbleError(
            "Urban Dictionary slemped out on me. Try again in a minute.")

    results = data['list']

    if not results:
        phenny.say("No results found for {0}".format(word))
        return

    result = results[0]
    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
        web.quote(word))

    # trim long definitions so the IRC message stays short
    response = "{0} - {1}".format(result['definition'].strip()[:256], url)
    phenny.say(response)
コード例 #9
0
ファイル: urbandict.py プロジェクト: sc0tt/phenny
def urbandict(phenny, input):
    """.urb <word> - Search Urban Dictionary for a definition."""

    word = input.group(2)
    if not word:
        phenny.say(urbandict.__doc__.strip())
        return

    # (removed a dead commented-out urllib opener block)
    try:
        data = web.get(
            "http://api.urbandictionary.com/v0/define?term={0}".format(
                web.quote(word)))
        data = json.loads(data)
    # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
    except Exception:
        raise GrumbleError(
            "Urban Dictionary slemped out on me. Try again in a minute.")

    if data['result_type'] == 'no_results':
        phenny.say("No results found for {0}".format(word))
        return

    result = data['list'][0]
    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
        web.quote(word))

    # trim long definitions so the IRC message stays short
    response = "{0}: {1} - {2}".format(word, result['definition'].strip()[:256], url)
    phenny.say(response)
コード例 #10
0
ファイル: urbandict.py プロジェクト: telnoratti/phenny
def urbandict(phenny, input):
    """.urb <word> - Search Urban Dictionary for a definition."""

    word = input.group(2)
    if not word:
        phenny.say(urbandict.__doc__.strip())
        return

    # (removed a dead commented-out urllib opener block)
    try:
        data = web.get(
            "http://api.urbandictionary.com/v0/define?term={0}".format(
                web.quote(word)))
        data = json.loads(data)
    # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
    except Exception:
        raise GrumbleError(
            "Urban Dictionary slemped out on me. Try again in a minute.")

    if data['result_type'] == 'no_results':
        phenny.say("No results found for {0}".format(word))
        return

    result = data['list'][0]
    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
        web.quote(word))

    # trim long definitions so the IRC message stays short
    response = "{0} - {1}".format(result['definition'].strip()[:256], url)
    phenny.say(response)
コード例 #11
0
ファイル: test_apy.py プロジェクト: evo938938/gracie
    def test_analyze_generate(self, mock_addmsgs, mock_open):
        """Happy paths for apertium_analyse/apertium_generate, plus bad input."""
        # analyze
        words = ['analyze', 'this']
        # simulated APy /analyse payload: [analysis, original-word] pairs
        anas = [['{0}/{0}<tags>'.format(word), word] for word in words]
        self.input.group.return_value = 'eng ' + ' '.join(words)
        mock_open.return_value.read.return_value = bytes(dumps(anas), 'utf-8')
        apy.apertium_analyse(self.phenny, self.input)
        # the command must request /analyse with the language and quoted text
        mock_open.assert_called_once_with('{:s}/analyse?lang={:s}&q={:s}'.format(
            self.phenny.config.APy_analyseURL, 'eng', quote(' '.join(words))))
        # expected bot output lines: "original  →  analysis"
        msgs = ['{:s}  →  {:s}'.format(orig, ana) for ana, orig in anas]
        self.assertEqual(mock_addmsgs.call_args[0][2], msgs)
        self.reset_mocks(mock_open, mock_addmsgs)

        # generate
        gens = [['generate', '^generate<tags>$']]
        self.input.group.return_value = 'eng ^generate<tags>$'
        mock_open.return_value.read.return_value = bytes(dumps(gens), 'utf-8')
        apy.apertium_generate(self.phenny, self.input)
        mock_open.assert_called_once_with('{:s}/generate?lang={:s}&q={:s}'.format(
            self.phenny.config.APy_analyseURL, 'eng', quote('^generate<tags>$')))
        msgs = ['{:s}  →  {:s}'.format(orig, gen) for gen, orig in gens]
        self.assertEqual(mock_addmsgs.call_args[0][2], msgs)
        self.reset_mocks(mock_open, mock_addmsgs)

        # bad input: words-before-language / missing text must be rejected
        self.check_exceptions([' '.join(words), 'eng'], apy.apertium_analyse)
        self.check_exceptions([' '.join(words), 'eng'], apy.apertium_generate)
コード例 #12
0
ファイル: lastfm.py プロジェクト: qinmingyue/phenny
def tasteometer(phenny, input):
    """.taste user1 user2 -- compare two last.fm users' musical taste."""
    input1 = input.group(2)
    if not input1:
        phenny.say("tasteometer: compares two users' musical taste")
        phenny.say("syntax: .taste user1 user2")
        return
    input2 = input.group(3)
    # resolve registered nicks to last.fm usernames, falling back to the
    # raw argument; with no second argument, compare against the caller
    user1 = resolve_username(input1) or input1
    user2 = resolve_username(input2) or input2
    if not user2:
        user2 = resolve_username(input.nick) or input.nick
    try:
        req = web.get(
            "%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s"
            % (APIURL, web.quote(user1), web.quote(user2))
        )
    except web.HTTPError as e:
        # last.fm answers 400 when a user does not exist
        if e.response.status_code == 400:
            phenny.say("uhoh, someone doesn't exist on last.fm, perhaps they need to set user")
        else:
            phenny.say("uhoh. try again later, mmkay?")
        return
    root = etree.fromstring(req.encode("utf-8"))
    score = root.xpath("comparison/result/score")
    if not score:
        phenny.say("something isn't right. have those users scrobbled?")
        return

    score = float(score[0].text)
    if score >= 0.9:
        rating = "Super"
    elif score >= 0.7:
        rating = "Very High"
    elif score >= 0.5:
        rating = "High"
    elif score >= 0.3:
        rating = "Medium"
    elif score >= 0.1:
        rating = "Low"
    else:
        rating = "Very Low"

    artists = root.xpath("comparison/result/artists/artist/name")
    if artists:
        # comprehension instead of list(map(lambda: names.append(...)))
        names = [a.text for a in artists]
        common_artists = "and music they have in common includes: %s" % ", ".join(names)
    else:
        common_artists = ". they don't have any artists in common."

    phenny.say("%s's and %s's musical compatibility rating is %s %s" % (user1, user2, rating, common_artists))
コード例 #13
0
def apertium_perword(phenny, input):
    '''Perform APy's tagger, morph, translate, and biltrans functions on individual words.'''
    cmd = strict_check(r'(' + langRE + r')\s+\((.*)\)\s+(.*)', input.group(2), apertium_perword)
    valid_funcs = {'tagger', 'disambig', 'biltrans', 'translate', 'morph'}

    # reject any requested mode outside the supported set
    modes = cmd.group(2).split(' ')
    if not set(modes).issubset(valid_funcs):
        phenny.say('The requested functions must be from the set {:s}.'.format(str(valid_funcs)))
        return

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    url = '{:s}/perWord?lang={:s}&modes={:s}&q={:s}'.format(
        phenny.config.APy_url, web.quote(cmd.group(1)),
        '+'.join(modes), web.quote(cmd.group(3)))
    try:
        response = opener.open(url).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    # one result dict per input word, keyed by mode name
    for word in json.loads(response.decode('utf-8')):
        phenny.say(word['input'] + ':')
        for mode in modes:
            phenny.say('  {:9s}: {:s}'.format(mode, ' '.join(word[mode])))
コード例 #14
0
ファイル: test_apy.py プロジェクト: goavki/phenny
    def test_analyze_generate(self, mock_addmsgs, mock_open):
        """Check analyse/generate build the right URL and emit 'orig → result' lines."""
        # analyze
        words = ['analyze', 'this']
        # fake APy /analyse response: list of [analysis, original] pairs
        anas = [['{0}/{0}<tags>'.format(word), word] for word in words]
        self.input.group.return_value = 'eng ' + ' '.join(words)
        mock_open.return_value.read.return_value = bytes(dumps(anas), 'utf-8')
        apy.apertium_analyse(self.phenny, self.input)
        # verify the request URL: language plus url-quoted query text
        mock_open.assert_called_once_with('{:s}/analyse?lang={:s}&q={:s}'.format(
            self.phenny.config.APy_analyseURL, 'eng', quote(' '.join(words))))
        msgs = ['{:s}  →  {:s}'.format(orig, ana) for ana, orig in anas]
        self.assertEqual(mock_addmsgs.call_args[0][2], msgs)
        self.reset_mocks(mock_open, mock_addmsgs)

        # generate
        gens = [['generate', '^generate<tags>$']]
        self.input.group.return_value = 'eng ^generate<tags>$'
        mock_open.return_value.read.return_value = bytes(dumps(gens), 'utf-8')
        apy.apertium_generate(self.phenny, self.input)
        mock_open.assert_called_once_with('{:s}/generate?lang={:s}&q={:s}'.format(
            self.phenny.config.APy_analyseURL, 'eng', quote('^generate<tags>$')))
        msgs = ['{:s}  →  {:s}'.format(orig, gen) for gen, orig in gens]
        self.assertEqual(mock_addmsgs.call_args[0][2], msgs)
        self.reset_mocks(mock_open, mock_addmsgs)

        # bad input: argument order swapped / text missing must raise
        self.check_exceptions([' '.join(words), 'eng'], apy.apertium_analyse)
        self.check_exceptions([' '.join(words), 'eng'], apy.apertium_generate)
コード例 #15
0
ファイル: translate.py プロジェクト: KaiCode2/phenny
def translate(text, input='auto', output='en'):
    """Translate *text* via Google's unofficial translate_a endpoint.

    Returns (translation, detected_language); with an output language
    ending in '-raw', returns the decoded payload and 'en-raw'.
    """
    raw = False
    if output.endswith('-raw'):
        output = output[:-4]
        raw = True

    # (removed a dead commented-out urllib opener block)
    input = web.quote(input)
    output = web.quote(output.encode('utf-8'))
    text = web.quote(text.encode('utf-8'))

    result = web.get('http://translate.google.com/translate_a/t?' +
        ('client=t&hl=en&sl=%s&tl=%s&multires=1' % (input, output)) +
        ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text))

    # the service emits pseudo-JSON with empty slots (",," and "[,");
    # rewrite them to explicit nulls so json.loads accepts the payload
    while ',,' in result:
        result = result.replace(',,', ',null,')
    result = result.replace('[,', '[null,')
    data = json.loads(result)

    if raw:
        return str(data), 'en-raw'

    # narrowed from a bare except: only a short/odd payload should fall
    # back to the unknown-language marker
    try:
        language = data[2]  # -2][0][0]
    except (IndexError, TypeError):
        language = '?'

    return ''.join(x[0] for x in data[0]), language
コード例 #16
0
ファイル: search.py プロジェクト: ZetaRift/CompuBot
def wikipedia_search(query):
    """Search English Wikipedia; return 'title - N words URL', or None if no hits."""
    term = web.quote(query.replace('!', ''))
    uri = ('https://en.wikipedia.org/w/api.php?action=query&list=search'
           '&continue=&srsearch=%s&format=json' % term)
    payload = json.loads(web.get(uri))
    if payload['query']['searchinfo']['totalhits'] > 0:
        top = payload['query']['search'][0]
        title = top['title']
        words = str(top['wordcount'])
        page = web.quote(title.replace('!', ''))
        return (title + " - " + words + " words " +
                "https://en.wikipedia.org/wiki/" + page)
コード例 #17
0
ファイル: search.py プロジェクト: JordanKinsley/PinkiePyBot
def wikipedia_search(query):
    """Query the English Wikipedia search API and summarize the top hit."""
    q = web.quote(query.replace('!', ''))
    uri = ('https://en.wikipedia.org/w/api.php?action=query&list=search'
           '&continue=&srsearch=%s&format=json' % q)
    data = json.loads(web.get(uri))
    hits = data['query']['searchinfo']['totalhits']
    if hits > 0:
        first = data['query']['search'][0]
        title = first['title']
        count = str(first['wordcount'])
        link = "https://en.wikipedia.org/wiki/" + web.quote(title.replace('!', ''))
        return (title + " - " + count + " words " + link)
コード例 #18
0
ファイル: functions.py プロジェクト: KaiCode2/phenny
def translate(translate_me, input_lang, output_lang='en'):
	"""Translate text through api.apertium.org's JSON translate service."""
	input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
	translate_me = web.quote(translate_me)

	path = '/json/translate?q=%s&langpair=%s|%s' % (
		translate_me, input_lang, output_lang)
	responseArray = json.loads(get_page('api.apertium.org', path))

	# non-200 service status carries a human-readable detail string
	if int(responseArray['responseStatus']) != 200:
		raise GrumbleError(APIerrorHttp % (responseArray['responseStatus'],
			responseArray['responseDetails']))
	if responseArray['responseData']['translatedText'] == []:
		raise GrumbleError(APIerrorData)

	return responseArray['responseData']['translatedText']
コード例 #19
0
def apertium_calccoverage(phenny, input):
    '''Calculate translation coverage for a language and a given input.'''
    cmd = strict_check(r'(' + langRE + r')\s+(.*)', input.group(2),
                       apertium_calccoverage)

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    url = '{:s}/calcCoverage?lang={:s}&q={:s}'.format(
        phenny.config.APy_url, web.quote(cmd.group(1)),
        web.quote(cmd.group(2).strip()))
    try:
        response = opener.open(url).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    # APy replies with a one-element list holding the coverage fraction
    coverage = json.loads(response.decode('utf-8'))[0]
    phenny.say('Coverage is {:.1%}'.format(coverage))
コード例 #20
0
ファイル: urbandict.py プロジェクト: evo938938/gracie
def get_definition(phenny, word, to_user=None):
    """Look up *word* on Urban Dictionary and say the top definition."""
    payload = json.loads(web.get(API_URL.format(web.quote(word))))
    listing = payload['list']

    if not listing:
        phenny.say("No results found for {0}".format(word))
        return

    top = listing[0]
    url = WEB_URL.format(web.quote(word))

    # truncate long definitions so the message stays IRC-sized
    phenny.say("{0} - {1}".format(top['definition'].strip()[:256], url),
               target=to_user)
コード例 #21
0
ファイル: url.py プロジェクト: auscompgeek/jenni
    def remote_call():
        """Fetch the enclosing scope's `uri` through the tumbolia remote-python proxy.

        Returns (True, parsed_json) on success, or (False, raw_text) when
        the proxy's response is not valid JSON.
        """
        # NOTE: Python 2 code (urllib2 on the remote side, `except Exception, e`).
        pyurl = u'https://tumbolia.appspot.com/py/'
        # build a small Python program to execute remotely: fetch the URI
        # and print its headers, body, and final URL as one JSON object
        code = 'import simplejson;'
        code += "req=urllib2.Request(%s, headers={'Accept':'*/*'});"
        code += "req.add_header('User-Agent', 'Mozilla/5.0');"
        code += "u=urllib2.urlopen(req);"
        code += "rtn=dict();"
        code += "rtn['headers'] = u.headers.dict;"
        code += "contents = u.read();"
        code += "con = str();"
        # remote body decode: try utf-8 first, fall back to iso-8859-1
        code += r'''exec "try: con=(contents).decode('utf-8')\n'''
        code += '''except: con=(contents).decode('iso-8859-1')";'''
        code += "rtn['read'] = con;"
        code += "rtn['url'] = u.url;"
        code += "rtn['geturl'] = u.geturl();"
        code += "print simplejson.dumps(rtn)"
        query = code % repr(uri)
        temp = web.quote(query)
        u = web.get(pyurl + temp)

        try:
            useful = json.loads(u)
            return True, useful
        except Exception, e:
            #print "%s -- Failed to parse json from web resource. -- %s" % (time.time(), str(e))
            return False, str(u)
コード例 #22
0
ファイル: validate.py プロジェクト: embolalia/jenni
def val(jenni, input):
    """Check a webpage using the W3C Markup Validator."""
    if not input.group(2):
        return jenni.reply("Nothing to validate.")
    uri = input.group(2)
    if not uri.startswith('http://'):
        uri = 'http://' + uri

    path = '/check?uri=%s;output=xml' % web.quote(uri)
    info = web.head('http://validator.w3.org' + path)

    result = uri + ' is '

    # NOTE(review): web.head appears to return a [body, status] list on
    # HTTP failure and a header mapping on success -- confirm in web module
    if isinstance(info, list):
        return jenni.say('Got HTTP response %s' % info[1])

    # use `in` instead of the deprecated/removed dict.has_key()
    if 'X-W3C-Validator-Status' in info:
        result += str(info['X-W3C-Validator-Status'])
        if info['X-W3C-Validator-Status'] != 'Valid':
            if 'X-W3C-Validator-Errors' in info:
                n = int(info['X-W3C-Validator-Errors'].split(' ')[0])
                if n != 1:
                    result += ' (%s errors)' % n
                else:
                    result += ' (%s error)' % n
    else:
        result += 'Unvalidatable: no X-W3C-Validator-Status'

    jenni.reply(result)
コード例 #23
0
ファイル: calc.py プロジェクト: downquark/phenny
def py(phenny, input):
    """Evaluate a Python expression via the tumbolia appspot service."""
    query = input.group(2) or ""
    answer = web.get('http://tumbolia.appspot.com/py/' + web.quote(query))
    if answer:
        phenny.say(answer)
    else:
        phenny.reply('Sorry, no result.')
コード例 #24
0
def calculate(phenny, input):
    """Calculate things via the calcatraz API."""
    if not input.group(2):
        return phenny.reply("Nothing to calculate.")
    q = input.group(2)
    # normalize symbols the API does not understand
    q = q.replace('\xcf\x95', 'phi')  # utf-8 U+03D5
    q = q.replace('\xcf\x80', 'pi')  # utf-8 U+03C0
    q = q.replace('÷', '/')
    uri = 'https://www.calcatraz.com/calculator/api?c=' + web.quote(q)
    answer = web.get(uri)
    if not answer:
        return phenny.say('Sorry, no result.')

    # responses look like "<echo>;<result>"; when the separator is
    # missing, fall back to the whole string
    parts = answer.split(";")
    answer = parts[1] if len(parts) >= 2 else parts[0]
    answer = answer.replace('  ', '')
    # (removed a block of dead commented-out cleanup code)
    if answer.startswith('answer'):
        return phenny.say('Sorry, no result.')
    return phenny.say(answer)
コード例 #25
0
ファイル: apy.py プロジェクト: sine-over-cosine/phenny
def apertium_identlang(phenny, input):
    '''Identify the language for a given input.'''
    text, text_error = strict_check(r'.*', input.group(1), apertium_identlang)

    # report parse errors BEFORE touching the match object; the original
    # called text.group(0) first, which can crash when strict_check fails.
    # This also matches the order used by the other APy commands.
    if text_error:
        phenny.say(text_error)
        return

    text = text.group(0)

    opener = urllib.request.build_opener()
    opener.addheaders = headers

    try:
        response = opener.open('{:s}/identifyLang?q={:s}'.format(
            phenny.config.APy_url, web.quote(text.strip()))).read()
    except urllib.error.HTTPError as error:
        handle_error(error)

    # parse outside the try so only HTTP failures are routed to
    # handle_error, matching the sibling commands
    jsdata = json.loads(response.decode('utf-8'))

    messages = []

    for key, value in jsdata.items():
        messages.append(key + ' = ' + str(value))

    more.add_messages(phenny, input.nick, messages)
コード例 #26
0
ファイル: weather.py プロジェクト: mutantmonkey/phenny
def location(q):
    """Geocode *q* via Nominatim; return (lat, lon) or (None, None)."""
    template = 'https://nominatim.openstreetmap.org/search?{type}={query}&format=json'
    # all-digit queries are treated as postal codes, anything else as free text
    kind = 'postalcode' if q.isdigit() else 'q'
    uri = template.format(type=kind, query=web.quote(q))
    data = json.loads(web.get(uri))

    if not data:
        return None, None

    return float(data[0]['lat']), float(data[0]['lon'])
コード例 #27
0
ファイル: rule34.py プロジェクト: vtluug/phenny
def rule34(phenny, input):
    """.rule34 <query> - Rule 34: If it exists there is p**n of it."""

    q = input.group(2)
    if not q:
        phenny.say(rule34.__doc__.strip())
        return

    try:
        req = web.get(
            "http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(
                web.quote(q)))
    # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
    except Exception:
        raise GrumbleError(
            "THE INTERNET IS F*****G BROKEN. Please try again later.")

    doc = lxml.html.fromstring(req)
    doc.make_links_absolute('http://rule34.xxx/')
    thumbs = doc.find_class('thumb')
    if not thumbs:
        phenny.reply("You just broke Rule 34! Better start uploading...")
        return

    # first thumbnail's anchor holds the post link
    try:
        link = thumbs[0].find('a').attrib['href']
    except AttributeError:
        raise GrumbleError(
            "THE INTERNET IS F*****G BROKEN. Please try again later.")

    response = '!!NSFW!! -> {0} <- !!NSFW!!'.format(link)
    phenny.reply(response)
コード例 #28
0
def derpibooru_search(query, phenny):
    """Search derpibooru and describe a random matching image, or None if no hits."""
    query = web.quote(query.replace('!', ''))
    # include the configured API key when the bot has one
    if hasattr(phenny.config, 'derpibooru_key'):
        uri = 'https://derpibooru.org/search.json?q=' + query + '&key=' + phenny.config.derpibooru_key
    else:
        uri = 'https://derpibooru.org/search.json?q=' + query
    jsonstring = json.loads(web.get(uri))
    if jsonstring['total'] <= 0:
        return

    results = choice(jsonstring['search'])
    url = 'https:' + results['image']
    uploader = results['uploader']
    uploaded = results['created_at']
    uploadedformat = None
    # dateutil is optional; without it (or on any parse failure) we fall
    # back to a message without the upload timestamp
    try:
        import dateutil.parser
        dt = dateutil.parser.parse(uploaded)
        ts = time.gmtime(calendar.timegm(dt.timetuple()))
        uploadedformat = time.strftime('%A %B %d, %Y at %I:%M:%S %p', ts)
    except Exception:  # narrowed from a bare except
        pass
    if uploadedformat:
        return url + ' uploaded by ' + uploader + ' on ' + uploadedformat
    return url + ' uploaded by ' + uploader
コード例 #29
0
ファイル: test_apy.py プロジェクト: evo938938/gracie
    def test_perword(self, mock_open):
        """apertium_perword: per-word tagger/morph output and rejection of bad input."""
        # valid perword functions
        words = ['two', 'words']
        funcs = ['tagger', 'morph']
        # simulated APy /perWord payload: one dict per input word,
        # keyed by mode name
        per = [
            {'input': 'two', 'tagger': ['two<tags>'], 'morph': ['two<tags1>', 'two<tags2>']},
            {'input': 'words', 'tagger': ['words<tags>'], 'morph': ['words<tags1>', 'words<tags2>']}
        ]
        self.input.group.return_value = 'fra ({:s}) {:s}'.format(' '.join(funcs), ' '.join(words))
        mock_open.return_value.read.return_value = bytes(dumps(per), 'utf-8')
        apy.apertium_perword(self.phenny, self.input)
        # the request must carry lang, '+'-joined modes, and quoted text
        mock_open.assert_called_once_with('{:s}/perWord?lang={:s}&modes={:s}&q={:s}'.format(
            self.phenny.config.APy_url, 'fra', '+'.join(funcs), quote(' '.join(words))))
        # expected say() sequence: word header, then one line per mode
        calls = []
        for word in per:
            calls.append(mock.call(word['input'] + ':'))
            for func in funcs:
                calls.append(mock.call('  {:9s}: {:s}'.format(func, ' '.join(word[func]))))
        self.assertEqual(self.phenny.say.call_args_list, calls)
        self.reset_mocks(self.phenny, mock_open)

        # bad input: unknown mode, then assorted malformed argument shapes
        self.check_exceptions(['fra (tagger nonfunc) word'], apy.apertium_perword,
                              'invalid perword function')
        self.check_exceptions(['fra', 'fra (tagger)', '(tagger)', 'fra word',
                               '(tagger morph) word'], apy.apertium_perword)
コード例 #30
0
ファイル: url.py プロジェクト: bookfund/jenni
def unbitly(jenni, input):
    """Expand a shortened URL by following redirects to its final destination."""
    url = input.group(2)
    if not url:
        # no argument: fall back to the last URL seen in this channel
        if input.sender in jenni.last_seen_uri:
            url = jenni.last_seen_uri[input.sender]
        else:
            return jenni.say('No URL provided')
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    pyurl = u'https://tumbolia.appspot.com/py/'
    # small Python 2 program executed on the tumbolia proxy: fetch the
    # URL and print the post-redirect address
    code = "req=urllib2.Request(%s, headers={'Accept':'text/html'});"
    code += "req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1 "
    code += "rv:17.0) Gecko/20100101 Firefox/17.0'); u=urllib2.urlopen(req);"
    code += 'print u.geturl();'
    url = url.replace("'", r"\'")
    query = code % repr(url.strip())
    try:
        temp = web.quote(query)
        u = web.get(pyurl + temp)
    except Exception:  # narrowed from a bare except; keep best-effort behavior
        return jenni.say('Failed to grab URL: %s' % (url))
    if u.startswith(('http://', 'https://')):
        jenni.say(u)
    else:
        jenni.say('Failed to obtain final destination.')
コード例 #31
0
ファイル: wiktionary.py プロジェクト: LuccoJ/phenny
def etymology(phenny, word):
    """Scrape the Etymology section for *word*; return a short summary or None."""
    ety_value = None
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [(
            'User-agent',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17'
        )]
        bytes = opener.open(uri.format(web.quote(word)))
        html = bytes.read().decode('utf-8')
        ety_value = get_between_all(html, '">Etymology</span></h3>', '</p>')
        ety_value = " ".join(ety_value)
        # strip tags/entities, rewrite "from" chains as arrows, and drop
        # bracketed reference markers
        ety_value = re.compile(r'<[^<]*?/?>').sub('', ety_value)
        ety_value = ety_value.replace('&#160;', '')
        ety_value = ety_value.replace('From ', '← ')
        ety_value = ety_value.replace(', from', ' ←')
        ety_value = ety_value.replace('from ', '← ')
        ety_value = word + ": " + ety_value.replace(".", '') + "."
        ety_value = r_sqrbracket.sub('', ety_value)

        if len(ety_value) > 300:
            ety_value = ety_value[:295] + " [...]"
    # deliberately best-effort: any scraping failure yields None instead
    # of crashing the bot (narrowed from a bare except)
    except Exception:
        ety_value = None
    return ety_value
コード例 #32
0
ファイル: calc.py プロジェクト: embolalia/jenni
def wa(jenni, input):
    """Wolfram Alpha calculator"""
    # NOTE: Python 2 only code ('string_escape' codec, unichr, string.split,
    # old-style HTMLParser module). Do not run under Python 3 as-is.
    if not input.group(2):
        return jenni.reply("No search term.")
    query = input.group(2).encode('utf-8')
    # The query is proxied through a tumbolia appspot instance rather than
    # hitting Wolfram Alpha directly.
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        # 45-second timeout; '+' is pre-escaped so it survives URL quoting.
        answer = web.get(uri + web.quote(query.replace('+', '%2B')), 45)
    except timeout as e:
        return jenni.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        # Undo backslash escapes and HTML entities in the proxied response.
        answer = answer.decode('string_escape')
        answer = HTMLParser.HTMLParser().unescape(answer)
        #This might not work if there are more than one instance of escaped unicode chars
        #But so far I haven't seen any examples of such output examples from Wolfram Alpha
        match = re.search('\\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            # Replace a single '\:XXXX' escape with the actual unicode char.
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\:'+char_code, char)
        # Responses look like "<input>;<result>"; anything else is an error.
        waOutputArray = string.split(answer, ";")
        if(len(waOutputArray) < 2):
            jenni.say('[WOLFRAM ERROR]'+answer)
        else:
            
            jenni.say('[WOLFRAM] ' + waOutputArray[0]+" = "+waOutputArray[1])
        waOutputArray = []
    else: jenni.reply('Sorry, no result.')
コード例 #33
0
ファイル: search.py プロジェクト: ZetaRift/CompuBot
def dictionary_search(query, phenny):
    """Look up *query* on Wordnik (wiktionary source) and format the result.

    Tries the query verbatim, then lower-cased, then capitalized, mirroring
    the original's nested fallbacks. Returns a "word - part - def - attr"
    string, None when nothing matched, or an error string when the API key
    is not configured.
    """
    if not hasattr(phenny.config, 'wordnik_api_key'):
        return 'Sorry but you need to set your wordnik_api_key in the config file.'
    query = query.replace('!', '')
    query = web.quote(query)
    jsonstring = None
    # Try successively normalized spellings until one returns a result.
    for candidate in (query, query.lower(), string.capwords(query)):
        try:
            uri = 'https://api.wordnik.com/v4/word.json/' + candidate + '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key
            rec_bytes = web.get(uri)
            jsonstring = json.loads(rec_bytes)
            jsonstring[0]['word']  # raises if the response has no entries
            break
        except Exception:
            continue
    try:
        dword = jsonstring[0]['word']
    except Exception:
        return None
    if dword:
        ddef = jsonstring[0]['text']
        dattr = jsonstring[0]['attributionText']
        dpart = jsonstring[0]['partOfSpeech']
        dpart = dpart.replace('-', ' ')
        dpart = string.capwords(dpart)
        return (dword + ' - ' + dpart + ' - ' + ddef + ' - ' + dattr)
コード例 #34
0
ファイル: wikipedia.py プロジェクト: Nuruddinjr/phenny
def parse_wiki_page(url, term, section = None):
    """Fetch a wiki page and return its first sentence (or a section's).

    :param url: full wiki page URL.
    :param term: the looked-up term (unused here; kept for interface parity).
    :param section: optional section anchor id to extract instead of the lead.
    :return: a quoted sentence plus the URL, or an error message string.
    """
    try:
        # Quote the URL but restore the scheme's ':' that quoting mangles.
        web_url = web.quote(url).replace("%3A", ":", 1)
        html = str(web.get(web_url))
    except Exception:
        # Narrowed from a bare except (which also caught KeyboardInterrupt).
        return "A wiki page does not exist for that term."
    page = lxml.html.fromstring(html)
    if section is not None:
        text = page.find(".//span[@id='%s']" % section)

        if text is None:
            return "That subsection does not exist."
        text = text.getparent().getnext()

        #a div tag may come before the text
        while text.tag is not None and text.tag != "p":
            text = text.getnext()
        url += "#" + format_term_display(section)
    else:
        #Get first paragraph
        text = page.get_element_by_id('mw-content-text').find('p')

    sentences = text.text_content().split(". ")
    sentence = '"' + sentences[0] + '"'

    # Trim so sentence + ' - ' + url fits in an IRC-safe 430 bytes.
    maxlength = 430 - len((' - ' + url).encode('utf-8'))
    if len(sentence.encode('utf-8')) > maxlength:
        sentence = sentence.encode('utf-8')[:maxlength].decode('utf-8', 'ignore')
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    return sentence + ' - ' + url
コード例 #35
0
ファイル: search.py プロジェクト: JordanKinsley/PinkiePyBot
def dictionary_search(query, phenny): 
    """Look up *query* on Wordnik (wiktionary source) and format the result.

    Tries the query verbatim, then lower-cased, then capitalized, mirroring
    the original's nested fallbacks. Returns a "word - part - def - attr"
    string, None when nothing matched, or an error string when the API key
    is not configured.
    """
    if not hasattr(phenny.config, 'wordnik_api_key'):
        return 'Sorry but you need to set your wordnik_api_key in the config file.'
    query = query.replace('!', '')
    query = web.quote(query)
    jsonstring = None
    # Try successively normalized spellings until one returns a result.
    for candidate in (query, query.lower(), string.capwords(query)):
        try:
            uri = 'http://api.wordnik.com/v4/word.json/' + candidate + '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key
            rec_bytes = web.get(uri)
            jsonstring = json.loads(rec_bytes)
            jsonstring[0]['word']  # raises if the response has no entries
            break
        except Exception:
            continue
    try:
        dword = jsonstring[0]['word']
    except Exception:
        return None
    if dword:
        ddef = jsonstring[0]['text']
        dattr = jsonstring[0]['attributionText']
        dpart = jsonstring[0]['partOfSpeech']
        dpart = dpart.replace('-', ' ')
        dpart = string.capwords(dpart)
        return (dword + ' - ' + dpart + ' - ' + ddef + ' - ' + dattr)
コード例 #36
0
def parse_wiki_page(url, term, section=None):
    """Fetch a wiki page and return its first sentence (or a section's).

    :param url: full wiki page URL.
    :param term: the looked-up term (unused here; kept for interface parity).
    :param section: optional section anchor id to extract instead of the lead.
    :return: a quoted sentence plus the URL, or an error message string.
    """
    try:
        # Quote the URL but restore the scheme's ':' that quoting mangles.
        web_url = web.quote(url).replace("%3A", ":", 1)
        html = str(web.get(web_url))
    except Exception:
        # Narrowed from a bare except (which also caught KeyboardInterrupt).
        return "A wiki page does not exist for that term."
    page = lxml.html.fromstring(html)
    if section is not None:
        text = page.find(".//span[@id='%s']" % section)

        if text is None:
            return "That subsection does not exist."
        text = text.getparent().getnext()

        content_tags = ["p", "ul", "ol"]
        #a div tag may come before the text
        while text.tag is not None and text.tag not in content_tags:
            text = text.getnext()
        url += "#" + format_term_display(section)
    else:
        #Get first paragraph
        text = page.get_element_by_id('mw-content-text').find('.//p')

    sentences = [x.strip() for x in text.text_content().split(".")]
    sentence = '"' + sentences[0] + '"'

    # Trim so sentence + ' - ' + url fits in an IRC-safe 430 bytes.
    maxlength = 430 - len((' - ' + url).encode('utf-8'))
    if len(sentence.encode('utf-8')) > maxlength:
        sentence = sentence.encode('utf-8')[:maxlength].decode(
            'utf-8', 'ignore')
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    return sentence + ' - ' + url
コード例 #37
0
ファイル: calc.py プロジェクト: PinkieThePrankster/PinkieBot
def calculate(phenny, input):
    """Calculate things."""
    expression = input.group(2)
    if not expression:
        return phenny.reply("Nothing to calculate.")
    # Normalise a few symbols the calcatraz API doesn't accept verbatim.
    for old, new in (('\xcf\x95', 'phi'),  # utf-8 U+03D5
                     ('\xcf\x80', 'pi'),   # utf-8 U+03C0
                     ('÷', '/')):
        expression = expression.replace(old, new)
    uri = 'https://www.calcatraz.com/calculator/api?c=' + web.quote(expression)
    answer = web.get(uri)
    if not answer:
        return phenny.say('Sorry, no result.')
    # Responses are "echo;result"; a lone field means there is no echo part.
    fields = answer.split(";")
    result = fields[1] if len(fields) >= 2 else fields[0]
    result = result.replace('  ', '')
    if re.compile('answer').match(result):
        # The API answers literally with "answer ..." when it has no result.
        return phenny.say('Sorry, no result.')
    else:
        return phenny.say(result)
コード例 #38
0
ファイル: url.py プロジェクト: ahluntang/jenni
    def remote_call():
        # Fetch `uri` (from the enclosing scope) via a tumbolia appspot
        # "py" evaluator: a Python 2 snippet is assembled as a string,
        # URL-quoted, and executed remotely; it prints a JSON dict with
        # the response headers, the first 32 KiB of the body, and the
        # final URL after redirects.
        pyurl = u'https://tumbolia.appspot.com/py/'
        code = 'import simplejson;'
        code += 'opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(),'
        code += 'urllib2.BaseHandler(), urllib2.HTTPHandler(),'
        code += 'urllib2.HTTPRedirectHandler(), urllib2.HTTPErrorProcessor(),'
        code += 'urllib2.UnknownHandler());'
        code += 'urllib2.install_opener(opener);'
        code += "req=urllib2.Request(%s, headers={'Accept':'*/*'});"
        code += "req.add_header('User-Agent', %s);"
        code += "u=urllib2.urlopen(req);"
        code += "rtn=dict();"
        code += "rtn['headers'] = u.headers.dict;"
        code += "contents = u.read(32768);"
        code += "con = str();"
        # Remote-side charset fallback: try utf-8, then iso-8859-1.
        code += r'''exec "try: con=(contents).decode('utf-8')\n'''
        code += '''except: con=(contents).decode('iso-8859-1')";'''
        code += "rtn['read'] = con;"
        code += "rtn['url'] = u.url;"
        code += "rtn['geturl'] = u.geturl();"
        code += "print simplejson.dumps(rtn)"
        query = code % (repr(uri), repr(USER_AGENT))
        temp = web.quote(query)
        u = web.get(pyurl + temp)

        # Returns (ok, payload): (True, dict) on parseable JSON,
        # otherwise (False, raw-response-string).
        try:
            useful = json.loads(u)
            return True, useful
        except Exception, e:  # Python 2 except syntax; file is py2-only.
            #print "%s -- Failed to parse json from web resource. -- %s" % (time.time(), str(e))
            return False, str(u)
コード例 #39
0
ファイル: weather.py プロジェクト: vtluug/phenny
def location(q):
    """Geocode *q* with Nominatim; return (latitude, longitude) floats,
    or (None, None) when there is no match."""
    base = 'https://nominatim.openstreetmap.org/search?{type}={query}&format=json'
    # All-digit queries are treated as postal codes, anything else as free text.
    kind = 'postalcode' if q.isdigit() else 'q'
    uri = base.format(type=kind, query=web.quote(q))

    data = json.loads(web.get(uri))
    if not data:
        return None, None

    top = data[0]
    return float(top['lat']), float(top['lon'])
コード例 #40
0
ファイル: rule34.py プロジェクト: KaiCode2/phenny
def rule34(phenny, input):
    """.rule34 <query> - Rule 34: If it exists there is p**n of it."""

    q = input.group(2)
    if not q:
        phenny.say(rule34.__doc__.strip())
        return

    try:
        req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(web.quote(q)))
    except Exception as e:
        # Narrowed from a bare except; chain the cause for debugging.
        raise GrumbleError("THE INTERNET IS F*****G BROKEN. Please try again later.") from e

    doc = lxml.html.fromstring(req)
    doc.make_links_absolute('http://rule34.xxx/')
    thumbs = doc.find_class('thumb')
    if len(thumbs) <= 0:
        phenny.reply("You just broke Rule 34! Better start uploading...")
        return

    try:
        # First thumbnail's link is the top result.
        link = thumbs[0].find('a').attrib['href']
    except AttributeError as e:
        raise GrumbleError("THE INTERNET IS F*****G BROKEN. Please try again later.") from e

    response = '!!NSFW!! -> {0} <- !!NSFW!!'.format(link)
    phenny.reply(response)
コード例 #41
0
ファイル: url.py プロジェクト: ahluntang/jenni
def unbitly(jenni, input):
    """Resolve a (possibly shortened) URL to its final destination.

    Falls back to the channel's last-seen URL when none is supplied; the
    actual fetch is proxied through a tumbolia appspot Python evaluator.
    """
    url = input.group(2)
    if not url:
        # NOTE(review): checks hasattr(jenni, 'last_seen_uri') but then reads
        # jenni.bot.last_seen_uri -- possibly a mismatch; confirm which
        # object actually carries the attribute.
        if hasattr(
                jenni,
                'last_seen_uri') and input.sender in jenni.bot.last_seen_uri:
            url = jenni.bot.last_seen_uri[input.sender]
        else:
            return jenni.say('No URL provided')
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    pyurl = u'https://tumbolia.appspot.com/py/'
    # Python 2 snippet executed remotely; it follows redirects and prints
    # the final URL.
    code = "req=urllib2.Request(%s, headers={'Accept':'*/*'});"
    code += "req.add_header('User-Agent', %s);"
    code += "u = urllib2.urlopen(req);"
    code += 'print u.geturl();'
    url = url.replace("'", r"\'")
    query = code % (repr(url.strip()), repr(USER_AGENT))
    try:
        temp = web.quote(query)
        u = web.get(pyurl + temp)
    except Exception:
        # Narrowed from a bare except, which also caught KeyboardInterrupt.
        return jenni.say('Failed to grab URL: %s' % (url))
    if u.startswith(('http://', 'https://')):
        jenni.say(u)
    else:
        jenni.say('Failed to obtain final destination.')
コード例 #42
0
ファイル: validate.py プロジェクト: vigneshv59/phenny
def val(phenny, input):
    """Check a webpage using the W3C Markup Validator."""
    if not input.group(2):
        return phenny.reply("Nothing to validate.")
    uri = input.group(2)
    # Fixed: only 'http://' was checked before, so an 'https://...' input
    # was mangled into 'http://https://...'.
    if not uri.startswith(('http://', 'https://')):
        uri = 'http://' + uri

    path = '/check?uri=%s;output=xml' % web.quote(uri)
    info = web.head('http://validator.w3.org' + path)

    result = uri + ' is '

    # web.head returns a list when the HTTP request itself failed.
    if isinstance(info, list):
        return phenny.say('Got HTTP response %s' % info[1])

    # The validator reports its verdict in response headers.
    if 'X-W3C-Validator-Status' in info:
        result += str(info['X-W3C-Validator-Status'])
        if info['X-W3C-Validator-Status'] != 'Valid':
            if 'X-W3C-Validator-Errors' in info:
                n = int(info['X-W3C-Validator-Errors'].split(' ')[0])
                # Pluralise the error count.
                if n != 1:
                    result += ' (%s errors)' % n
                else:
                    result += ' (%s error)' % n
    else:
        result += 'Unvalidatable: no X-W3C-Validator-Status'

    phenny.reply(result)
コード例 #43
0
ファイル: test_apy.py プロジェクト: goavki/phenny
    def test_perword(self, mock_open):
        # Happy path: mock the APy HTTP response and check both the request
        # URL and the per-word output lines sent to the channel.
        # valid perword functions
        words = ['two', 'words']
        funcs = ['tagger', 'morph']
        per = [
            {'input': 'two', 'tagger': ['two<tags>'], 'morph': ['two<tags1>', 'two<tags2>']},
            {'input': 'words', 'tagger': ['words<tags>'], 'morph': ['words<tags1>', 'words<tags2>']}
        ]
        self.input.group.return_value = 'fra ({:s}) {:s}'.format(' '.join(funcs), ' '.join(words))
        mock_open.return_value.read.return_value = bytes(dumps(per), 'utf-8')
        apy.apertium_perword(self.phenny, self.input)
        # Exactly one request, with funcs '+'-joined and the query quoted.
        mock_open.assert_called_once_with('{:s}/perWord?lang={:s}&modes={:s}&q={:s}'.format(
            self.phenny.config.APy_url, 'fra', '+'.join(funcs), quote(' '.join(words))))
        calls = []
        # Expected output: "<word>:" followed by one indented line per func.
        for word in per:
            calls.append(mock.call(word['input'] + ':'))
            for func in funcs:
                calls.append(mock.call('  {:9s}: {:s}'.format(func, ' '.join(word[func]))))
        self.assertEqual(self.phenny.say.call_args_list, calls)
        self.reset_mocks(self.phenny, mock_open)

        # bad input
        self.check_exceptions(['fra (tagger nonfunc) word'], apy.apertium_perword,
                              'invalid perword function')
        self.check_exceptions(['fra', 'fra (tagger)', '(tagger)', 'fra word',
                               '(tagger morph) word'], apy.apertium_perword)
コード例 #44
0
ファイル: hs.py プロジェクト: KaiCode2/phenny
def search(query):
    """Query the directory service; return a list of attribute dicts per
    entry, or False when there are no results."""
    try:
        response = web.get(SEARCH_URL.format(web.quote(query)), verify=False)
    except (web.ConnectionError, web.HTTPError):
        raise GrumbleError("THE INTERNET IS F*****G BROKEN. Please try again later.")

    # Searching for fewer than 3 characters yields a blank XML page.
    if not response:
        return False

    root = lxml.etree.fromstring(response.encode('utf-8'))
    entries = root.findall('{0}directory-entries/{0}entry'.format(NS))
    if not entries:
        return False

    return [
        {attr.attrib['name']: attr[0].text
         for attr in entry.findall('{0}attr'.format(NS))}
        for entry in entries
    ]
コード例 #45
0
ファイル: rule34.py プロジェクト: TheDamnedScribe/PinkiePyBot
def derpibooru_search(query, phenny):
    """Search derpibooru.org for *query* and return a random hit.

    Returns "<url> uploaded by <user> [on <date>]" or None when there are
    no results. The date is only included when dateutil is importable.
    """
    query = query.replace('!', '')
    query = web.quote(query)
    if hasattr(phenny.config, 'derpibooru_key'):
        uri = 'https://derpibooru.org/search.json?q=' + query + '&key=' + phenny.config.derpibooru_key
    else:
        uri = 'https://derpibooru.org/search.json?q=' + query
    rec_bytes = web.get(uri)
    jsonstring = json.loads(rec_bytes)
    dhits = jsonstring['total']
    if dhits > 0:
        results = choice(jsonstring['search'])
        url = 'https:' + results['image']
        uploader = results['uploader']
        uploaded = results['created_at']
        try:
            # dateutil is optional; fall back to a date-less reply.
            import dateutil.parser
            isdateutil = True
            dt = dateutil.parser.parse(uploaded)
            timestamp1 = calendar.timegm(dt.timetuple())
            timestamp1 = time.gmtime(timestamp1)
            # Fixed: '%G' is the ISO 8601 week-based year, which differs
            # from the calendar year around New Year; '%Y' is intended.
            uploadedformat = time.strftime('%A %B %d, %Y at %I:%M:%S %p',
                                           timestamp1)
        except Exception:
            # Narrowed from a bare except (import or parse failure).
            isdateutil = False
        if isdateutil is True:
            return url + ' uploaded by ' + uploader + ' on ' + uploadedformat
        else:
            return url + ' uploaded by ' + uploader
    else:
        return
コード例 #46
0
ファイル: search.py プロジェクト: KaiCode2/phenny
def duck_search(query): 
    """Return the first DuckDuckGo HTML result for *query*, or None."""
    cleaned = web.quote(query.replace('!', ''))
    page = web.get('http://duckduckgo.com/html/?q=%s&kl=uk-en' % cleaned)
    hit = r_duck.search(page)
    if hit:
        return web.decode(hit.group(1))
コード例 #47
0
ファイル: search.py プロジェクト: ZetaRift/CompuBot
def abbreviate_search(query, phenny):
    """Look up abbreviations of *query* on the Acromine service.

    Returns a sentence listing up to three abbreviations, None when there
    are no results, or an error string when the response is unparseable.
    (Replaces the original's four levels of bare try/except fallbacks.)
    """
    query = query.replace('!', '')
    webquery = web.quote(query)
    uri = 'http://www.nactem.ac.uk/software/acromine/dictionary.py?lf=' + webquery
    rec_bytes = web.get(uri, isSecure=False)
    jsonstring = json.loads(rec_bytes)
    # Collect up to three 'sf' (short form) values, stopping at the first
    # entry that is missing or malformed, mirroring the old fallbacks.
    abbrs = []
    try:
        for entry in jsonstring[:3]:
            abbrs.append(entry['sf'])
    except (KeyError, TypeError):
        if not abbrs:
            if not jsonstring:
                return
            return 'There was an error parsing the json data'
    if not abbrs:
        return
    if len(abbrs) >= 3:
        return query + ' could be abbreviated as ' + abbrs[0] + ' or ' + abbrs[1] + ' or ' + abbrs[2]
    if len(abbrs) == 2:
        return query + ' could be abbreviated as ' + abbrs[0] + ' or ' + abbrs[1]
    return '1 result for ' + query + ', ' + abbrs[0]
コード例 #48
0
def randquote_fetcher(phenny, topic, to_user):
    """Fetch a random quote from quotes.firespeaker.org and queue it.

    :param topic: optional topic filter appended to the request.
    :param to_user: recipient for the queued message lines.
    :raises GrumbleError: when the service is unreachable or returns junk.
    """
    # create opener
    opener = urllib.request.build_opener()
    opener.addheaders = [
        ('User-agent', web.Grab().version),
        ('Referer', "http://quotes.firespeaker.org/"),
    ]

    try:
        url = "http://quotes.firespeaker.org/random.php"
        if topic:
            url += "?topic=%s" % web.quote(topic)
        req = opener.open(url)
        data = req.read().decode('utf-8')
        data = json.loads(data)
    except (HTTPError, IOError, ValueError) as e:
        raise GrumbleError("Firespeaker.org down? Try again later.") from e

    if len(data) == 0:
        phenny.say("No results found")
        return

    if data['quote'] is not None:  # fixed idiom: was '!= None'
        # Strip the HTML wrapper, mapping <em> to underscores and the
        # mdash entity to the real character.
        quote = data['quote'].replace('</p>', '').replace('<p>', '').replace('<em>', '_').replace('</em>', '_').replace('&mdash;', '—')
        response = data['short_url'] + ' - ' + quote
    else:
        phenny.say("Sorry, no quotes returned!")
        return

    more.add_messages(phenny, to_user, response.split('\n'))
コード例 #49
0
ファイル: search.py プロジェクト: JordanKinsley/PinkiePyBot
def abbreviate_search(query, phenny): 
    """Look up abbreviations of *query* on the Acromine service.

    Returns a sentence listing up to three abbreviations, None when there
    are no results, or an error string when the response is unparseable.
    (Replaces the original's four levels of bare try/except fallbacks.)
    """
    query = query.replace('!', '')
    webquery = web.quote(query)
    uri = 'http://www.nactem.ac.uk/software/acromine/dictionary.py?lf=' + webquery
    rec_bytes = web.get(uri)
    jsonstring = json.loads(rec_bytes)
    # Collect up to three 'sf' (short form) values, stopping at the first
    # entry that is missing or malformed, mirroring the old fallbacks.
    abbrs = []
    try:
        for entry in jsonstring[:3]:
            abbrs.append(entry['sf'])
    except (KeyError, TypeError):
        if not abbrs:
            if not jsonstring:
                return
            return 'There was an error parsing the json data'
    if not abbrs:
        return
    if len(abbrs) >= 3:
        return query + ' could be abbreviated as ' + abbrs[0] + ' or ' + abbrs[1] + ' or ' + abbrs[2]
    if len(abbrs) == 2:
        return query + ' could be abbreviated as ' + abbrs[0] + ' or ' + abbrs[1]
    return '1 result for ' + query + ', ' + abbrs[0]
コード例 #50
0
def search(query):
    """Query the directory service; return a list of attribute dicts per
    entry, or False when nothing matched."""
    quoted = web.quote(query)
    try:
        body = web.get(SEARCH_URL.format(quoted), verify=False)
    except (web.ConnectionError, web.HTTPError):
        raise GrumbleError(
            "THE INTERNET IS F*****G BROKEN. Please try again later.")

    # Searching for fewer than 3 characters yields a blank XML page.
    if not body:
        return False

    tree = lxml.etree.fromstring(body.encode('utf-8'))
    found = tree.findall('{0}directory-entries/{0}entry'.format(NS))
    if not found:
        return False

    collected = []
    for node in found:
        attrs = {a.attrib['name']: a[0].text
                 for a in node.findall('{0}attr'.format(NS))}
        collected.append(attrs)
    return collected
コード例 #51
0
ファイル: rule34.py プロジェクト: ask-compu/CompuBot
def derpibooru_search(query, phenny):
    """Search derpibooru.org for *query* and return a random hit.

    Returns "<url> uploaded by <user> [on <date>]" or None when there are
    no results. The date is only included when dateutil is importable.
    """
    query = query.replace("!", "")
    query = web.quote(query)
    if hasattr(phenny.config, "derpibooru_key"):
        uri = "https://derpibooru.org/search.json?q=" + query + "&key=" + phenny.config.derpibooru_key
    else:
        uri = "https://derpibooru.org/search.json?q=" + query
    rec_bytes = web.get(uri)
    jsonstring = json.loads(rec_bytes)
    dhits = jsonstring["total"]
    if dhits > 0:
        results = choice(jsonstring["search"])
        url = "https:" + results["image"]
        uploader = results["uploader"]
        uploaded = results["created_at"]
        try:
            # dateutil is optional; fall back to a date-less reply.
            import dateutil.parser

            isdateutil = True
            dt = dateutil.parser.parse(uploaded)
            timestamp1 = calendar.timegm(dt.timetuple())
            timestamp1 = time.gmtime(timestamp1)
            uploadedformat = time.strftime("%A %B %d, %Y at %I:%M:%S %p", timestamp1)
        except Exception:
            # Narrowed from a bare except (import or parse failure).
            isdateutil = False
        if isdateutil is True:
            return url + " uploaded by " + uploader + " on " + uploadedformat
        else:
            return url + " uploaded by " + uploader
    else:
        return
コード例 #52
0
ファイル: wiktionary.py プロジェクト: corneyflorex/phenny
def wiktionary(word):
    """Scrape a Wiktionary page for *word*.

    Returns (etymology, definitions) where etymology is a string or None
    and definitions maps a part of speech to a list of definition strings.
    """
    bytes = web.get(uri % web.quote(word))
    bytes = r_ul.sub('', bytes)

    mode = None
    etymology = None
    definitions = {}
    # Walk line-by-line: section-heading ids switch 'mode', and content
    # lines (<p>/<li>) are collected under the current mode.
    for line in bytes.splitlines():
        if 'id="Etymology"' in line:
            mode = 'etymology'
        elif 'id="Noun"' in line:
            mode = 'noun'
        elif 'id="Verb"' in line:
            mode = 'verb'
        elif 'id="Adjective"' in line:
            mode = 'adjective'
        elif 'id="Adverb"' in line:
            mode = 'adverb'
        elif 'id="Interjection"' in line:
            mode = 'interjection'
        elif 'id="Particle"' in line:
            mode = 'particle'
        elif 'id="Preposition"' in line:
            mode = 'preposition'
        elif 'id="' in line:
            # Any other section heading ends collection.
            mode = None

        # Fixed typo: was 'etmyology', so the etymology was never captured.
        elif (mode == 'etymology') and ('<p>' in line):
            etymology = text(line)
        elif (mode is not None) and ('<li>' in line):
            definitions.setdefault(mode, []).append(text(line))

        if '<hr' in line:
            # End of the first language section.
            break
    return etymology, definitions
コード例 #53
0
ファイル: url.py プロジェクト: lardissone/jenni
def unbitly(jenni, input):
    """Resolve a (possibly shortened) URL to its final destination.

    Falls back to the channel's last-seen URL when none is supplied; the
    actual fetch is proxied through a tumbolia appspot Python evaluator.
    """
    url = input.group(2)
    if not url:
        # NOTE(review): checks hasattr(jenni, 'last_seen_uri') but then reads
        # jenni.bot.last_seen_uri -- possibly a mismatch; confirm which
        # object actually carries the attribute.
        if hasattr(jenni, 'last_seen_uri') and input.sender in jenni.bot.last_seen_uri:
            url = jenni.bot.last_seen_uri[input.sender]
        else:
            return jenni.say('No URL provided')
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    pyurl = u'https://tumbolia.appspot.com/py/'
    # Python 2 snippet executed remotely; it follows redirects and prints
    # the final URL.
    code = "req=urllib2.Request(%s, headers={'Accept':'*/*'});"
    code += "req.add_header('User-Agent', %s);"
    code += "u = urllib2.urlopen(req);"
    code += 'print u.geturl();'
    url = url.replace("'", r"\'")
    query = code % (repr(url.strip()), repr(USER_AGENT))
    try:
        temp = web.quote(query)
        u = web.get(pyurl + temp)
    except Exception:
        # Narrowed from a bare except, which also caught KeyboardInterrupt.
        return jenni.say('Failed to grab URL: %s' % (url))
    if u.startswith(('http://', 'https://')):
        jenni.say(u)
    else:
        jenni.say('Failed to obtain final destination.')
コード例 #54
0
ファイル: url.py プロジェクト: lardissone/jenni
    def remote_call():
        # Fetch `uri` (from the enclosing scope) via a tumbolia appspot
        # "py" evaluator: a Python 2 snippet is assembled as a string,
        # URL-quoted, and executed remotely; it prints a JSON dict with
        # the response headers, the first 32 KiB of the body, and the
        # final URL after redirects.
        pyurl = u'https://tumbolia.appspot.com/py/'
        code = 'import simplejson;'
        code += 'opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(),'
        code += 'urllib2.BaseHandler(), urllib2.HTTPHandler(),'
        code += 'urllib2.HTTPRedirectHandler(), urllib2.HTTPErrorProcessor(),'
        code += 'urllib2.UnknownHandler());'
        code += 'urllib2.install_opener(opener);'
        code += "req=urllib2.Request(%s, headers={'Accept':'*/*'});"
        code += "req.add_header('User-Agent', %s);"
        code += "u=urllib2.urlopen(req);"
        code += "rtn=dict();"
        code += "rtn['headers'] = u.headers.dict;"
        code += "contents = u.read(32768);"
        code += "con = str();"
        # Remote-side charset fallback: try utf-8, then iso-8859-1.
        code += r'''exec "try: con=(contents).decode('utf-8')\n'''
        code += '''except: con=(contents).decode('iso-8859-1')";'''
        code += "rtn['read'] = con;"
        code += "rtn['url'] = u.url;"
        code += "rtn['geturl'] = u.geturl();"
        code += "print simplejson.dumps(rtn)"
        query = code % (repr(uri), repr(USER_AGENT))
        temp = web.quote(query)
        u = web.get(pyurl + temp)

        # Returns (ok, payload): (True, dict) on parseable JSON,
        # otherwise (False, raw-response-string).
        try:
            useful = json.loads(u)
            return True, useful
        except Exception, e:  # Python 2 except syntax; file is py2-only.
            #print "%s -- Failed to parse json from web resource. -- %s" % (time.time(), str(e))
            return False, str(u)
コード例 #55
0
ファイル: search.py プロジェクト: telnoratti/phenny
def duck_search(query): 
    """Scrape DuckDuckGo's HTML endpoint; return the first hit or None."""
    sanitized = query.replace('!', '')
    page = web.get('http://duckduckgo.com/html/?q=%s&kl=uk-en' % web.quote(sanitized))
    found = r_duck.search(page)
    if found:
        return web.decode(found.group(1))
コード例 #56
0
def now_playing(phenny, input):
    """Report what a last.fm user is playing (or last played).

    Resolves the target username from the argument or the sender's nick,
    queries user.getrecenttracks, and announces the track, artist, album
    and (for past plays) a humanised timestamp.
    """
    nick = input.nick.casefold()
    user = ""
    arg = input.group(1)
    if not arg or len(arg.strip()) == 0:
        user = resolve_username(nick) # use the sender
        if not user: #nick didnt resolve
            user = nick
    else: # use the argument
        user = resolve_username(arg.strip())
        if not user: # user didnt resolve
            user = arg
    user = user.strip()
    try:
        req = web.get("%smethod=user.getrecenttracks&user=%s" % (APIURL, web.quote(user)))
    except web.HTTPError as e:
        # last.fm answers 400 for unknown users.
        if e.response.status_code == 400:
            phenny.say("%s doesn't exist on last.fm, perhaps they need to set user" % (user))
            return
        else:
            phenny.say("uhoh. try again later, mmkay?")
            return
    root = etree.fromstring(req.encode('utf-8'))
    recenttracks = list(root)
    if len(recenttracks) == 0:
        phenny.say("%s hasn't played anything recently. this isn't you? try lastfm-set" % (user))
        return
    tracks = list(recenttracks[0])
    if len(tracks) == 0:
        phenny.say("%s hasn't played anything recently. this isn't you? try lastfm-set" % (user))
        return
    first = tracks[0]
    now = first.get("nowplaying") == "true"  # was 'True if ... else False'
    tags = {}
    # Fixed: Element.getiterator() was removed in Python 3.9; iter() is
    # the supported equivalent.
    for e in first.iter():
        tags[e.tag] = e

    track = tags['name'].text.strip()

    artist = tags['artist'].text.strip()

    album = "unknown"
    if tags['album'].text:
        album = tags['album'].text

    date = None
    stamp = None
    if not now:
        # 'uts' is the unix timestamp of the play.
        date = tags['date'].get("uts")
        stamp = int(date)

    if now:
        present = get_verb(nick)[1]
        phenny.say("%s %s \"%s\" by %s on %s" %(user.strip(), present.strip(), track, artist, album ))
        return
    else:
        past = get_verb(nick)[0]
        phenny.say("%s %s \"%s\" by %s on %s %s" %(user.strip(), past.strip(), track, artist, album, pretty_date(stamp)))
コード例 #57
0
def quote_str(input_str, safe=':,./&+?#=@'):
    """Percent-encode a string, leaving the characters in *safe* intact.

    :param input_str: str input string.
    :param safe: str characters not to be quoted.
    :return: str quoted string
    """
    # Thin wrapper that pins a URL-friendly default 'safe' set.
    return quote(input_str, safe=safe)
コード例 #58
0
ファイル: ham.py プロジェクト: wolfy1339/jenni
def cs(jenni, input):
    '''.cs <callsign> -- queries qth.com for call sign information'''
    cs = input.group(2).upper()
    try:
        link = "http://www.qth.com/callsign.php?cs=" + uc.decode(web.quote(cs))
    except Exception, e:
        print e
        return jenni.say('Failed to obtain data from qth.com')