def callmodule(message, peer):
    """Dispatch an incoming message to a named module, or to the chatbot.

    A message beginning with a module keyword ("wiki", "bot", "google") is
    stripped of that keyword and handed to the matching handler.  Anything
    else falls through to the conversational bot when `chattybot` is on.
    """
    global chattybot
    global botsessions
    global bot1

    message = message.lower()
    # Add a module name here so that the dispatch loop below picks it up.
    for module in ("wiki", "bot", "google"):
        if not message.startswith(module):
            continue
        query = message[len(module) + 1:]
        if module == "wiki":
            reply = wiki(query)
            if "Cannot acces link!" in reply:
                reply = "No wikipedia article on that, googling instead\n" + google(query)
            return reply
        if module == "google":
            return google(query)
        if module == "bot":
            query = query.lstrip()
            reply = wolfram(query)
            if reply == "noidea":
                reply = "tough. I'll google that\n" + google(query)
            return reply

    if chattybot:
        if peer not in botsessions:
            botsessions[peer] = bot1.create_session()
        reply = botsessions[peer].think(message)
        # Hide every HTML tag except <br>, then turn <br /> into newlines.
        soup = BeautifulSoup(reply)
        for tag in soup.findAll(True):
            if tag.name not in ['br']:
                tag.hidden = True
        reply = soup.renderContents()
        return reply.replace('<br />', '\n')
def getTextFromWeb(self): num_results = 10 search_list = ["bbc", "Little Red Riding Hood"] sites = [] text = [] results = [] while len(search_list)!=0 and len(results) < num_results: search = search_list.pop() results = results + google.google(search,nltk.word_tokenize) for d in results: sites.append(d) if len(sites) == num_results: break for url in sites: print url try: page = urllib2.urlopen(url).read() except urllib2.HTTPError, e: print "Search failed: %s" % e continue paragraphs = justext.justext(page, justext.get_stoplist('English')) if len(text) < 50: for paragraph in paragraphs: if paragraph['class'] == 'good' and len(text) < 50: sentences = self.segment_sentences(paragraph['text'].encode('utf8')) for s in sentences: if not text.__contains__(s): text.append(s)
def getTextFromWeb(self): num_results = 10 search_list = ["bbc", "Little Red Riding Hood"] sites = [] text = [] results = [] while len(search_list) != 0 and len(results) < num_results: search = search_list.pop() results = results + google.google(search, nltk.word_tokenize) for d in results: sites.append(d) if len(sites) == num_results: break for url in sites: print url try: page = urllib2.urlopen(url).read() except urllib2.HTTPError, e: print "Search failed: %s" % e continue paragraphs = justext.justext(page, justext.get_stoplist('English')) if len(text) < 50: for paragraph in paragraphs: if paragraph['class'] == 'good' and len(text) < 50: sentences = self.segment_sentences( paragraph['text'].encode('utf8')) for s in sentences: if not text.__contains__(s): text.append(s)
def on_enter_google_search_state(self, event):
    """Handle entry into the google-search state.

    Runs a Google search on the incoming message text and sends the
    result back to the sender, then returns to the previous state.
    """
    print("I'm entering google search state")
    sender = event['sender']['id']
    found = google(event['message']['text'])
    send_text_message(sender, "我們幫你從Google找到:")
    send_text_message(sender, found)
    self.go_back(event)
def callmodule(message, peer):
    """Route *message* to a command module, or to the chatbot.

    Messages prefixed with "wiki", "bot" or "google" are stripped of the
    prefix and answered by that module; anything else goes to the
    conversational bot when `chattybot` is enabled.  Returns the reply
    string (or None when chattybot is off and no module matched).
    """
    message = message.lower()
    modules = ["wiki", "bot", "google"
               ]  #Add module name here so that the for loop below works
    for module in modules:
        # Fixed idiom: str.startswith instead of message.find(module) == 0,
        # which scanned the whole string just to test a prefix.
        if message.startswith(module):
            message = message[len(module) + 1:]
            if module == "wiki":
                reply = wiki(message)
                if "Cannot acces link!" in reply:
                    reply = ("No wikipedia article on that, googling instead\n"
                             + google(message))
                return reply
            if module == "google":
                return google(message)
            if module == "bot":
                message = message.lstrip()
                reply = wolfram(message)
                if reply == "noidea":
                    reply = "tough. I'll google that\n" + google(message)
                return reply
    global chattybot
    if chattybot:
        global botsessions
        global bot1
        if peer not in botsessions:
            botsessions[peer] = bot1.create_session()
        reply = botsessions[peer].think(message)
        # Hide every tag except <br>, then turn <br /> into newlines.
        VALID_TAGS = ['br']
        soup = BeautifulSoup(reply)
        for tag in soup.findAll(True):
            if tag.name not in VALID_TAGS:
                tag.hidden = True
        reply = soup.renderContents()
        reply = reply.replace('<br />', '\n')
        return reply
def wolfram_it(query):
    """Query Wolfram Alpha and return the best available answer.

    Prefers the first non-"Input" pod with text (Fahrenheit degree marker
    stripped), falls back to Wolfram's interpretation of the input, and
    finally to a Google search.
    """
    result = client.query(query)
    response = ''
    interpreted_response = ''
    for pod in result.pods:
        if ('Input' in pod.title):
            # Remember how Wolfram interpreted the question.
            interpreted_response = pod.text
        elif pod.text:
            # First pod with real text wins; stop looking.
            response += pod.text.replace(u'\xb0F', '')
            break
    if response:
        return response
    if interpreted_response:
        return interpreted_response
    return "I don't know idiot, google it? " + google(query)
def wolfram_it(query):
    """Answer *query* via Wolfram Alpha.

    Returns the first non-"Input" pod's text (with the Fahrenheit degree
    marker stripped); falls back to the interpreted input, and finally to
    a Google search.
    """
    result = client.query(query)
    response = ""
    interpreted_response = ""
    for i in result.pods:
        if "Input" in i.title:
            # Keep Wolfram's interpretation of the question as a fallback.
            interpreted_response = i.text
        elif i.text:
            # First pod with real text wins; drop the degree-F marker.
            response += i.text.replace(u"\xb0F", "")
            break
    if response:
        return response
    elif interpreted_response:
        return interpreted_response
    else:
        return "I don't know idiot, google it? " + google(query)
def getfy(self):
    """Return the translator/handler object matching the combobox choice.

    Falls back to a DLL-plugin translator when no built-in option matches.
    """
    self.logger.debug("getfy call")
    choice = self.comboxlist0.get()
    if choice == "使用谷歌翻译":
        return google()
    if choice == "使用百度翻译":
        return baidu()
    if choice == "C/C++代码运行":
        return runcpp()
    if choice == "C/C++代码美化":
        return formatcpp()
    if choice == "发布临时文字":
        return pasteUbuntu()
    # Obtained from the DLL plugin.
    k = plus_traslate()
    k.s = self
    return k
from requests_html import HTMLSession
import re
import json
import Wbads
import google
import resume

# Google the film, fetch the top result, and assemble a metadata sheet
# from the distributor/date info and the image/description scrape.
session = HTMLSession()
hits = google.google("la ligne verte", session)
response = session.get(hits[0])
meta = Wbads.distrib_and_date(response)
desc = resume.image_descript(response)
fiche = {**desc, **meta}
print(fiche)
facebook().logout(driver) elif "message" in keyword["transcription"].lower(): facebook().message(driver) elif "scroll down" in keyword["transcription"].lower(): common().scrolldown(driver) elif "scroll up" in keyword["transcription"].lower(): common().scrollup(driver) elif "play" in keyword["transcription"].lower( ) or "pause" in keyword["transcription"].lower(): youtube().playpause(driver) elif "open notifications" in keyword["transcription"].lower( ) or "close notifications" in keyword["transcription"].lower(): facebook().notification(driver) elif "open google" in keyword["transcription"].lower(): print("What do want to search for?") google().start_google(driver) while (1): print("I am listening") keyword2 = main().recognize_speech_from_mic(recognizer, microphone) if keyword2["transcription"]: break if not keyword2["success"]: break print("I didn't catch that. What did you say?\n") google().search_google(driver, keyword2["transcription"]) elif "open youtube" in keyword["transcription"].lower(): youtube().start_youtube(driver) elif "forward video" in keyword["transcription"].lower(): youtube().fvideo(driver) elif "reverse video" in keyword["transcription"].lower(): youtube().rvideo(driver)
def callmodule(message,peer,search): global flag global cursor global cnxn message=message.lower() modules=["wiki","bot","google","math","trans","weather","cricket","youtube","nearby"] #Add module name here so that the for loop below works for module in modules: if (message.find(module)==0): message=message[len(module)+1:] if module=="wiki": reply=wiki(message,peer,search,cursor,cnxn) print reply try: if ("Cannot acces link!" in reply): reply="No wikipedia article on that, googling instead\n"+google(message) return reply except: print"except" return None if module=="google": return google(message) if module=="bot": message=message.lstrip() reply=wolfram(message) if (reply=="noidea"): reply="tough. I'll google that\n"+google(message) return reply if module=="math": a=eval(compile(message, '<string>', 'eval', __future__.division.compiler_flag)) #print a return str(a) if module=="trans": result=translate(message) return result if module=="nearby": print "nearby" m=message.split(",") m1=m[0] print m1 try: m2=m[1] except: m2=10000 print m2 places(m1,peer,search,cursor,cnxn,m2) return None if module=="weather": result=setUrl(message) return result if module=="youtube": #print "done" try: a=querry(message,peer,search,cursor,cnxn) return None except: print"else main" return None # return module # else: #return result if module=="cricket": cric = CricbuzzParser() match = cric.getXml() details = cric.handleMatches(match) #Returns Match details as a Dictionary. Parse it according to requirements. b='' for i in details : b=b+str(i) #print b return b global chattybot if chattybot: global botsessions global bot1 if peer not in botsessions: botsessions[peer]=bot1.create_session() reply = botsessions[peer].think(message) VALID_TAGS = ['br'] soup = BeautifulSoup(reply) for tag in soup.findAll(True): if tag.name not in VALID_TAGS: tag.hidden = True reply=soup.renderContents() reply=reply.replace('<br />','\n') return reply
# -*- coding: utf-8 -*- import sys import codecs import google if __name__ == '__main__': infile = codecs.open(sys.argv[1], 'r', 'utf-8') outfile = codecs.open(sys.argv[1].split('.')[0] + '.ext', 'a', 'utf-8') skip = int(sys.argv[2]) print 'skip %d lines' % skip browser = google.get_browser() firstline = True for l in infile: if firstline: firstline = False continue csv = l.split(',') # print csv n = int(csv[0]) if n <= skip: continue word = csv[2] # кавычки оставим для улучшения результатов поиска print "processing " + word result = google.google(browser, word) outfile.write('%d,%s,%s\n' % (n, word, ','.join(result))) outfile.flush() infile.close() outfile.close()
ola Valtercio bem vindo de volta. Estou a sua desposição. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ''') perg = str(input('Voce: ')) # entrada com o nome jarvis print() jarvis = '' if perg == 'não quero fazer nada': # comando para sair do Bot print() # para da espaços entre as linhas print('Ok senhor, até a proxima...') if perg == 'jarvis': # comando para iniciar o Bot jarvis = str( input('ola senhor o que deseja? ')) # escolha das funções do Bot. print() # para da espaços entre as linhas if jarvis == 'faca uma busca' or jarvis == 'faça uma busca': # Bot faz uma busca no google ou youtube google.google() if jarvis == 'abra um site' or jarvis == 'quero abrir um site': # Bot abrirá um site web.site() # funcao para abrir sites if jarvis == 'quero conversar': # para conversar com o Bot chat.conv() # funcao do chat if jarvis == 'faca uma busca na wikipedia' or jarvis == 'faça uma busca na wikipedia': # Bot pesquisa na wikipedia wikipedia.wiki() # funcao da wikipedia if jarvis == 'faca uma busca na trend': trend.trend()