def inf(d, s):
    """Return a short Wikipedia summary for the nearest place of interest around (d, s)."""
    wikipedia.summary("Wikipedia")
    wikipedia.set_lang("ru")
    types = ['museum', 'park', 'church', 'zoo', 'train_station', 'stadium']

    def param(t):
        # Query the Google Places "nearby search" API for the closest place of type t.
        content = urlopen(
            'https://maps.googleapis.com/maps/api/place/nearbysearch/json?language=ru&location='
            + str(d) + ',' + str(s) + '&rankby=distance&types=' + t + '&key=' + apikey).read()
        c = json.loads(content.decode("utf-8"))
        c = c.get('results')
        if len(c) != 0:
            name = c[0].get('name')
            m = wikipedia.search(name, results=5, suggestion=False)
            if len(m) != 0:
                # Summarize the first search hit (passing the whole list was a bug).
                textsong = wikipedia.summary(m[0], sentences=5, chars=1)
                if textsong != '':
                    return textsong

    for place_type in types:
        temp = param(place_type)
        if temp:
            return temp
def search(self, lang, queries, articles_per_query=10, should_break=None, on_progress=None):
    """ Searches for articles.

    Args:
        lang (str): A language code in ISO 639-1 format.
        queries (list of str): A list of queries.
        should_break (callable): Callback for breaking the computation before the end.
            If it evaluates to True, downloading is stopped and the documents
            downloaded so far are returned in a Corpus.
        on_progress (callable): Callback for a progress bar.
    """
    wikipedia.set_lang(lang)

    results = []
    for i, query in enumerate(queries):
        try:
            articles = wikipedia.search(query, results=articles_per_query)
            for j, article in enumerate(articles):
                if callable(should_break) and should_break():
                    break
                results.extend(self._get(article, query, should_break))
                if callable(on_progress):
                    on_progress((i * articles_per_query + j + 1)
                                / (len(queries) * articles_per_query), len(results))
        except (wikipedia.exceptions.HTTPTimeoutError, IOError) as e:
            self.on_error(str(e))
            break
        if callable(should_break) and should_break():
            break

    return Corpus.from_documents(results, 'Wikipedia', self.attributes,
                                 self.class_vars, self.metas, title_indices=[-1])
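# A minimal usage sketch for a search() method like the one above. The class name
# `WikipediaSearcher` and its constructor are assumptions for illustration; only the
# callback signatures (should_break() -> bool, on_progress(fraction, n_results)) follow
# the snippet.
import threading

stop_event = threading.Event()

def should_break():
    # Return True to stop the download early (e.g. when the user presses Cancel).
    return stop_event.is_set()

def on_progress(fraction, n_results):
    print("progress: {:.0%}, {} documents so far".format(fraction, n_results))

searcher = WikipediaSearcher()  # assumed constructor
corpus = searcher.search("en", ["machine learning", "data mining"],
                         articles_per_query=5,
                         should_break=should_break,
                         on_progress=on_progress)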
def get_info_box_data(word):
    wikipedia.set_lang("he")
    res = wikipedia.search(word, 10, True)
    title = res[0][0]
    html_page = wikipedia.page(title).html()
    soup = BeautifulSoup(html_page)
    info_table = soup.find("table", {"class": "infobox"})
    info = []
    current_tuple = tuple()
    rows = info_table.findChildren(["th", "tr", "td"])
    for row in rows:
        result = ""
        row_title = get_title(row)
        values = row.findChildren("a")
        if len(values) == 0:
            continue
        for value in values:
            for content in value.contents:
                if "img" in content:
                    continue
                result += " " + str(content)
        if "img" in result:
            continue
def get(self, keyword):
    if check_contain_chinese(keyword):
        wikipedia.set_lang("zh")
        print("set lang to zh")
    result = wikipedia.summary(keyword)
    print(result)
    return {"result": result}
def searchWiki(page):
    wikipedia.set_lang("fr")
    link = ''
    try:
        propos = wikipedia.search(page, results=5, suggestion=False)
        for choice in propos:
            if choice.encode('utf-8') == page.encode('utf-8'):
                p = wikipedia.page(page)
                link = p.url
                break
            elif page in choice:
                # TODO
                print 'There is a proposition containing the keyWord '
                print choice
            else:
                try:
                    wikipedia.page(page, redirect=False, auto_suggest=False)
                except wikipedia.exceptions.RedirectError:
                    p = wikipedia.page(page)
                    link = p.url
                    break
                except:
                    link = ''
    except:
        link = ""
    return link  # .encode('utf-8')
def get_wiki_info(title):
    """This function retrieves information from the Wikipedia API.

    :param title: A title of a possible wikipedia page
    :type title: str
    :returns: an object with information of the retrieved page
    :rtype: wikipedia.wikipedia.WikipediaPage object

    :Example:

    >>> result = get_wiki_info('Cervantes')
    >>> print(result.url)
    https://es.wikipedia.org/wiki/Miguel_de_Cervantes
    >>> print(result.title)
    Miguel de Cervantes
    >>> print(type(result))
    <class 'wikipedia.wikipedia.WikipediaPage'>
    >>> result = get_wiki_info('Cervantesssssssssssss')
    >>> print(type(result))
    <class 'str'>
    """
    # The language is hardcoded: i18n is of little use outside Europe, and there is no
    # time to translate the app for other European countries. OTOH, addresses in
    # anglosaxon countries are really boring :-P
    wp.set_lang('es')
    try:
        info = wp.page(title)
        return info
    except:
        msg = "<H2>¡Lo sentimos!</H2>\n"
        msg += "<p>Hemos fallado miserablemente al ofrecerte este servicio.</p>\n"
        msg += "<p>Vuelve al mapa e inténtalo de nuevo.</p>"
        return msg
def main(path_extracted_wikipedia_text: str, wikipedia_article_names: List[Tuple[str, str]]):
    wikipedia.set_lang('ja')
    wikipedia_text_data = {}

    extracted_summary_text = []
    for article_name in wikipedia_article_names:
        text = get_wikipedia_summary(article_name[0])
        wikipedia_text_format = {}
        wikipedia_text_format["page_title"] = article_name[0]
        wikipedia_text_format["text"] = text
        wikipedia_text_format["gold_label"] = article_name[1]
        logger.info(msg='Got wikipedia page={}'.format(article_name))
        if text is not False:
            logger.info(msg='It gets text from page-name={}'.format(article_name))
            extracted_summary_text.append(wikipedia_text_format)
        time.sleep(SLEEP_TIME)

    extracted_full_text = []
    for article_name in wikipedia_article_names:
        text = get_wikipedia_page(article_name[0])
        wikipedia_text_format = {}
        wikipedia_text_format["page_title"] = article_name[0]
        wikipedia_text_format["text"] = text
        wikipedia_text_format["gold_label"] = article_name[1]
        logger.info(msg='Got wikipedia page={}'.format(article_name))
        if text is not False:
            logger.info(msg='It gets text from page-name={}'.format(article_name))
            extracted_full_text.append(wikipedia_text_format)
        time.sleep(SLEEP_TIME)

    wikipedia_text_data['summary'] = extracted_summary_text
    wikipedia_text_data['full'] = extracted_full_text

    with open(path_extracted_wikipedia_text, 'w') as f:
        f.write(json.dumps(wikipedia_text_data, ensure_ascii=False, indent=4))
def wikipedia_page(message, option, query):
    """
    Return the result of a Wikipedia search
    """
    if query == 'help':
        return

    # set language
    lang = 'ja'
    if option:
        _, lang = option.split('-')
        wikipedia.set_lang(lang)

    try:
        # search with query
        results = wikipedia.search(query)
    except:
        message.send('指定された言語 `{}` は存在しません'.format(lang))
        return

    # get first result
    if results:
        page = wikipedia.page(results[0])
        attachments = [{
            'fallback': 'Wikipedia: {}'.format(page.title),
            'pretext': 'Wikipedia: <{}|{}>'.format(page.url, page.title),
            'text': page.summary,
        }]
        message.send_webapi('', json.dumps(attachments))
    else:
        message.send('`{}` に該当するページはありません'.format(query))
def wiki_func(paras, infos):
    """Chinese Wikipedia lookup"""
    wikipedia.set_lang("zh")
    candidates = wikipedia.search(paras)
    if len(candidates) <= 0:
        return {
            'text': 'not found',
        }
    else:
        summary = None
        for keyword in candidates:
            try:
                summary = wikipedia.summary(keyword, sentences=1)
                break
            except Exception:
                # A disambiguation error may be raised here; see the wikipedia docs
                continue
        if summary:
            answer = decode_to_unicode(summary) + \
                u'\n候选关键词: %s' % u', '.join(candidates)
            return {
                'text': answer,
            }
        else:
            return {
                'text': 'not found',
            }
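# A minimal standalone sketch of the pattern used in wiki_func above: try each search
# candidate and fall back to the next one when the page title is ambiguous. Only the
# wikipedia library calls are taken from the snippet; the function name is illustrative.
import wikipedia

def first_unambiguous_summary(query, lang="zh", sentences=1):
    wikipedia.set_lang(lang)
    for title in wikipedia.search(query):
        try:
            return wikipedia.summary(title, sentences=sentences)
        except wikipedia.exceptions.DisambiguationError:
            # The error's .options attribute lists candidate pages; here we simply move on.
            continue
        except wikipedia.exceptions.PageError:
            continue
    return None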
def search_lang(self, c, e, args):
    wikipedia.set_lang(args[0])
    p = wikipedia.page(' '.join(args[1:]))
    if p:
        c.privmsg(get_target(c, e), '\x02{}\x0f - {} [ {} ]'.format(
            p.title, smart_truncate(p.summary.replace('\n', ' ')), p.url))
    wikipedia.set_lang(self.bot.config.get('wikipedia', 'lang'))
def add_wiki_data_to_person(person):
    """Mine data from a Wikipedia page and save it to the person object."""
    wikipedia.set_lang("de")
    for entry in wikipedia.search(person.name, results=4):
        avg_time = int(Regeste.objects.filter(issuer=person).aggregate(Avg("issue_date"))['issue_date__avg'])
        avg_year = datetime.datetime.fromtimestamp(int(avg_time)).year
        try:
            page_de = wikipedia.page(entry)
            categories = page_de.categories
            for category in categories:
                try:
                    regex_result = re.findall(r"Kategorie:Gestorben (\d+)|Kategorie:Geboren (\d+)", category)
                    search_results = list(filter(None, flatten(regex_result)))
                    for search_result in search_results:
                        if abs(int(search_result) - avg_year) <= 30:
                            if len(page_de.images) > 0:
                                if page_de.images[0][-4:] == ".svg":
                                    person.img_url = page_de.images[1]
                                else:
                                    person.img_url = page_de.images[0]
                            person.short_description = page_de.summary
                            person.save()
                except IndexError:
                    pass
        except (wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.PageError):
            # `except A and B` only catches A; catch both exception types as a tuple.
            pass
    return
def find_definition(command):
    if command.startswith("what is a "):
        command = command[10:]
    elif command.startswith("what is ") or command.startswith("who are "):
        command = command[8:]
    elif command.startswith("what are ") or command.startswith("what's a "):
        command = command[9:]
    elif command.startswith("who is "):
        command = command[7:]
    elif command.startswith("what's"):
        command = command[6:]
    if command[-1] == '?':
        command = command[:-1]
    try:
        if wiki_lang != "":
            wikipedia.set_lang(wiki_lang[:-1])
        if wiki_lang != "zh:":
            lang = wiki_lang
        else:
            lang = "zh-cn:"
        command = command.strip()
        command = handle_translation(lang + command).lower()
        command = command.title()
        response = '```' + wikipedia.summary(command, sentences=wiki_line) + '```'
    except:
        response = "Need a clear one"
    return response
def _extract_from_wiki(self):
    title = self.title_line_edit.text()
    if title:
        page = self.page_combo_box.currentText()
        wikipedia.set_lang(self.lang_combo_box.currentText())
        self.load_progressbar.setMinimum(0)
        self.load_progressbar.setMaximum(0)

        class ProgressThread(QThread, QWidget):
            content_link_arrived = pyqtSignal([list])
            content_text_arrived = pyqtSignal(['QString'])
            content_image_arrived = pyqtSignal([list, 'QString'])
            error_occurred = pyqtSignal()
            valid_images = []

            def run(self):
                try:
                    wiki = wikipedia.page(title=title)
                    f = open('templates/template.html')
                    if page == 'Content':
                        self.content_text_arrived.emit(wiki.content)
                    elif page == 'Images':
                        print(wiki.images)
                        self.des_dir = Preferences.output_path + '/' + title
                        self.valid_images = []
                        if not os.path.exists(self.des_dir):
                            print(self.des_dir)
                            os.mkdir(self.des_dir)
                        for i in wiki.images:
                            if PurePath(i).suffix in Preferences.valid_image_formats:
                                print(i)
                                print(self.des_dir)
                                wget.download(i, out=self.des_dir)
                                self.valid_images.append(i)
                        self.content_image_arrived.emit(self.valid_images, self.des_dir)
                    elif page == 'Summary':
                        self.content_text_arrived.emit(wiki.summary)
                    elif page == 'Images Links':
                        self.content_link_arrived.emit(wiki.images)
                    elif page == 'References Links':
                        self.content_link_arrived.emit(wiki.references)
                except:
                    self.error_occurred.emit()

        self.progress_thread = ProgressThread()
        self.progress_thread.content_link_arrived.connect(self.set_content_link)
        self.progress_thread.content_text_arrived.connect(self.set_content_text)
        self.progress_thread.content_image_arrived.connect(self.set_content_image)
        self.progress_thread.error_occurred.connect(self.handle_error_occurred)
        self.progress_thread.start()
    else:
        self.content_text_browser.clear()
        self.content_text_browser.setEnabled(False)
def __init__(self, dispatcher):
    wikipedia.set_lang("de")
    # Last used or searched pageid by the user.
    # self.last_pageid.get(chat_id, None) returns None if chat_id doesn't exist as a key.
    self.last_pageid = {}
    dispatcher.add_handler(CommandHandler('wiki', self.get_article))
def collectFrom(lang, start, hangang):
    wikipedia.set_lang(lang)
    lookpa = wikipedia.page(start).links
    lookna = [wikipedia.page(start)]
    corpus = str(wikipedia.page(start).content)
    while len(corpus) < hangang:
        random.shuffle(lookpa)
        item = lookpa[0]
        try:
            corpus += str(wikipedia.page(item).content)
        except wikipedia.exceptions.PageError:
            pass
        except wikipedia.exceptions.DisambiguationError:
            pass
        except KeyError:
            pass
        lookna.append(item)
        lookpa.remove(item)
        try:
            for page in wikipedia.page(item).links:
                if page not in lookpa:
                    if page not in lookna:
                        lookpa.append(page)
        except wikipedia.exceptions.PageError:
            pass
        except wikipedia.exceptions.DisambiguationError:
            pass
        except KeyError:
            pass
        print('Corpus = ' + str(len(corpus)) + ' Searched = ' + str(len(lookna)) + ' Still = ' + str(len(lookpa)))
    f = open(lang + 'FromWikiCorp.txt', 'w')
    f.write(corpus)
    f.close()
def activate(self):
    super().activate()
    if self.config['LANGUAGE'] in wikipedia.languages():
        wikipedia.set_lang(self.config['LANGUAGE'])
    else:
        self.log.warning('{0} is not a valid language code.'.format(
            self.config['LANGUAGE']))
        return
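# A minimal standalone sketch of the validation done in activate() above:
# wikipedia.languages() returns a dict mapping language prefixes to names, so membership
# can be checked before calling set_lang(). The function name and fallback are illustrative.
import wikipedia

def set_language_safely(code, fallback="en"):
    if code in wikipedia.languages():
        wikipedia.set_lang(code)
        return code
    wikipedia.set_lang(fallback)
    return fallback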
def summary(her, m):
    query = m.group(1)
    wikipedia.set_lang(get_language(query))
    res = wikipedia.summary(query, sentences=2)
    page = wikipedia.page(query)
    her.say("%s\n%s" % (res, page.url))
def get_article(self):
    try:
        wikipedia.set_lang(self.lang)
        article = wikipedia.page(self.search)
        return article
    except Exception as e:
        logger.error('Getting wiki article error : ' + str(e))
        raise WikiException(str(e))
def get_summary(self):
    try:
        wikipedia.set_lang(self.lang)
        summary = wikipedia.summary(self.search)
        return summary
    except Exception as e:
        # The original bare `except:` never bound `e`; bind it so it can be logged.
        logger.error('Getting wiki summary error : ' + str(e))
        raise WikiException(str(e))
def __init__(self, lang):
    """ Initializes the wikiScraper class, given a language. """
    # set rate limit
    wikipedia.set_rate_limiting(True, min_wait=datetime.timedelta(0, 0, 500000))
    # set language
    wikipedia.set_lang(lang)
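# A minimal sketch of the same rate-limiting setup with a more readable timedelta:
# timedelta(0, 0, 500000) is 500,000 microseconds, i.e. half a second between requests.
# wikipedia.set_rate_limiting(True, min_wait=...) is the library call used above; the
# language here is only an example.
import datetime
import wikipedia

wikipedia.set_rate_limiting(True, min_wait=datetime.timedelta(milliseconds=500))
wikipedia.set_lang("en")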
def main(authors_file, output_directory):
    """
    :param authors_file: e.g. /users/networks/my_own_file.txt
    :param output_directory: e.g. /users/networks/wikis
    """
    wikipedia.set_lang("de")  # change language
    crawl_wikipedia(authors_file, output_directory)
def __init__(self):
    """
    Init wikipedia
    """
    language = getdefaultlocale()[0][0:2]
    wikipedia.set_lang(language)
    # Translators: Put here words added by wikipedia in band search
    # Ex: Muse_(band), Peaches(musician)
    self._search_str = _("musician;band")
def send_welcome(message):
    wikipedia.set_lang("en")
    try:
        text = ' '.join(message.text.split()[1:]).strip()
        response = wikipedia.summary(text).strip()
        response += '\n\n\n' + wikipedia.page(text).url
    except Exception as e:
        response = str(e)
    bot.reply_to(message, response)
def load_article(self, title):
    """ Load the article from Wikipedia """
    wikipedia.set_lang(self.language)
    self.article_name = title
    page = wikipedia.page(title)
    page = self.store_images(page)
    self.content = self.process_html(page)
def _send_request(self, query, **kwargs):
    lang = kwargs.pop("lang", self.DEFAULT_LANG)
    wikipedia.set_lang(lang)
    try:
        page = wikipedia.page(query, auto_suggest=True, redirect=True, preload=True)
        return page
    except (wikipedia.PageError, wikipedia.DisambiguationError):
        return None
def search(self, query):
    links = []
    wikipedia.set_lang("es")
    ids = wikipedia.search(query)
    for id in ids:
        wiki = wikipedia.page(id)
        refs = wiki.references
        links.extend(refs)
    return links
def __init__(self):
    """
    Init wikipedia
    """
    language = getdefaultlocale()[0][0:2]
    wikipedia.set_lang(language)
    # Translators: Put here words added by wikipedia in band search
    # Ex: Muse_(band), Peaches(musician)
    # Unused but we keep this for now (as it's already translated)
    self._search_str = _("musician;band")
def main():
    parser = argparse.ArgumentParser(
        description='Toma un tema de la Wikipedia y lo twitea como hilo')
    parser.add_argument('-c', action='store_true',
                        help='Loguearse como el último usuario utilizado')
    parser.add_argument('-s', action='store_true',
                        help='Postear no solo el resumen de Wikipedia sino ' +
                             'también sus secciones')
    args = parser.parse_args()
    use_last_creds = args.c
    post_sections = args.s

    wikipedia.set_lang('es')
    name = input('¿De qué quieres tirarte el pisto?: ')
    search_results = wikipedia.search(name)
    if len(search_results) > 1:
        result_str = ''
        for i, e in enumerate(search_results):
            result_str += '[{}] {}\n'.format(i + 1, e)
        print()
        option = input('Sé más preciso:\n' + result_str + '\nNúmero de opción: ')
        page = wikipedia.page(search_results[int(option) - 1])
    elif len(search_results) == 1:
        page = wikipedia.page(search_results[0])
    else:
        print('Lo siento, no hay nada para esa búsqueda :(')
        exit(0)

    # Store the page as a list of strings
    text = [u'No sé si conocéis {}. Abro hilo \U0001F447'.format(page.title)]
    text.extend([clean_string(i) for i in page.summary.splitlines()])
    if post_sections:
        for section in page.sections:
            if section in IGNORED_SECTIONS:
                continue
            text.append('##{}'.format(section))
            text.extend(
                [clean_string(i) for i in page.section(section).splitlines()])

    # Split text into tweets
    tweets = split_text(text)
    print()
    twclient.post_as_thread(tweets, use_last_creds)
    print('¡Enhorabuena!' +
          '¡Ahora todos piensan que eres un experto en {}!'.format(page.title))
def get_category(word):
    wikipedia.set_lang("he")
    res = wikipedia.search(word, 10, True)
    if len(res[0]) != 0:
        title = res[0][0]
        if is_category(title):
            return random.choice(res[0])
        return get_catageories_helper(title)
    else:
        if res[1] is not None:
            return get_catageories_helper(res[1][0])
def send_wiki_info(who, text):
    answ = " ".join(text)
    if answ[-1] == "?":
        answ = answ[:-1]
    wikipedia.set_lang("ru")
    try:
        resp = wikipedia.summary(answ, sentences=6, chars=1, auto_suggest=False, redirect=True)
    except wikipedia.exceptions.DisambiguationError as error:
        resp = wikipedia.summary(error.options[0], sentences=6, chars=1, auto_suggest=False, redirect=True)
    except wikipedia.exceptions.WikipediaException:
        resp = wikipedia.summary(answ, sentences=6, chars=0, auto_suggest=True, redirect=True)
    bot.messages.send(peer_id=who, random_id=random.randint(0, 200000), message=resp)
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

"""
get_ipython().system(' pip install wolframalpha')
get_ipython().system(' pip install wikipedia')
"""

# In[ ]:

import wolframalpha
import wikipedia
import requests

app_id = "virtu@l_@ssitent"
client = wolframalpha.Client(app_id)

while True:
    raw_input = input("\nFaça sua Pesquisa: ")
    wikipedia.set_lang("pt")
    print('\n=========>>> Resposta <<<=========\n')
    print(wikipedia.summary(raw_input, sentences=2))
def OnEnter(self, event):
    user_input = self.txt.GetValue()
    user_input = user_input.lower()

    # Voice recognition input
    if user_input == '':
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
        try:
            # Modified so the passed data isn't unicode and the variable stores the value for later
            self.txt.SetValue(str(r.recognize_google(audio)))
            user_input = str(r.recognize_google(audio))
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand.\nPlease try again.")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition Service; {0}".format(e))

    # Try to catch any disambiguation and other exceptions
    try:
        # Wolfram
        app_id = "YOUR-WOLFRAM-ID"
        client = wolframalpha.Client(app_id)
        res = client.query(user_input)
        answer = next(res.results).text
        print(answer)
    except:
        # Wikipedia
        # First, cleanse the string of 'who', 'what', 'where' queries to obtain info
        query_options = [
            "who is", "what is", "where is", "what does", "how does", "who does"
        ]
        split_input = user_input.split(" ")
        # Then it checks if the input string contains any of the query modifiers
        for option in query_options:
            if option in user_input:
                # Removes the very first thing in the list twice
                for i in range(2):
                    split_input.remove(split_input[0])
        # Rejoin the split input
        user_input = " ".join(split_input)
        try:
            # Some common language shorthand codes just in case:
            # en - English, de - German, zh - Chinese, es - Spanish, fr - French, ru - Russian
            wikipedia.set_lang("en")
            # I personally like how short yet detailed 3 sentences can be
            result = wikipedia.summary(user_input, sentences=3)
            print("\n" + result + "\n")
        # If the search passed Wolfram yet still finds an issue with disambiguation
        # then it iterates through the items neatly and restarts the search
        except wikipedia.exceptions.DisambiguationError as e:
            print("That didn't work. Try being more specific:\n")
            for item in e.options:
                print(item)
def wiki():
    wikipedia.set_lang("ru")
    A = input("о чем найти информацию?")
    print(wikipedia.summary(A))
from tkinter import *
import wikipedia

fplus = 0
ad1 = 0
ad2 = 0
ad3 = ad1
wikipedia.set_lang("it")
ris1 = 0
fclose = 0
flog = 0


def close(event=None):
    fclose = 1


def calcolatore(event=None):
    calc = Tk()
    calc.title("Calcolatore")
    calc.geometry("350x500")
    calc.configure(background="white")
    calc.resizable(True, True)

    def Add1(event=None):
        if fplus == 0:
            ad1 = (ad1 * 10) + 1
            print(ad1)
        else:
def core_ids(self):
    # get page ID of core articles
    wiki.set_lang(lang)
    self.core_articles = {i: str(int(wiki.WikipediaPage(title=i).pageid))
                          for i in self.core_article_names}
    for k, v in self.core_articles.items():
        self.all_articles = self.all_articles.append({'title': k, 'pageid': v},
                                                     ignore_index=True)
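# A minimal standalone sketch of the pageid lookup used in core_ids() above:
# wikipedia.WikipediaPage(title=...) loads a page object whose .pageid attribute holds the
# numeric page ID (returned by the API as a string). The titles here are only examples.
import wikipedia

wikipedia.set_lang("en")
page_ids = {title: int(wikipedia.WikipediaPage(title=title).pageid)
            for title in ["Python (programming language)", "Wikipedia"]}
print(page_ids)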
import os
from gtts import gTTS
import wikipedia

wikipedia.set_lang('he')
result = wikipedia.summary('Israel', sentences=4)
# print(result)

file = gTTS(result)
file.save('Israel.mp3')
from chatterbot.trainers import ChatterBotCorpusTrainer
from chatterbot import ChatBot
from selenium import webdriver
import pyautogui
import time
import os
import sys
import subprocess as s
import speech_recognition as sr
import pyttsx3
import wikipedia
from googlesearch import search
import webbrowser

wikipedia.set_lang('pt')

speaker = pyttsx3.init()
voices = speaker.getProperty('voices')

bot = ChatBot('Hawk', read_only=True)

keywords = [
    'o que é', 'quem é', 'quem foi', 'definição', 'defina', 'onde é', 'onde foi'
]
google_keywords = ['pesquise por', 'pesquise']
facebook_keywords = ['facebook', 'mensagem', 'enviar']
import wikipedia
import re
import asyncio
import requests
import sys
import numpy as np
import random
from pprint import pprint
from bs4 import BeautifulSoup
import spacy
from spacy.matcher import Matcher

nlp = spacy.load('ja_ginza')
wikipedia.set_lang("ja")
matcher = Matcher(nlp.vocab)

# global
FEAT_MIN = 15
FEAT_MAX = 70
# SUMMARY_MIN = 20
# SUMMARY_MAX = 70
CAT_MEM_MIN = 10
CAT_MEM_MAX = 1000
CAT_NUM_MAX = 6
PTAGS_MAX = 15
RANDOM_WIKI = 'https://ja.wikipedia.org/wiki/Special:Random'
BASE_WIKI = 'https://ja.wikipedia.org/wiki/'


def get_tsukami(article=''):
    pattern = [{
""" Wikipedia module for LingQ reader. """ import wikipedia as wiki import LingQapi as lingqapi import numpy as np import sys wiki.set_lang("de") MAX_WORD_LESSON = 1750 # Collection to upload lessons to DEFAULT_COLLECTION = 713861 #===================================================================================== def SetupWikiArticle(article): # Get article page = GetWikiArticle(article) # Get title title = page.title # Get content of lesson text = page.content # Fix missing paragraph issue text = text.replace('. ', 'AAAAA')
#############################################
# A simple wiki bot for VK by MrArthur4Ik :) #
#############################################
import requests
import wikipedia
import vk_api
import traceback
import random
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
from wikipedia.exceptions import PageError

wikipedia.set_lang("ru")


def get_corona_info():
    response = requests.get(
        "https://api.thevirustracker.com/free-api?global=stats")
    return response.json()


def get_corona_info_in_russia(str):
    response = requests.get(
        "https://api.thevirustracker.com/free-api?countryTotal=" + str)
    return response.json()


class RichVkBotLongPoll(VkBotLongPoll):
    def listen(self):
        while True:
            try:
                for event in self.check():
while running:
    try:
        key = input(
            """# type 'quit' to exit from this page #
# type 'about' to know about this page #
# type hindi to get results in hindi #
what you are looking for ?
Search :
""")
        if key == "quit":
            print("-------------> successfully exited from wikipedia <-------------")
            running = False
        elif key == "hindi":
            wikipedia.set_lang("hi")
        elif key == "about":
            emoj = emoji.emojize(":red_heart:", variant="emoji_type")
            print(
                """this page is created to deliver you the most related information about your searches.
It is directly connected to the wikipedia to get the latest and most relevant information for you.
Thank You {} ~ Mohd Bilal (creator)""".format(emoj))
        else:
            print(wikipedia.summary(key, sentences=5))
    except wikipedia.WikipediaException:
        print("Sorry we dont have any information about your input")
async def on_enable(self): SearchModule.cx = os.environ.get("mrvn_image_search_cx") SearchModule.api_key = os.environ.get("mrvn_image_search_apikey") if SearchModule.cx is None or SearchModule.api_key is None: self.logger.error( "[ImageSearch] CX и/или API ключ недоступны. Проверьте \"mrvn_image_search_cx\" и " "\"mrvn_image_search_apikey\" в PATH.") wikipedia.set_lang("ru") @mrvn_command(self, "yt", "Поиск видео в YouTube.", "<поисковый запрос>") class YTCommand(Command): class YoutubeSearch: def __init__(self, search_terms: str, max_results=None): self.search_terms = search_terms self.max_results = max_results self.videos = self.search() def search(self): encoded_search = urllib.parse.quote(self.search_terms) base_url = "https://youtube.com" url = f"{base_url}/results?search_query={encoded_search}" response = requests.get(url).text while 'window["ytInitialData"]' not in response: response = requests.get(url).text results = self.parse_html(response) if self.max_results is not None and len( results) > self.max_results: return results[:self.max_results] return results def parse_html(self, response): results = [] start = (response.index('window["ytInitialData"]') + len('window["ytInitialData"]') + 3) end = response.index("};", start) + 1 json_str = response[start:end] data = json.loads(json_str) videos = data["contents"][ "twoColumnSearchResultsRenderer"]["primaryContents"][ "sectionListRenderer"]["contents"][0][ "itemSectionRenderer"]["contents"] for video in videos: res = {} if "videoRenderer" in video.keys(): video_data = video["videoRenderer"] res["id"] = video_data["videoId"] res["thumbnails"] = [ thumb["url"] for thumb in video_data["thumbnail"]["thumbnails"] ] res["title"] = video_data["title"]["runs"][0][ "text"] res["channel"] = video_data["longBylineText"][ "runs"][0]["text"] res["duration"] = video_data.get( "lengthText", {}).get("simpleText", 0) res["views"] = video_data.get("viewCountText", {}).get( "simpleText", 0) res["url_suffix"] = video_data[ "navigationEndpoint"]["commandMetadata"][ "webCommandMetadata"]["url"] results.append(res) return results def to_dict(self): return self.videos def to_json(self): return json.dumps({"videos": self.videos}) async def execute(self, ctx: CommandContext) -> CommandResult: if len(ctx.args) < 1: return CommandResult.args_error() keyword = " ".join(ctx.clean_args) results = self.YoutubeSearch(keyword, max_results=1).to_dict() if len(results) < 1: return CommandResult.error( "Видео по этому запросу не найдено.") await ctx.message.channel.send( "Видео по запросу \"%s\": (запросил: %s)\n%s" % (keyword, ctx.message.author.mention, "https://youtube.com/" + results[0]["url_suffix"])) return CommandResult.ok() """ url = "https://www.youtube.com/results?search_query=" + urllib.parse.quote_plus(keyword) response = urllib.request.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser') for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}): vid_url = "https://www.youtube.com" + vid["href"] if "channel" not in vid_url: await ctx.message.channel.send("Видео по запросу \"%s\": (запросил: %s)\n%s" % ( keyword, ctx.message.author.mention, vid_url)) return CommandResult.ok() return CommandResult.error("Видео по этому запросу не найдено.") """ @mrvn_command(self, "img", "Поиск изображений в Google.", "<поисковый запрос> [--index=<индекс 0 - 4>]") class ImgCommand(Command): @staticmethod async def image_task(ctx, keyword, index): try: async with aiohttp.ClientSession( timeout=ClientTimeout(20)) as session: async with session.get( 
"https://www.googleapis.com/customsearch/v1", params={ "q": keyword, "num": 5, "start": 1, "searchType": "image", "key": SearchModule.api_key, "cx": SearchModule.cx }) as response: data = await response.json() if response.status != 200: if data["error"][ "status"] == "RESOURCE_EXHAUSTED": await ctx.send_embed( EmbedType.ERROR, "Команда временно недоступна, так как было превышено " "количество запросов к API у бота. Попробуйте " "выполнить её позже.") else: await ctx.send_embed( EmbedType.ERROR, "Произошла ошибка API:\n%s" % data["error"]["status"]) return if data["searchInformation"][ "totalResults"] == "0": await ctx.send_embed( EmbedType.ERROR, "Картинка по запросу \"%s\" не найдена!" % keyword) return max_index = len(data["items"]) - 1 r_index = random.randrange( max_index + 1) if index is None else ( 0 if index not in range(max_index + 1) else index) image = data["items"][r_index] embed: discord.Embed = ctx.get_embed( EmbedType.OK, "", "Картинка по запросу \"%s\" (индекс: %s)" % (keyword, r_index)) embed.set_author( name=image["title"], url=image["image"]["contextLink"], icon_url=image["image"]["thumbnailLink"]) embed.set_image(url=image["link"]) await ctx.message.channel.send(embed=embed) except (asyncio.TimeoutError, aiohttp.ClientConnectionError): await ctx.send_embed(EmbedType.ERROR, "Не удалось подключиться к серверу.") async def execute(self, ctx: CommandContext) -> CommandResult: if SearchModule.cx is None or SearchModule.api_key is None: return CommandResult.error( "Команда не работает, так как API ключ и/или CX " "недоступны. Возможно, бот запущен не в продакшн-среде." ) if len(ctx.clean_args) < 1: return CommandResult.args_error() index = None try: index = int(ctx.keys["index"]) except ValueError: pass except KeyError: pass # if len(ctx.raw_keys) != 0 and ctx.raw_keys[0].startswith("index="): # try: # index = int(ctx.raw_keys[0].split("index=")[1]) # except ValueError: # pass await self.module.bot.module_handler.add_background_task( self.image_task(ctx, " ".join(ctx.clean_args), index), self.module) return CommandResult.ok(wait_emoji=True) @mrvn_command(self, "wiki", "Поиск информации в Wikipedia.", "<поисковый запрос>") class WikiCommand(Command): async def execute(self, ctx: CommandContext) -> CommandResult: if len(ctx.clean_args) < 1: return CommandResult.args_error() query = " ".join(ctx.clean_args) nf_the_search = wikipedia.search(query, results=1) if len(nf_the_search) == 0: return CommandResult.error( "По запросу \"%s\" ничего не найдено." % query) title = nf_the_search[0] while True: try: text = wikipedia.summary(title, sentences=4) except wikipedia.DisambiguationError as e: title = e.options[0] else: break page = wikipedia.page(title) embed: discord.Embed = ctx.get_embed(EmbedType.INFO, text, page.title) if len(page.images) > 0: embed.set_image(url=page.images[0]) await ctx.message.channel.send(embed=embed) return CommandResult.ok()
        dormir()
    elif "consejo" in query:
        consejo()
    elif "pelo" in query:
        pelo()
    elif "tema" in query:
        tema()
    elif "clase" in query:
        clase()
    elif "mimir" in query:
        speak("Bueno, me voy a mimir, te mando un saludo")
        quit()
    elif "wikipedia" in query:
        speak("Buscando...")
        query = query.replace("wikipedia", "")
        wikipedia.set_lang("es")
        result = wikipedia.summary(query, sentences=2)
        speak(result)
    # elif "send email" in query:
    #     try:
    #         speak("Que debo decir?")
    #         content = takeCommand()
    #         to = "*****@*****.**"
    #         sendmail(to, content)
    #         speak("Email enviado con éxito")
    #     except Exception as e:
    #         speak(e)
    #         speak("No se pudo enviar el email")
    elif "buscar en google" in query:
        speak("Que tengo que buscar?")
        chromepath = "C:\Program Files (x86)\Google\Chrome\Application\chrome.exe %s"
import json
import re
import webbrowser
import smtplib
import requests
import urllib
import urllib.request as urllib2
import os
import playsound
import wikipedia
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
from time import strftime
from gtts import gTTS
from youtube_search import YoutubeSearch

# This section declares the variables used by the Alex assistant
wikipedia.set_lang('vi')
language = 'vi'
path = ChromeDriverManager().install()


# Text-to-speech: convert text into spoken audio
def speak(text):
    print("Bot: {}".format(text))
    tts = gTTS(text=text, lang=language, slow=False)
    tts.save("sound.mp3")
    playsound.playsound("sound.mp3", False)
    os.remove("sound.mp3")


# Speech-to-text: convert the spoken request into text that is shown when the machine returns what it heard
def get_audio():
def set_language(self, lang):
    # Change language of Wikipedia (default is English)
    wikipedia.set_lang(lang)
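# A minimal sketch illustrating why wrappers like set_language() above work: set_lang()
# changes module-level state, so it affects every later wikipedia call until it is changed
# again. The titles used here are only examples.
import wikipedia

wikipedia.set_lang("fr")
print(wikipedia.summary("Paris", sentences=1))   # served by fr.wikipedia.org

wikipedia.set_lang("en")
print(wikipedia.summary("Paris", sentences=1))   # now served by en.wikipedia.org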
def reply(chat):
    perintah = chat.split(' ')
    try:
        if perintah[0] in ['menu', 'Menu']:
            arg = '''
Author : +6283172366463
Perintah :
------Network Scanner-----
ReverseIpLookup <ip>
HttpHeader <url>
Nmap <ip>
ExtractLink <url>
AsLookup <ip>
GeoIpLookup <ip>
Ping <url>
MtrTraceroute <ip>
ReverseDnsLookup <ip>
DnsLookup <url>
WhoIs <ip>|<url>
---------other------------
Quotes
Proxy
Url2png <url>
Img-random
Tts <text>
Translate <kalimat>
Wikipedia <kata>
About
Lapor
yt <url> <kualitas>
yt2mp3 <url>
QrGenerator <text>
'''
            return arg
        elif perintah[0].lower() == 'wikipedia':
            wikipedia.set_lang('id')
            v = wikipedia.page(' '.join(perintah[1:]))
            # wikipedia.search() expects a string, so join the remaining words first.
            return 'judul : %s\nsumber : %s \n %s\n\npencarian lainnya : %s' % (
                v.title, v.url, v.summary, '\n ~ '.join(wikipedia.search(' '.join(perintah[1:]))))
        elif perintah[0] in ['ReverseIpLookup', 'reverseiplookup']:  # ip
            return requests.get(f'https://api.hackertarget.com/reverseiplookup/?q={perintah[1]}').text
        elif perintah[0] in ['HttpHeader', 'httpheader']:  # url
            return requests.get(f'https://api.hackertarget.com/httpheaders/?q={perintah[1]}').text
        elif perintah[0] in ['Nmap', 'nmap']:  # ip
            return requests.get(f'https://api.hackertarget.com/nmap/?q={perintah[1]}').text
        elif perintah[0] in ['ExtractLink', 'extractlink']:  # url
            return requests.get(f'https://api.hackertarget.com/pagelinks/?q={perintah[1]}').text
        elif perintah[0] in ['AsLookup', 'aslook']:  # ip
            return requests.get(f'https://api.hackertarget.com/aslookup/?q={perintah[1]}').text
        elif perintah[0] in ['GeoIpLookup', 'geoiplookup']:  # ip
            return requests.get(f'https://api.hackertarget.com/geoip/?q={perintah[1]}').text
        elif perintah[0] in ['Ping', 'ping']:  # url
            return requests.get(f'https://api.hackertarget.com/nping/?q={perintah[1]}').text
        elif perintah[0] in ['MtrTraceroute', 'mtrtraceroute']:
            return requests.get(f'https://api.hackertarget.com/mtr/?q={perintah[1]}').text
        elif perintah[0] in ['ReverseDnsLookup', 'reversednslookup']:  # dns
            return requests.get(f'https://api.hackertarget.com/reversedns/?q={perintah[1]}').text
        elif perintah[0] in ['DnsLookup', 'dnslookup']:  # url
            return requests.get(f'https://api.hackertarget.com/dnslookup/?q={perintah[1]}').text
        elif perintah[0] in ['WhoIs', 'whois']:
            return requests.get(f'https://api.hackertarget.com/whois/?q={perintah[1]}').text
        elif perintah[0].lower() == 'translate':
            arti = translator.translate(chat[len(perintah[0]):], dest='id')
            return arti.text
        elif perintah[0] in ['Quotes', 'quotes']:
            cv = json.loads(requests.get('https://api.quotable.io/random').text)
            arti = translator.translate(cv['content'], dest='id')
            artag = translator.translate(cv['tags'][0], dest='id')
            return 'Author : ' + cv['author'] + '\ntags : ' + artag.text + '\n\n' + arti.text
        elif perintah[0].lower() in ['lapor', 'report']:
            return 'https://wa.me/+6283172366463'
        elif perintah[0] in ['Proxy', 'proxy']:
            vv = open('pi').read().split('\n')
            return random.choice(vv)
        else:
            res = requests.get('https://secureapp.simsimi.com/v1/simsimi/talkset?uid=287126054&av=6.8.9.4&lc=id&cc=&tz=Asia%2FJakarta&os=a&ak=pNfLbeQT%2B0cnFY8YHQb7CNHowpg%3D&message_sentence=' + urllib.parse.quote(chat) + '&normalProb=8&isFilter=1&talkCnt=2&talkCntTotal=2&reqFilter=1&session=XZzaduTVCSqa6vMtuyFhGv9eCXiyWwKJVETZjpQjc2oLPGBN2XtpzcKRFhLukHd6EAYVWMiSGuPzQV5Vwcdmwz14&triggerKeywords=%5B%5D').text
            return json.loads(res)['simsimi_talk_set']['answers'][0]['sentence']
    except IndexError:
        return 'argument tidak valid ketik menu untuk menampilkan semua perintah'
import wikipedia

wikipedia.set_lang('ru')


def search_wiki(word):
    print(word)
    try:
        w = wikipedia.search(word)
        if w:
            w2 = wikipedia.page(word).url
            # print(w2)
            w1 = wikipedia.summary(word)
            return w1, "\nСсылка: " + w2
        return "Запрос в википедии не найден", ""  # None
    except:
        return "Запрос в википедии не найден", ""  # None
def get_snippets(lang, n, featured, good, timeout):
    """
    Args:
        lang (str)
        n (int)
        featured (Dict[str, :class:`wikipediaapi.WikipediaPage`] or None)
        good (Dict[str, :class:`wikipediaapi.WikipediaPage`] or None)
        timeout (int)

    Returns:
        List[str]
    """
    wikipedia.set_lang(lang)
    all_snippets = []
    seen_pages = set()
    start_time = time.time()
    with tqdm(total=n, unit="snippets", unit_scale=True, leave=True) as pbar:
        while len(all_snippets) < n and (time.time() - start_time) < timeout:
            titles = []
            if featured:
                try:
                    featured_titles = get_category_members_from_lang_links(
                        featured, lang, "Category:Featured_articles"
                    )
                    titles.extend(featured_titles)
                except Exception:
                    logging.exception("unable to get featured article titles")
                    continue
                finally:
                    featured = None
            if good:
                try:
                    good_titles = get_category_members_from_lang_links(
                        good, lang, "Category:Good_articles"
                    )
                    titles.extend(good_titles)
                except Exception:
                    logging.exception("unable to get good article titles")
                    continue
                finally:
                    good = None
            try:
                random_titles = wikipedia.random(min(25, n))
                titles.extend(random_titles)
            except Exception:
                logging.exception("unable to get random page titles")
                continue
            for title in titles:
                if title in seen_pages or "/" in title:
                    continue
                try:
                    page = wikipedia.page(title=title)
                except Exception:
                    logging.debug("unable to fetch wiki page '%s'", title)
                    continue
                try:
                    snippets = extract_snippets_from_page(page, exclude_en=lang != "en")
                except Exception:
                    logging.exception("unable to extract snippets from page '%s'", title)
                    continue
                all_snippets.extend(snippets)
                pbar.update(len(snippets))
                # break out of random title loop early if we can
                if len(all_snippets) >= n:
                    break
    return all_snippets[:n]
def sluchai():
    A = int(input("введи число интервала:"))
    B = int(input("введи конец интервала:"))
    C = int(input("1 - целое,2 - дробное:"))
    if C == 1:
        # random.randit does not exist; the function is random.randint
        print(random.randint(A, B))
    elif C == 2:
        print(random.uniform(A, B))


def wiki():
    wikipedia.set_lang("ru")
    A = input("о чем найти информацию?")
    print(wikipedia.summary(A))


wikipedia.set_lang("ru")
word = ""
print(Fore.LIGHTRED_EX + 'Добрый вечер Господин!')
say("Добрый вечер Господин!")
print(Fore.MAGENTA + 'Меня зовут Деламэин')
say("Меня зовут Дэламэин")
print(Fore.BLUE + 'Чего желаете?')
say("Чего желаете?")
while word != "exit":
    # Tell the user about the available commands, how to exit, etc.
    print(Fore.LIGHTCYAN_EX + "1 - случайное число, 2 - wikipedia,3 - погода")
    word = input("Введи команду:")
    if word == "1":
        sluchai()
import wikipedia
import sys
import time

language = "fr"
if len(sys.argv) >= 2:
    language = str(sys.argv[1])

filename = time.strftime("%d%m%Y%H%M") + language
f = file(filename, 'w')

wikipedia.set_lang(language)

#titouan = wikipedia.page("New York")
#text = titouan.content.encode('utf-8')
#f.write(text)

for x in range(0, 100):
    try:
        pageName = wikipedia.random()
        print str(x) + " : " + pageName + "\n"
        page = wikipedia.page(pageName)
        text = page.content.encode('utf-8')
        f.write(text)
    except Exception as exception:
        print "No loaded\n"

f.close()
def summary_keyword(line_event, keyword):
    lang = get_language(line_event)
    wikipedia.set_lang(lang)
    return wikipedia.summary(keyword, sentences=5)
def __init__(self, artigo):
    self.artigo = artigo
    wikipedia.set_lang('pt')
    self.pesquisa = wikipedia.page(artigo)
import wikipedia
import spacy
import time

spacy.require_gpu()
nlp = spacy.load('en')
wikipedia.set_lang("en")

page_4 = wikipedia.page("Deepwater Horizon oil spill")
data_4 = nlp(page_4.content)
print("------------------------------")
print(len(data_4))
print("GPU:: data_4 done")
print("------------------------------")
def get(self, params=None): """ gets the answer from the answer template :param params: msg = params[0], func = params[1] :return: returns the first template if is_random is false, otherwise returns random template """ msg = (params[0] + " " + u"סרט ויקיפדיה").encode('utf-8') paranthesis_pattern = "[\(].*?[\)]" try: b = Browser() gs = GoogleSearch(msg) gs.results_per_page = 50 results = gs.get_results() for res in results: # print res.title.replace('ynet','') try: if (res.url is not None): page = b.get_page(res.url) soup = BeautifulSoup(page) title = soup.find("title") if (title is not None): res = title.text.split('-')[0] re.sub(paranthesis_pattern, "", res) wikipedia.set_lang('He') title = wikipedia.search(res) if (len(title) == 0): return u"לא מכיר את הסרט הזה" wiki_summary = wikipedia.summary(title[0]) if (wiki_summary is None): return title + "?" paranthesis_values = re.findall( paranthesis_pattern, wiki_summary) english_name = "" for value in paranthesis_values: if (u'באנגלית' in value): english_name = re.sub( r'[^a-zA-Z ]', '', value.encode('utf-8')) # english_name = value if (english_name == ""): return u"וואלה לא ראיתי את הסרט הזה, הביקורות עליו טובות?" query = 'www.imdb.com:' + english_name gs = GoogleSearch(query) gs.results_per_page = 50 results = gs.get_results() for imdb_res in results: if (imdb_res.url is not None): page = b.get_page(imdb_res.url) soup = BeautifulSoup(page) rating = soup.find("span", {"class": "rating"}) if (rating is not None): rate = float(rating.next) return self.get_response_by_rate(rate) break except Exception as e: print(e) return u"לא מכיר את הסרט הזה" return u"לא מכיר את הסרט הזה" except SearchError, e: print "Search failed: %s" % e
@License :   (C)Copyright 2019-2020
@Modify Time        @Author     @Version    @Description
------------        -------     --------    -----------
2019-11-19 15:21    tangyubao   1.0         None
'''

# import lib
import wikipedia

# try:
#     mercury = wikipedia.summary("IU")
# except wikipedia.exceptions.DisambiguationError as e:
#     print(e.options)
# print(wikipedia.summary('IU', sentences=2))

wikipedia.set_lang('en')
# words_list = ['IU', 'Smita Patil', 'Raj Babbar', 'Krishna', 'Rukmini']
words_list = ['davenport municipal airport']

# descrip = []
# for w in words_list:
#     # descrip.append(wikipedia.summary(w, sentences=2))
#     try:
#         mercury = wikipedia.summary(w, sentences=2)
#     except wikipedia.exceptions.DisambiguationError as e:
#         print(e.options)
#         w = wikipedia.suggest(w)
#         mercury = wikipedia.summary(w, sentences=2)
#     descrip.append(mercury)
# print(descrip)

# print(wikipedia.suggest('IU'))
# print(wikipedia.summary('davenport municipal airport'))
def bot(op): try: if op.type == 0: return if op.type == 5: if wait["autoAdd"] == True: cl.findAndAddContactsByMid(op.param1) if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 13: cl.acceptGroupInvitation(op.param1) cl.sendText(op.param1, "Ketik Help Untuk Liat Menu") #------Protect Group Kick start------# if op.type == 11: if wait["Protectgr"] == True: if cl.getGroup(op.param1).preventJoinByTicket == False: if op.param2 in Bots: pass if op.param2 in admin: pass else: try: cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir") cl.kickoutFromGroup(op.param1,[op.param2]) X = cl.getGroup(op.param1) X.preventJoinByTicket = True cl.updateGroup(X) except: random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir") random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) Z = random.choice(KAC).getGroup(op.param1) Z.preventJoinByTicket = True random.choice(KAC).updateGroup(Z) #------Protect Group Kick finish-----# #------Cancel Invite User start------# if op.type == 13: if wait["Protectcancl"] == True: group = cl.getGroup(op.param1) gMembMids = [contact.mid for contact in group.invitee] if op.param2 in Bots: pass if op.param2 in admin: pass else: random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids) random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel😛") #------Cancel Invite User Finish------# if op.type == 13: if mid in op.param3: if wait["autoJoin"] == True: if op.param2 in Bots or owner: cl.acceptGroupInvitation(op.param1) else: cl.rejectGroupInvitation(op.param1) else: print "autoJoin is Off" #------Joined User Kick start------# if op.type == 19: #Member Ke Kick if op.param2 in Bots: pass elif op.param2 in admin: pass elif op.param2 in whitelist: pass else: try: cl.kickoutFromGroup(op.param1,[op.param2]) wait["blacklist"][op.param2] = True #f=codecs.open('st2__b.json','w','utf-8') #json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) except: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) wait["blacklist"][op.param2] = True #f=codecs.open('st2__b.json','w','utf-8') #json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) if op.type == 22: if wait["leaveRoom"] == False: cl.leaveRoom(op.param1) if op.type == 24: if wait["leaveRoom"] == False: cl.leaveRoom(op.param1) if op.type == 26: msg = op.message if msg.toType == 1: if wait["leaveRoom"] == False: cl.leaveRoom(msg.to) if msg.contentType == 16: url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post") cl.like(url[25:58], url[66:], likeType=1001) if op.type == 26: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: cl.sendText(msg.to,"already") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False cl.sendText(msg.to,"decided not to comment") elif wait["dblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: del wait["commentBlack"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"deleted") wait["dblack"] = False else: wait["dblack"] = False cl.sendText(msg.to,"It is not in the black list") elif wait["wblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: cl.sendText(msg.to,"already") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False 
cl.sendText(msg.to,"aded") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del wait["blacklist"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"deleted") wait["dblacklist"] = False else: wait["dblacklist"] = False cl.sendText(msg.to,"It is not in the black list") elif wait["contact"] == True: msg.contentType = 0 cl.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) else: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) elif msg.contentType == 16: if wait["timeline"] == True: msg.contentType = 0 if wait["lang"] == "JP": msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"] else: msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"] cl.sendText(msg.to,msg.text) elif msg.text is None: return elif msg.text in ["Key","help","Help"]: if wait["lang"] == "JP": cl.sendText(msg.to,helpMessage) else: cl.sendText(msg.to,helpt) elif msg.text in ["Admin menu"]: if msg.from_ in admin: if wait["lang"] == "JP": cl.sendText(msg.to,Setgroup) else: cl.sendText(msg.to,Sett) elif ("Gn " in msg.text): if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gn ","") cl.updateGroup(X) else: cl.sendText(msg.to,"It can't be used besides the group.") elif "Kick " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Kick ","") random.choice(KAC).kickoutFromGroup(msg.to,[midd]) elif "Invite " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Invite ","") cl.findAndAddContactsByMid(midd) cl.inviteIntoGroup(msg.to,[midd]) #-----------------BATAS WILAYAH KEKUASAAN-----------------# if msg.text in ["Tagall","tagall","Tag all","tag all"]: group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] cb = "" cb2 = "" strt = int(0) akh = int(0) for md in nama: akh = akh + int(6) cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.$ strt = strt + int(7) akh = akh + 1 cb2 += "@nrik \n" cb = (cb[:int(len(cb)-1)]) msg.contentType = 0 msg.text = cb2 msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'} try: kc.sendMessage(msg) except Exception as error: print error #-----------------BATAS WILAYAH KEKUASAAN------------------# #--------------- SC Add Admin --------- elif "Admin add @" in msg.text: if msg.from_ in owner: print "[Command]Staff add executing" _name = msg.text.replace("Admin add @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: random.choice(KAC).sendText(msg.to,"Contact not found") else: for target in targets: try: admin.append(target) cl.sendText(msg.to,"Admin Ditambahkan") except: pass print "[Command]Staff add executed" else: cl.sendText(msg.to,"Perintah 
Ditolak.") cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.") elif "Admin remove @" in msg.text: if msg.from_ in owner: print "[Command]Staff remove executing" _name = msg.text.replace("Admin remove @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: random.choice(KAC).sendText(msg.to,"Contact not found") else: for target in targets: try: admin.remove(target) cl.sendText(msg.to,"Admin Dihapus") except: pass print "[Command]Staff remove executed" else: cl.sendText(msg.to,"Perintah Ditolak.") cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.") elif msg.text in ["Adminlist","adminlist"]: if admin == []: cl.sendText(msg.to,"The stafflist is empty") else: cl.sendText(msg.to,"Tunggu...") mc = "||Admin One Piece Bot||\n=====================\n" for mi_d in admin: mc += "••>" +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) print "[Command]Stafflist executed" #-------------------------------------- elif msg.text in ["Speed","Sp"]: if msg.from_ in owner: start = time.time() cl.sendText(msg.to, "Wait...") elapsed_time = time.time() - start cl.sendText(msg.to, "%sDetik" % (elapsed_time)) elif "Steal cover @" in msg.text: if msg.from_ in admin: print "[Command]dp executing" _name = msg.text.replace("Steal cover @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendImageWithURL(msg.to, path) except: pass print "[Command]dp executed" elif "Steal pict " in msg.text: if msg.from_ in admin: if msg.toType == 2: msg.contentType = 0 steal0 = msg.text.replace("Steal pict ","") steal1 = steal0.lstrip() steal2 = steal1.replace("@","") steal3 = steal2.rstrip() _name = steal3 group = cl.getGroup(msg.to) targets = [] for g in group.members: if _name == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"not found") else: for target in targets: try: contact = cl.getContact(target) try: image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus except: image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg" try: cl.sendImageWithURL(msg.to,image) except Exception as error: cl.sendText(msg.to,(error)) pass except: cl.sendText(msg.to,"Error!") break else: cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup") #-------------- Add Friends ------------ elif "Bot Add @" in msg.text: if msg.toType == 2: if msg.from_ in owner: print "[Command]Add executing" _name = msg.text.replace("Bot Add @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: random.choice(KAC).sendText(msg.to,"Contact not found") else: for target in targets: try: cl.findAndAddContactsByMid(target) except: cl.sendText(msg.to,"Error") else: cl.sendText(msg.to,"Perintah Ditolak.") cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.") #-------------=SC AllBio=---------------- Ganti Bio Semua Bot Format => Allbio: SUKA SUKA KALIAN :D elif "Allbio:" in msg.text: if msg.from_ in owner: string = msg.text.replace("Allbio:","") if len(string.decode('utf-8')) <= 500: profile = cl.getProfile() profile.statusMessage = string 
cl.updateProfile(profile) cl.sendText(msg.to,"Bio berubah menjadi " + string + "") #--------------=Finish=---------------- elif 'instagram ' in msg.text.lower(): try: instagram = msg.text.lower().replace("instagram ","") html = requests.get('https://www.instagram.com/' + instagram + '/?') soup = BeautifulSoup(html.text, 'html5lib') data = soup.find_all('meta', attrs={'property':'og:description'}) text = data[0].get('content').split() data1 = soup.find_all('meta', attrs={'property':'og:image'}) text1 = data1[0].get('content').split() user = "******" + text[-2] + "\n" user1 = "Username: "******"\n" followers = "Followers: " + text[0] + "\n" following = "Following: " + text[2] + "\n" post = "Post: " + text[4] + "\n" link = "Link: " + "https://www.instagram.com/" + instagram detail = "**INSTAGRAM INFO USER**\n" details = "\n**INSTAGRAM INFO USER**" cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) cl.sendImageWithURL(msg.to, text1[0]) except Exception as njer: cl.sendText(msg.to, str(njer)) #--------------= SC Ganti nama Owner=-------------- elif "Myname:" in msg.text: if msg.from_ in owner: string = msg.text.replace("Myname:","") if len(string.decode('utf-8')) <= 20: profile = cl.getProfile() profile.displayName = string cl.updateProfile(profile) cl.sendText(msg.to,"Update Name Menjadi : " + string + "") #-------------- copy profile---------- elif "Spam: " in msg.text: if msg.from_ in admin: txt = msg.text.split(" ") jmlh = int(txt[2]) teks = msg.text.replace("Spam: ")+str(txt[1])+" "+str(jmlh + " ","") tulisan = jmlh * (teks+"\n") #@reno.a.w if txt[1] == "on": if jmlh <= 300: for x in range(jmlh): cl.sendText(msg.to, teks) else: cl.sendText(msg.to, "Kelebihan batas:v") elif txt[1] == "off": if jmlh <= 300: cl.sendText(msg.to, tulisan) else: cl.sendText(msg.to, "Kelebihan batas :v") #-----------------=Selesai=------------------ elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot if msg.from_ in admin: msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif msg.text in ["Me"]: msg.contentType = 13 msg.contentMetadata = {'mid': msg.from_} random.choice(KAC).sendMessage(msg) elif "jointicket " in msg.text.lower(): rplace=msg.text.lower().replace("jointicket ") if rplace == "on": wait["atjointicket"]=True elif rplace == "off": wait["atjointicket"]=False cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"])) elif '/ti/g/' in msg.text.lower(): link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?') links = link_re.findall(msg.text) n_links=[] for l in links: if l not in n_links: n_links.append(l) for ticket_id in n_links: if wait["atjointicket"] == True: group=cl.findGroupByTicket(ticket_id) cl.acceptGroupInvitationByTicket(group.mid,ticket_id) cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name)) elif "Info Group" == msg.text: if msg.toType == 2: if msg.from_ in admin: ginfo = cl.getGroup(msg.to) try: gCreator = ginfo.creator.displayName except: gCreator = "Error" if wait["lang"] == "JP": if ginfo.invitee is None: sinvitee = "0" else: sinvitee = str(len(ginfo.invitee)) if ginfo.preventJoinByTicket == True: QR = "Close" else: QR = "Open" random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + 
sinvitee) else: random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif "My mid" == msg.text: if msg.from_ in admin: random.choice(KAC).sendText(msg.to, msg.from_) elif "Mid Bot" == msg.text: if msg.from_ in admin: cl.sendText(msg.to,mid) elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ :開"]: if msg.from_ in admin: if wait["autoJoin"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"done") else: wait["autoJoin"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"done") elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ :關"]: if msg.from_ in admin: if wait["autoJoin"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"already off") else: cl.sendText(msg.to,"done") else: wait["autoJoin"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already off") else: cl.sendText(msg.to,"done") elif "album merit " in msg.text: gid = msg.text.replace("album merit ","") album = cl.getAlbum(gid) if album["result"]["items"] == []: if wait["lang"] == "JP": cl.sendText(msg.to,"There is no album") else: cl.sendText(msg.to,"相册没在。") else: if wait["lang"] == "JP": mg = "The following is the target album" else: mg = "以下是对象的相册" for y in album["result"]["items"]: if "photoCount" in y: mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n" else: mg += str(y["title"]) + ":0sheet\n" cl.sendText(msg.to,mg) elif "album " in msg.text: gid = msg.text.replace("album ","") album = cl.getAlbum(gid) if album["result"]["items"] == []: if wait["lang"] == "JP": cl.sendText(msg.to,"There is no album") else: cl.sendText(msg.to,"相册没在。") else: if wait["lang"] == "JP": mg = "The following is the target album" else: mg = "以下是对象的相册" for y in album["result"]["items"]: if "photoCount" in y: mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n" else: mg += str(y["title"]) + ":0sheet\n" elif "album remove " in msg.text: gid = msg.text.replace("album remove ","") albums = cl.getAlbum(gid)["result"]["items"] i = 0 if albums != []: for album in albums: cl.deleteAlbum(gid,album["id"]) i += 1 if wait["lang"] == "JP": cl.sendText(msg.to,str(i) + "Deleted albums") else: cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚") elif msg.text in ["Group id"]: gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i) cl.sendText(msg.to,h) elif msg.text in ["Cancelall"]: if msg.from_ in admin: gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) if wait["lang"] == "JP": cl.sendText(msg.to,"All invitations have been refused") else: cl.sendText(msg.to,"æ‹’ç»�了全部的邀请。") elif "album removeat’" in msg.text: gid = msg.text.replace("album removeat’","") albums = cl.getAlbum(gid)["result"]["items"] i = 0 if albums != []: for album in albums: cl.deleteAlbum(gid,album["id"]) i += 1 if wait["lang"] == "JP": cl.sendText(msg.to,str(i) + "Albums deleted") else: cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚") elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]: if msg.from_ in admin: if wait["autoAdd"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"Done") else: 
wait["autoAdd"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Done") else: cl.sendText(msg.to,"è¦�了开。") #--------------------------------# elif msg.text == "Cctv": #if msg.from_ in admin: cl.sendText(msg.to, "Cek Sider On") try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] except: pass now2 = datetime.now() wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M") wait2['ROM'][msg.to] = {} #print wait2 elif msg.text == "Ciduk": # if msg.from_ in admin: if msg.to in wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: chiya = "" else: chiya = "" for rom in wait2["ROM"][msg.to].items(): #print rom chiya += rom[1] + "\n" cl.sendText(msg.to, "|Readers||%s\n\n\n||Ignored||\n%s\n||Bot By OSIS MAN2BDG||\n\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to])) else: cl.sendText(msg.to, "Ketik Cctv dulu Sayang♥\nBaru Ketik Ciduk ♥") #-------------Fungsi Broadcast Start------------# elif "Bc " in msg.text: # if msg.from_ in owner: bctxt = msg.text.replace("Bc ","") a = cl.getGroupIdsJoined() for taf in a: cl.sendText(taf, (bctxt)) #--------------Fungsi Broadcast Finish-----------# #------------ Keluarin Bot----------------------# elif msg.text in ["Bot out","@bye"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D) #if msg.from_ in owner: gid = cl.getGroupIdsJoined() if wait["lang"] == "JP": cl.sendText(msg.to,"Bye Aku Out Dulu♥") for i in gid: cl.leaveGroup(i) else: cl.sendText(msg.to,"He declined all invitations") #---------------------------------------------# elif text.lower() == 'tagall': group = client.getGroup(receiver) nama = [contact.mid for contact in group.members] nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama) if jml <= 100: client.mention(receiver, nama) if jml > 100 and jml < 200: for i in range(0, 100): nm1 += [nama[i]] client.mention(receiver, nm1) for j in range(101, len(nama)): nm2 += [nama[j]] client.mention(receiver, nm2) if jml > 200 and jml < 300: for i in range(0, 100): nm1 += [nama[i]] client.mention(receiver, nm1) for j in range(101, 200): nm2 += [nama[j]] client.mention(receiver, nm2) for k in range(201, len(nama)): nm3 += [nama[k]] client.mention(receiver, nm3) if jml > 300 and jml < 400: for i in range(0, 100): nm1 += [nama[i]] client.mention(receiver, nm1) for j in range(101, 200): nm2 += [nama[j]] client.mention(receiver, nm2) for k in range(201, len(nama)): nm3 += [nama[k]] client.mention(receiver, nm3) for l in range(301, len(nama)): nm4 += [nama[l]] client.mention(receiver, nm4) if jml > 400 and jml < 501: for i in range(0, 100): nm1 += [nama[i]] client.mention(receiver, nm1) for j in range(101, 200): nm2 += [nama[j]] client.mention(receiver, nm2) for k in range(201, len(nama)): nm3 += [nama[k]] client.mention(receiver, nm3) for l in range(301, len(nama)): nm4 += [nama[l]] client.mention(receiver, nm4) for m in range(401, len(nama)): nm5 += [nama[m]] client.mention(receiver, nm5) client.sendText(receiver, "Members :"+str(jml)) elif "Apakah " in msg.text: tanya = msg.text.replace("Apakah ","") jawab = ("Ya","Tidak","Mungkin","Bisa jadi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif "Wikipedia " in msg.text: try: wiki = msg.text.lower().replace("Wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=1) pesan+="\n" 
pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except: try: pesan="Over Text Limit! Please Click link\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except Exception as e: cl.sendText(msg.to, str(e)) #-------------Creator------------------# elif msg.text in ["Creator"]: msg.contentType = 13 msg.contentMetadata = {'mid': 'u7e96d3b3f5b37ccffbebd18aa343e5f3'} cl.sendMessage(msg) cl.sendText(msg.to,"Itu Kak Creator Bot Kami") #-------------Finish----------------# #---------CCTV----------- if op.type == 55: try: if op.param1 in wait2['readPoint']: Name = cl.getContact(op.param2).displayName if Name in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += "\n[•]" + Name wait2['ROM'][op.param1][op.param2] = "[•]" + Name else: cl.sendText except: pass #--------------------- if op.type == 17: if op.param2 in Bots: return ginfo = cl.getGroup(op.param1) random.choice(KAC).sendText(op.param1, "Selamat Datang Di Grup " + str(ginfo.name)) random.choice(KAC).sendText(op.param1, "Creator Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName) print "MEMBER HAS JOIN THE GROUP" if op.type == 15: if op.param2 in Bots: return random.choice(KAC).sendText(op.param1, "") print "MEMBER HAS LEFT THE GROUP" #------------------------ if op.type == 59: print op except Exception as error: print error
import urllib.parse import urllib.request from bs4 import BeautifulSoup import wikipedia import pandas as pd import re wikipedia.set_lang("fa") url = 'https://fa.wikipedia.org/w/index.php' headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 ' 'Safari/537.17' } def preprocess(rawText): # after_word = "== منابع ==" # cleaned_text = rawText[:rawText.index(after_word)] cleaned_text = re.sub("=.*?=", '', rawText, flags=re.DOTALL) cleaned_text = cleaned_text.replace('\n\n', '\n') cleaned_text = cleaned_text.replace(' ', ' ') cleaned_text = cleaned_text.replace('\n\n', '\n') return cleaned_text def getValues(topic, limit, offset): return { 'search': topic, 'title': 'ویژه:جستجو',
def start(update, context): if update.effective_chat.type == "private": args = context.args if len(args) >= 1: if args[0].lower() == "help": send_help(update.effective_chat.id, tl(update.effective_message, HELP_STRINGS)) elif args[0].lower() == "get_notes": update.effective_message.reply_text( tl(update.effective_message, "Anda sekarang dapat mengambil catatan di grup.")) elif args[0].lower().startswith("stngs_"): match = re.match("stngs_(.*)", args[0].lower()) chat = dispatcher.bot.getChat(match.group(1)) if is_user_admin(chat, update.effective_user.id): send_settings(match.group(1), update.effective_user.id, False) else: send_settings(match.group(1), update.effective_user.id, True) elif args[0][1:].isdigit() and "rules" in IMPORTED: IMPORTED["rules"].send_rules(update, args[0], from_pm=True) elif args[0][:4] == "wiki": wiki = args[0].split("-")[1].replace('_', ' ') message = update.effective_message getlang = langsql.get_lang(message) if getlang == "id": wikipedia.set_lang("id") pagewiki = wikipedia.page(wiki) judul = pagewiki.title summary = pagewiki.summary if len(summary) >= 4096: summary = summary[:4000] + "..." message.reply_text("<b>{}</b>\n{}".format(judul, summary), parse_mode=ParseMode.HTML, reply_markup=InlineKeyboardMarkup([[ InlineKeyboardButton(text=tl( update.effective_message, "Baca di Wikipedia"), url=pagewiki.url) ]])) elif args[0][:6].lower() == "verify": chat_id = args[0].split("_")[1] verify_welcome(update, context, chat_id) else: first_name = update.effective_user.first_name buttons = InlineKeyboardMarkup( [[ InlineKeyboardButton(text="💭 Language", callback_data="main_setlang"), InlineKeyboardButton(text="⚙️ Connect Group", callback_data="main_connect") ], [ InlineKeyboardButton(text="👥 Support Group", url="https://t.me/slpcgame"), InlineKeyboardButton(text="🔔 Update Channel", url="https://t.me/slpcgames") ], [ InlineKeyboardButton( text="❓ Help", url="https://t.me/{}?start=help".format( context.bot.username)), InlineKeyboardButton( text="💖 About Developer", url="https://slcreedtest.000webhostapp.com/") ], [ InlineKeyboardButton( text="🎉 Add me to your group", url="https://t.me/{}?startgroup=new".format( context.bot.username)) ]]) update.effective_message.reply_photo( DAISY_IMG, tl(update.effective_message, PM_START_TEXT).format( escape_markdown(first_name), escape_markdown(context.bot.first_name), OWNER_ID), disable_web_page_preview=True, parse_mode=ParseMode.MARKDOWN, reply_markup=buttons) else: update.effective_message.reply_text( tl(update.effective_message, "Ada yang bisa saya bantu? 😊"))
def handle_message(event): text = event.message.text #simplify for receove message sender = event.source.user_id #get user_id gid = event.source.sender_id #get group_id try: if veri[gid] == True: codex[gid] = codex[gid] + 1 except: veri[gid] = False codex[gid] = 0 try: if veri[gid] == True: if codex[gid] == 5: line_bot_api.push_message( gid, TextSendMessage(text="โค้ดหมดอายุแล้ว")) veri[gid] = False codex[gid] = 0 except: veri[gid] = False codex[gid] = 0 if text.startswith("/verify"): try: if veri[gid] == True: line_bot_api.push_message( gid, TextSendMessage(text="บอทได้รับการยืนยันเรียบร้อยแล้ว")) else: try: separate = text.split(" ") search = text.replace(separate[0] + " ", "") if search == code[gid]: line_bot_api.push_message( gid, TextSendMessage(text="ยืนยันสำเร็จ")) veri[gid] = True else: line_bot_api.push_message( gid, TextSendMessage(text="โค้ดยืนยันไม่ถูกต้อง")) except: return except: veri[gid] = False line_bot_api.push_message( gid, TextSendMessage(text="บอทได้รับการยืนยันเรียบร้อยแล้ว")) if text.startswith("/"): try: if veri[gid] == False: n = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "A", "B", "C", "D", "E", "F", "G" ] b = "" for x in range(5): b += random.choice(n) try: code[gid] = b line_bot_api.push_message( gid, TextSendMessage(text="พิพม์ /verify " + code[gid] + "\nเพื่อยืนยันบอท")) return except: code[gid] = b line_bot_api.push_message( gid, TextSendMessage(text="พิพม์ /verify " + code[gid] + "\nเพื่อยืนยันบอท")) return except: code[gid] = "" veri[gid] = False return if groupcastt != "no": try: if groupcast[gid] == False: groupcast[gid] = True h = "[ ประกาศ ]\n\n" + groupcastt line_bot_api.push_message(gid, TextSendMessage(text=h)) except: groupcast[gid] = False """if text.startswith("/broadcast"): separate = text.split(" ") textt = text.replace(separate[0] + " ","") if(event.source.user_id == "Udaa0a2f396dd41e4398b106d903d92fd"): line_bot_api.reply_message(gid, TextSendMessage(text="ตั้งข้อความประกาศว่า " + textt)) groupcastt = textt groupcast = {} else: line_bot_api.reply_message(event.reply_token, TextSendMessage(text="ผู้ใช้นี้ไม่ได้รับอนุญาต")) """ #try: # if groupcast[gid] == True: # line_bot_api.push_message(gid, TextSendMessage(text="[ ประกาศ ]\n"+groupcastt)) # groupcast[gid] = False # elif groupcast[gid] == False: # groupcast = False # else: # line_bot_api.push_message(gid, TextSendMessage(text="[ ประกาศ ]\n"+groupcastt)) # groupcast[gid] = True #except Exception as Error: # groupcast[gid] = False # line_bot_api.push_message(gid, TextSendMessage(text="[ ประกาศ ]\n"+groupcastt)) try: if veri[gid] == True: if text.isdigit(): b = int(text) reverse = 0 while (b > 0): reminder = b % 10 reverse = (reverse * 10) + reminder b = b // 10 x = int(text) + 1 line_bot_api.push_message(gid, TextSendMessage(text=x)) except: veri[gid] = False """if text.startswith("/graph"): try: headers = {"Authorization": "Bearer ya29.GlsMBisE2cNscXj8RW1UP32SVEkIOJ8z1rx4oE2tQGRXxomt1t6rxoM9L11EH3pm5mKK3uIlxfytEuwN3y-4uM0eoMsFo8BjpQglayMH1E-0y5tNW0wwr4MP2nc4"} x = [1,2,3] y = [2,4,1] plt.plot(x, y) plt.xlabel('x - axis') plt.ylabel('y - axis') plt.title('[ By PASUNx ]') plt.savefig('b.png', dpi=100) para = { "name": "b.png", "parents": ["1ohcThxOTwMY-wLeP4UWaBTf_Dc7Fyr-b"] } files = { 'data': ('metadata', json.dumps(para), 'application/json; charset=UTF-8'), 'file': open("./b.png", "rb") } r = requests.post( "https://www.googleapis.com/upload/drive/v3/files?uploadType=multipart", headers=headers, files=files ) t = r.json() txt = "https://drive.google.com/file/d/" + t["id"] + "/view" 
line_bot_api.push_message(gid, TextSendMessage(text=txt)) except Exception as Err: line_bot_api.push_message(gid, TextSendMessage(text="THIS IS BETA"))""" if text.startswith("/weather"): weatherurl = 'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22bangkok%2C%20th%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys' req = requests.get(weatherurl) x = req.json() aa = x["query"]["results"]["channel"]["location"] ab = x["query"]["results"]["channel"]["wind"] ac = x["query"]["results"]["channel"]["atmosphere"] ad = x["query"]["results"]["channel"]["astronomy"] b = aa["city"] c = ab["chill"] d = ab["direction"] e = ab["speed"] f = ac["humidity"] g = ac["pressure"] h = ac["rising"] i = ac["visibility"] j = ad["sunrise"] k = ad["sunset"] o = str((int(c) - 32) / 1.8) o = o[:o.index('.')] txt = "สภาพอากาศ กรุงเทพมหานคร" + "\n──────────────\n" txt += "อุณหถูมิ " + o + " ℃" txt += "\nลม\nความเย็น " + c txt += "\nทิศทาง " + d txt += "\nความเร็ว " + e + " mph" txt += "\n\nบรรยากาศ\nความชื้น " + f txt += "\nความดัน " + g + " in" txt += "\nที่เพิ่มสูงขึ้น " + h txt += "\nความชัดเจน " + i txt += "\n\nพระอาทิตย์ขึ้น " + j txt += "\nพระอาทิตย์ตกดิน " + k line_bot_api.push_message(gid, TextSendMessage(text=txt)) if text.startswith("/divide"): separate = text.split(" ") try: t1 = int(text.split(" ")[1]) t2 = int(text.split(" ")[2]) txt = str(t1) + " / " + str(t2) + "\n──────────────" txt += "\n" + str(t1 / t2) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage( text="วิธีการใช้งาน:\n/divide [ ตัวเลข ] [ ตัวเลข ]")) if text.startswith("/plus"): separate = text.split(" ") try: t1 = int(text.split(" ")[1]) t2 = int(text.split(" ")[2]) txt = str(t1) + " + " + str(t2) + "\n──────────────" txt += "\n" + str(t1 + t2) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage( text="วิธีการใช้งาน:\n/plus [ ตัวเลข ] [ ตัวเลข ]")) if text.startswith("/minus"): separate = text.split(" ") try: t1 = int(text.split(" ")[1]) t2 = int(text.split(" ")[2]) txt = str(t1) + " - " + str(t2) + "\n──────────────" txt += "\n" + str(t1 - t2) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage( text="วิธีการใช้งาน:\n/minus [ ตัวเลข ] [ ตัวเลข ]")) if text.startswith("/sqrt"): separate = text.split(" ") try: m = int(text.replace(separate[0] + " ", "")) txt = "สแควรูท " + str(m) + "\n──────────────" txt += "\n" + str(math.sqrt(m)) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage(text="วิธีการใช้งาน:\n/sql [ ตัวเลข ]")) if text.startswith("/mtpt"): separate = text.split(" ") try: m = int(text.replace(separate[0] + " ", "")) txt = "สูตรคูณแม่ " + str(m) + "\n──────────────" for i in range(12): x = i + 1 txt += "\n" + str(m) + " * " + str(x) + " = " + str(m * x) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage(text="วิธีการใช้งาน:\n/mtpt [ ตัวเลข ]")) elif text.startswith("/mtp"): separate = text.split(" ") try: t1 = int(text.split(" ")[1]) t2 = int(text.split(" ")[2]) txt = str(t1) + " * " + str(t2) + "\n──────────────" txt += "\n" + str(t1 * t2) line_bot_api.push_message(gid, TextSendMessage(text=txt)) except: line_bot_api.push_message( gid, TextSendMessage( text="วิธีการใช้งาน:\n/mtp [ ตัวเลข ] [ ตัวเลข 
]")) if text.startswith("/spam "): separate = text.split(" ") texxt = text.replace(separate[0] + " ", "") textt = texxt.replace(separate[1] + " ", "") textx = "จำนวน " + separate[1] + "\nข้อความ " + textt line_bot_api.push_message(gid, TextSendMessage(text=textx)) try: x = int(separate[1]) if x < 21: for i in range(x): line_bot_api.push_message(gid, TextSendMessage(text=textt)) else: line_bot_api.push_message( gid, TextSendMessage(text="ไม่สามารถสแปมมากกว่า 20 ข้อความได้")) except: pass if text == "/group": member_ids_res = line_bot_api.get_group_member_ids(group_id) line_bot_api.push_message(gid, member_ids_res.member_ids) line_bot_api.push_message(gid, member_ids_res.next) if text.startswith("/yt"): separate = text.split(" ") search = text.replace(separate[0] + " ", "") url = requests.get( "http://api.w3hills.com/youtube/search?keyword={}&api_key=86A7FCF3-6CAF-DEB9-E214-B74BDB835B5B" .format(search)) data = url.json() no = 0 result = "ยูทูป ( ค้นหา " + search + " )\n──────────────" for anu in data["videos"]: no += 1 result += "\n{}. {}\n{}\n".format(str(no), str(anu["title"]), str(anu["webpage"])) result += "\nทั้งหมด {}".format(str(len(data["videos"]))) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=result)) if text.startswith("/news"): try: separate = text.split(" ") country = text.replace(separate[0] + " ", "") if (country == None): country == "th" user_agent = {'User-agent': 'Mozilla/5.0'} url = requests.get( "https://newsapi.org/v2/top-headlines?country={}&apiKey=763b6fc67a594a4e9e0f9d29303f83dd" .format(country)) data = url.json() result = "ข่าวใหม่ ( " + country.upper( ) + " )" + "\n──────────────" n = 0 for anu in data["articles"]: if len(result) > 500: result += "\nทั้งหมด {}".format(n) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=result)) else: n = n + 1 result += "\n" + anu["title"] + "\n" + anu["url"] + "\n" result += "\nทั้งหมด {}".format(n) line_bot_api.push_message(gid, TextSendMessage(text=result)) except Exception as Error: line_bot_api.reply_message(event.reply_token, TextSendMessage(text=Error)) if text.startswith("/share"): quandl.ApiConfig.api_key = 'sSGoP_R7-sNMXusmJr7p' data = quandl.get("THAISE/INDEX") line_bot_api.push_message(gid, TextSendMessage(text=data)) if text.startswith("/snews"): separate = text.split(" ") searchx = text.replace(separate[0] + " ", "") search = searchx gs = goslate.Goslate() search = gs.translate(searchx, 'en') r = requests.get("http://www.google.co.th/search?q=" + search + "&tbm=nws") content = r.text news_summaries = [] soup = BeautifulSoup(content, "html.parser") st_divs = soup.findAll("div", {"class": "st"}) g_divs = soup.findAll("div", {"class": "g"}) trs = "ข่าวเกี่ยวกับ " + searchx + "\n──────────────" news_d = [] for g_div in g_divs: news_d.append(g_div.text) for st_div in st_divs: news_summaries.append(st_div.text) for i in news_summaries: for x in news_d: try: if len(trs) > 600: line_bot_api.reply_message(event.reply_token, TextSendMessage(text=trs)) else: gs = goslate.Goslate() x = gs.translate(x, 'th') trs += "\n\n" + x + "\nอ่านเพิ่มเติมได้ที่" except Exception as error: line_bot_api.reply_message(event.reply_token, TextSendMessage(text=error)) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=trs)) if text == "/bye": if (event.source.user_id == "Udaa0a2f396dd41e4398b106d903d92fd"): confirm_template_message = TemplateSendMessage( alt_text='God message', template=ConfirmTemplate(text='จะลบบอทออก? 
คุณแน่ใจหรือ?', actions=[ PostbackAction( label='แน่ใจ', text='goodbye', data='action=buy&itemid=1'), MessageAction(label='ไม่', text='...') ])) line_bot_api.reply_message(event.reply_token, confirm_template_message) else: line_bot_api.reply_message( event.reply_token, TextSendMessage(text="ผู้ใช้นี้ไม่ได้รับอนุญาต")) if "/ti/g/" in text: link_re = re.compile( '(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?') links = link_re.findall(text) n_links = [] for l in links: if l not in n_links: n_links.append(l) for ticket_id in n_links: line_bot_api.push_message(gid, TextSendMessage(text="Joined")) line_bot_api.acceptGroupInvitationByTicket(gid, ticket_id) if text == '/contact': buttons_template_message = TemplateSendMessage( alt_text='God message', template=ButtonsTemplate( thumbnail_image_url= 'https://gamingroom.co/wp-content/uploads/2017/11/CyCYOArUoAA2T6d.jpg', title='ติดต่อ', text='ช่องทางการติดต่อ', actions=[ PostbackAction(label='ไลน์', text='http://line.me/ti/p/~esci_', data='action=buy&itemid=1'), MessageAction(label="เฟซบุ๊ค", text='https://www.facebook.com/pasun.cf'), URIAction(label='ติดต่อ', uri='http://line.me/ti/p/~esci_') ])) line_bot_api.push_message(gid, buttons_template_message) if '/wiki ' in text: try: wiki = text.replace("/wiki ", "") wikipedia.set_lang("th") pesan = "วิกิพีเดียเกี่ยวกับ " pesan += wikipedia.page(wiki).title pesan += "\n\n" pesan += wikipedia.summary(wiki, sentences=1) pesan += "\n\nอ่านเพิ่มเติม\n" pesan += wikipedia.page(wiki).url titlex = wikipedia.page(wiki).title textx = wikipedia.summary(wiki, sentences=1) urlx = wikipedia.page(wiki).url line_bot_api.reply_message(event.reply_token, TextSendMessage(text=pesan)) except: try: pesan = "เกินขีด จำกัด ข้อความ! โปรดคลิกลิงก์ข้างล่างเพื่ออ่านเพิ่มเติม\n" pesan += wikipedia.page(wiki).url line_bot_api.reply_message(event.reply_token, TextSendMessage(text=pesan)) except Exception as e: line_bot_api.reply_message(event.reply_token, TextSendMessage(text=str(e))) if text == "/kick": line_bot_api.kickoutFromGroup(0, gid, "Udaa0a2f396dd41e4398b106d903d92fd") if text == "/profile": profile = line_bot_api.get_profile(event.source.user_id) line_bot_api.push_message(gid, TextSendMessage(text=event.source.user_id)) line_bot_api.push_message(gid, TextSendMessage(text=profile.display_name)) line_bot_api.push_message(gid, TextSendMessage(text=profile.status_message)) line_bot_api.push_message(gid, TextSendMessage(text=profile.picture_url)) if text == '/id': profile = line_bot_api.get_profile(event.source.user_id) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.display_name)) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=event.source.user_id)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.picture_url)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.status_message)) if text == '/bio': profile = line_bot_api.get_profile(event.source.user_id) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.display_name)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=event.source.user_id)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.picture_url)) line_bot_api.reply_message( event.reply_token, TextSendMessage(text=profile.status_message)) if text == '/pic': profile = line_bot_api.get_profile(event.source.user_id) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.display_name)) 
#line_bot_api.reply_message(event.reply_token,TextSendMessage(text=event.source.user_id)) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=profile.picture_url)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.status_message)) if text == '/name': profile = line_bot_api.get_profile(event.source.user_id) line_bot_api.reply_message(event.reply_token, TextSendMessage(text=profile.display_name)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=event.source.user_id)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.picture_url)) #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=profile.status_message)) if text == 'goodbye': if (event.source.user_id == "Udaa0a2f396dd41e4398b106d903d92fd"): if isinstance(event.source, SourceGroup): line_bot_api.reply_message( event.reply_token, TextSendMessage(text='กำลังออกกลุ่ม...')) line_bot_api.leave_group(event.source.group_id) elif isinstance(event.source, SourceRoom): line_bot_api.reply_message( event.reply_token, TextSendMessage(text='กำลังออกกลุ่ม...')) line_bot_api.leave_room(event.source.room_id) else: line_bot_api.reply_message( event.reply_token, TextSendMessage(text="บอทไม่สามารถออกแชท 1:1 ได้")) elif "/idline " in event.message.text: skss = event.message.text.replace('/idline ', '') sasa = "http://line.me/R/ti/p/~" + skss text_message = TextSendMessage(text=sasa) line_bot_api.reply_message(event.reply_token, text_message) elif text.startswith('/check'): originURLx = text.split(" ") originURL = text.replace(originURLx[0] + " ", "") result = requests.get( "http://shorturlbyzefyrinusx.000webhostapp.com/api/check.php?id=" + originURL + "&type=api").text line_bot_api.reply_message(event.reply_token, TextSendMessage(text=result)) elif text.startswith('/shorturl'): originURLx = text.split(" ") originURL = text.replace(originURLx[0] + " ", "") result = requests.get( "http://shorturlbyzefyrinusx.000webhostapp.com/api/urlshorten.php?url=" + originURL).text buttons_template_message = TemplateSendMessage( alt_text='God message', template=ButtonsTemplate( thumbnail_image_url= 'https://gamingroom.co/wp-content/uploads/2017/11/CyCYOArUoAA2T6d.jpg', title='RESULT', text=result, actions=[ PostbackAction(label='ข้อมูล URL', text='/check ' + result, data='action=buy&itemid=1'), MessageAction(label="URL", text=result), URIAction(label='เปิด URL', uri=result) ])) line_bot_api.reply_message(event.reply_token, buttons_template_message) elif '/help' == text: line_bot_api.reply_message(event.reply_token, TextSendMessage(text=helpmessage)) elif '/test' == text: buttons_template_message = TemplateSendMessage( alt_text='Buttons template', template=ButtonsTemplate( thumbnail_image_url='https://example.com/image.jpg', title='Menu', text='God message', actions=[ PostbackAction(label='postback', text='postback text', data='action=buy&itemid=1'), MessageAction(label='message', text='message text'), URIAction(label='uri', uri='http://example.com/') ])) line_bot_api.reply_message(event.reply_token, image_carousel_template_message)
import DB.manageDB as mdb from discord.ext import commands import discord import wikipedia as wiki wiki.set_lang("fr") class tagCommands(commands.Cog): def __init__(self, ctx): return (None) @commands.command(pass_context=True) async def toptag(self, ctx, nb=10): resList = mdb.colOccurence("tag_value", "tagmap") msg = "" if resList: if nb > len(resList) or nb == -1: nb = len(resList) resList.sort(key=lambda x: x[1], reverse=True) # Mesure anti flood if nb <= 10: for i in range(0, nb): msg += "**{}.** {} ({})\n".format(str(i + 1), resList[i][0], resList[i][1]) await ctx.channel.send(msg) else: for i in range(0, nb): msg += "**{}.** {} ({})\n".format(str(i + 1),