/
answers.py
115 lines (95 loc) · 3.87 KB
/
answers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
###################
# pyAnswers #
# by @kernoeb #
###################
# Libraries
import wikipedia
from lxml import html
import requests, html2text
from bs4 import BeautifulSoup
from geopy.geocoders import Nominatim
from googletrans import Translator
# Selenium
# https://selenium-python.readthedocs.io/
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
def a_deepl(txt_arg):
    """Translate *txt_arg* through the DeepL web translator UI.

    Drives a headless Chromium instance with Selenium, selects the target
    language from the site menu, types the text, and scrapes the result.

    :param txt_arg: text to translate
    :return: translated text scraped from the result pane
    """
    # NOTE(review): the Firefox ``Options()`` the original built here was
    # immediately overwritten by ChromeOptions, so it is dropped.
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--ignore-certificate-errors')
    options.add_argument("--test-type")
    options.binary_location = "/usr/bin/chromium-browser"
    # Selenium 4: pass ``options=`` (the ``chrome_options=`` kwarg and the
    # ``find_element_by_xpath`` helpers were removed).
    browser = webdriver.Chrome(options=options)
    try:
        browser.get('https://www.deepl.com/translator')
        # Open the target-language menu and pick the 4th entry.
        # NOTE(review): absolute XPaths are brittle — they break whenever
        # deepl.com changes its markup; confirm they still match.
        menu = browser.find_element(By.XPATH, "/html/body/div[1]/div[1]/div[1]/div[1]/div[1]/div/button")
        menu.click()
        lang = browser.find_element(By.XPATH, "/html/body/div[1]/div[1]/div[1]/div[1]/div[1]/div/div/button[4]")
        lang.click()
        text = browser.find_element(By.XPATH, "/html/body/div[1]/div[1]/div[1]/div[1]/div[2]/textarea")
        text.send_keys(txt_arg)
        # Wait (up to 2 s) for the translation paragraph to appear, then force
        # it visible so ``.text`` returns its content.
        wait = WebDriverWait(browser, 2)
        ok = wait.until(ec.presence_of_element_located((By.XPATH, "/html/body/div[1]/div[1]/div[1]/div[2]/div[3]/p[1]")))
        browser.execute_script('document.evaluate("/html/body/div[1]/div[1]/div[1]/div[2]/div[3]/p[1]", document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.style.display = "block";', ok)
        answer = browser.find_element(By.XPATH, "/html/body/div[1]/div[1]/div[1]/div[2]/div[3]/p[1]")
        return answer.text
    finally:
        # Always release the browser process, even on timeout/XPath failure —
        # the original leaked it whenever anything above raised.
        browser.quit()
def a_lorem_ipsum(nb="1"):
    """Fetch *nb* paragraphs of Lorem Ipsum text from lipsum.com.

    :param nb: number of paragraphs to request (string or int, default "1")
    :return: the generated placeholder text, stripped of surrounding whitespace
    """
    url = "https://lipsum.com/feed/html?amount={}&what=paras&start=yes&generate=Generate+Lorem+Ipsum".format(nb)
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    # The generated text lives in <div id="lipsum">.
    # (Renamed the local from ``id`` — it shadowed the builtin.)
    container = soup.find("div", attrs={"id": "lipsum"})
    return container.text.strip()
def a_lorem_ipsum_lang(lang="fr", nb="1"):
    """Fetch Lorem Ipsum text and translate it into *lang*.

    :param lang: target language code for the translation (default "fr")
    :param nb: number of paragraphs to request (default "1", matching the
        original hard-coded ``amount=1``; new optional parameter)
    :return: the translated placeholder text
    """
    url = "https://lipsum.com/feed/html?amount={}&what=paras&start=yes&generate=Generate+Lorem+Ipsum".format(nb)
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    container = soup.find("div", attrs={"id": "lipsum"})
    lorem_txt = container.text.strip()
    # Lorem Ipsum is (pseudo-)Latin, hence src='la'.
    return Translator().translate(lorem_txt, src='la', dest=lang).text
def a_meteo(loc):
    """Return the current temperature for *loc*, scraped from weather.com.

    Geocodes the location name with Nominatim, then fetches the French
    weather.com "today" page for those coordinates.

    :param loc: free-form location name (e.g. "Paris")
    :return: the temperature text, or a French error message on failure
    """
    # geopy >= 2 requires an explicit user_agent (Nominatim usage policy);
    # calling Nominatim() bare raises a ConfigurationError.
    geolocator = Nominatim(user_agent="pyAnswers")
    location = geolocator.geocode(loc)
    if location is None:
        # Unknown place: the original crashed with AttributeError here.
        return "Erreur : Site weather.com en panne ou en maintenance. Si le problème perdure, veuillez contacter @kernoeb"
    url = "https://weather.com/fr-FR/temps/aujour/l/{},{}".format(location.latitude, location.longitude)
    try:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        card = soup.find("div", attrs={"class": "today_nowcard-temp"})
        ret = card.text.strip()
    except Exception:
        # Narrowed from a bare ``except:`` — keep the best-effort contract
        # but stop swallowing SystemExit/KeyboardInterrupt.
        ret = "Erreur : Site weather.com en panne ou en maintenance. Si le problème perdure, veuillez contacter @kernoeb"
    return ret
def a_cntrl(mot):
    """Look up the French word *mot* in the CNRTL lexicographic dictionary.

    Scrapes cnrtl.fr and converts the definition HTML to plain text.

    :param mot: word to look up
    :return: the definition as text, or a French error message on failure
    """
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.ignore_images = True
    url = "http://www.cnrtl.fr/lexicographie/{}".format(mot)
    try:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        content = soup.find("div", attrs={"id": "lexicontent"})
        ret = h.handle(str(content))
    except Exception:
        # Narrowed from a bare ``except:``. Message fixed: the site queried
        # is CNRTL.fr, not "CNTRL.COM" as the original claimed.
        ret = "Erreur : Site CNRTL.fr en panne."
    return ret
def a_wikipedia(recherche, lang="fr"):
    """Return the Wikipedia summary and URL for *recherche*.

    :param recherche: article title to look up
    :param lang: Wikipedia language edition (default "fr")
    :return: "<summary>\\n\\n<url>", or a French not-found message when the
        lookup fails (missing page, disambiguation, network error, ...)
    """
    wikipedia.set_lang(lang)
    try:
        page = wikipedia.page(recherche)
        answer = str(page.summary) + "\n\n" + str(page.url)
    except Exception:
        # Narrowed from a bare ``except:`` — still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        answer = "Cet article n'existe pas..."
    return answer
# print(a_deepl("Pomme de terre"))
# print(a_lorem_ipsum())
# print(a_lorem_ipsum_lang())
# print(a_meteo("Paris"))
# print(a_cntrl("chat"))
# print(a_wikipedia("Google"))