Example #1
import time

import bs4
from gtts import gTTS
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary

# capabilities, username and password are assumed to be defined earlier in the script.
binary = FirefoxBinary(r'/usr/bin/firefox')
driver = webdriver.Firefox(firefox_binary=binary, capabilities=capabilities)
driver.get('https://news.google.com/foryou?hl=pt-BR&gl=BR&ceid=BR%3Apt-419')

# Log in: type the e-mail address and press Enter.
driver.find_element_by_id('identifierId').send_keys(username)
driver.find_element_by_id('identifierId').send_keys(chr(13))

time.sleep(2)

# Type the password and press Enter.
driver.find_element_by_class_name('zHQkBf').send_keys(password)
driver.find_element_by_class_name('zHQkBf').send_keys(chr(13))

time.sleep(4)

# Parse the rendered page and collect the headline text from plain <span> tags.
soup = bs4.BeautifulSoup(driver.page_source, 'lxml')
news = []

for link in soup.select('span'):
    if str(link)[0:6] == "<span>":
        headline = str(link).replace("<span>", "").replace("</span>", "")
        news.append(headline)
        print(headline)

sounds = []
z = 0

# Convert each headline to speech and save it as an mp3 file.
for new in news:
    tts = gTTS(text=new, lang='pt')
    print("response" + str(z) + ".mp3")
    tts.save("response" + str(z) + ".mp3")
    z += 1
    time.sleep(2)
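The loop above only saves the generated mp3 files. If you also want to hear them, one option (an addition for illustration, not part of the original example) is the playsound package:

from playsound import playsound

# Play back each saved headline in order; assumes the files written above exist.
for i in range(len(news)):
    playsound("response" + str(i) + ".mp3")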
Example #2
# downloadXkcd.py - Downloads every XKCD comic strip. Recreational use.

import requests, os, bs4

# Starting page of the site; each iteration follows the "Prev" link.
url = 'http://xkcd.com'

# Store the comics in ./xkcd
os.makedirs('xkcd', exist_ok=True)

while not url.endswith('#'):
    print('Downloading page %s...' % url)
    res = requests.get(url)
    res.raise_for_status()

    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # Find the URL of the comic image.
    comicElem = soup.select('#comic img')
    if comicElem == []:
        print('Could not find a comic image on this page.')
    else:
        comicUrl = 'http:' + comicElem[0].get('src')
        print('Downloading the comic %s...' % (comicUrl))
        res = requests.get(comicUrl)
        res.raise_for_status()
        # Save the image to ./xkcd
        imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
        for chunk in res.iter_content(100000):
            imageFile.write(chunk)
        imageFile.close()

    # Follow the "Prev" button; on the first comic it points to '#', which ends the loop.
    prevLink = soup.select('a[rel="prev"]')[0]
    url = 'http://xkcd.com' + prevLink.get('href')
Example #3
    def _visit(self, url):
        # Fetch the page and fail loudly on a non-2xx status code.
        response = requests.get(url)
        response.raise_for_status()

        self._html = bs4.BeautifulSoup(response.text, 'html.parser')
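This snippet is only a single method of some larger class. A minimal, self-contained class it could plausibly belong to is sketched below; the class name Page and the links helper are assumptions for illustration, not part of the original example.

import bs4
import requests


class Page:
    """Hypothetical wrapper around a fetched HTML page (illustration only)."""

    def __init__(self, url):
        self._html = None
        self._visit(url)

    def _visit(self, url):
        # Fetch the page and fail loudly on a non-2xx status code.
        response = requests.get(url)
        response.raise_for_status()
        self._html = bs4.BeautifulSoup(response.text, 'html.parser')

    def links(self):
        # Return the href attribute of every anchor on the page.
        return [a.get('href') for a in self._html.select('a[href]')]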
Example #4
import requests
import bs4
from datetime import datetime

# Daily exchange rates of the Central Bank of Russia, returned as XML.
url = "http://www.cbr.ru/scripts/XML_daily.asp"
today = datetime.now().strftime("%d/%m/%Y")
payload = {"date_req": today}
response = requests.get(url, params=payload)
soup = bs4.BeautifulSoup(response.content, "lxml")

def get_course(id):
    # Look up one <Valute> element by its ID and format its exchange rate.
    # The returned string reads "For {nominal} {name} you get {value}".
    valute = soup.find("valute", {"id": id})
    nominal = valute.nominal
    name = valute.find("name")
    value = valute.value
    return "За {} {} дают {}".format(nominal.text, name.text, value.text)

valutes = soup.find_all("valute")
print(valutes)
for valute in valutes:
    print(get_course(valute["id"]))
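If you only need one currency and do not know its internal ID, one way (an addition for illustration, not part of the original example) is to look the ID up by character code first:

# Find the <Valute> whose <CharCode> is USD, then reuse get_course with its ID.
usd_id = soup.find("charcode", string="USD").parent["id"]
print(get_course(usd_id))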
Example #5
import random
import requests, sys, webbrowser, bs4
import time

# Google the command-line arguments.
res = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))
res.raise_for_status()

# Open the first few result links in the browser.
soup = bs4.BeautifulSoup(res.text, "html.parser")
linkElements = soup.select('.r a')
linksToOpen = min(5, len(linkElements))
for i in range(linksToOpen):
    webbrowser.open('http://google.com' + linkElements[i].get('href'))

name = input("Hello, what is your name? ")

time.sleep(2)
print("Hello " + name)

feeling = input("How are you today? ")
time.sleep(2)

if "great" in feeling:
    print("I'm feeling great too!")
else:
    print("I'm so sorry about that!")

time.sleep(2)
favcandy = input("What is your favourite candy? ")
candy = ["Twix", "Hershey's", "Jolly Ranchers"]

time.sleep(2)
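The example stops after building the candy list without using it. A possible continuation (an assumption, not part of the original snippet) would compare the answer against that list:

# Check the answer against the list, ignoring case.
if favcandy.lower() in [c.lower() for c in candy]:
    print("I like " + favcandy + " too!")
else:
    print("I have never tried " + favcandy + ".")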