def main(dict_classes, schedule_sheet, hours_list, cells_list, days_list, credentials):
    """Log into Google Classroom and keep joining the scheduled Meet.

    credentials: a list of raw file lines —
        [0] UI language ("ita\n" or "eng\n"),
        [1] email (trailing newline stripped before use),
        [2] password.
    The function never returns while the polling loop condition holds.
    """
    take()
    start_chrome("google.com")
    # Log into the Google account using the field labels that match the
    # configured UI language.
    if credentials[0] == "ita\n":
        go_to(
            "https://accounts.google.com/signin/v2/identifier?service=classroom&passive=1209600&continue=https%3A%2F%2Fclassroom.google.com%2Fu%2F0%2Fh%3Fhl%3Dit&followup=https%3A%2F%2Fclassroom.google.com%2Fu%2F0%2Fh%3Fhl%3Dit&hl=it&flowName=GlifWebSignIn&flowEntry=ServiceLogin"
        )
        write(
            credentials[1].replace("\n", ""),
            into="Indirizzo email o numero di telefono",
        )
        press(ENTER)
        write(credentials[2], into="Inserisci la password")
        press(ENTER)
    # BUG FIX: the original compared against "eng\n," (stray trailing comma),
    # which can never equal a language line read from a file, so the English
    # branch was unreachable.
    elif credentials[0] == "eng\n":
        go_to(
            "https://accounts.google.com/signin/v2/identifier?service=classroom&passive=1209600&continue=https%3A%2F%2Fclassroom.google.com%2Fu%2F0%2Fh%3Fhl%3Dit&followup=https%3A%2F%2Fclassroom.google.com%2Fu%2F0%2Fh%3Fhl%3Dit&hl=it&flowName=GlifWebSignIn&flowEntry=ServiceLogin"
        )
        write(credentials[1].replace("\n", ""), into="Email or phone")
        press(ENTER)
        write(credentials[2], into="Enter your password")
        press(ENTER)
    else:
        print(colorama.Fore.RED + colorama.Style.BRIGHT + "\n\nThe language is not supported (ita-eng)")
    # NOTE(review): A is not defined in this function — presumably a module
    # level flag toggled elsewhere; if not, this raises NameError. Confirm.
    while A == 0:
        k = giorno(days_list)
        materia = ora(hours_list, cells_list, k, schedule_sheet)
        meet(materia, dict_classes, k, credentials)
def get_interactive_page_source(url):
    """Return a BeautifulSoup of *url* after expanding every summary.

    Renders the page in headless Chrome, clicks every "Veja mais!" button so
    the collapsed summaries end up in the DOM, parses the rendered HTML, and
    closes the browser before returning.
    """
    try:
        start_chrome(url, headless=True)
    except Exception:
        # Chrome/ChromeDriver are missing — nothing useful can be done.
        print("Erro: você precisa instalar o Google Chrome e o ChromeDriver para executar esse raspador.")
        sys.exit(1)
    driver = get_driver()
    print(f'Raspando a página "{driver.title}". Isso pode demorar alguns segundos...')
    # One click per expandable <span>, with a progress bar over the total.
    expanders = find_all(S("//span[@onClick]"))
    for _ in tqdm(expanders):
        click("Veja mais!")
    print('Fim da raspagem da página.')
    # Parse the helium-rendered source, then shut the browser down.
    rendered = BeautifulSoup(driver.page_source, 'html.parser')
    kill_browser()
    return rendered
def submit_request(song, artist, url='www.harmonixmusic.com/games/rock-band/request'):
    """Open the Rock Band request form in headless Chrome and submit a song."""
    logging.info("Logging into %s", url)
    start_chrome(url, headless=True)
    # Fill both form fields, then submit and tear the browser down.
    for value, field in ((song, 'Song Title'), (artist, 'Artist')):
        write(value, into=field)
    logging.info("Submitting request for '%s' by '%s'", song, artist)
    click('Submit')
    kill_browser()
def getDemons(name):
    """Scrape pointercrate's stats viewer for *name*'s completed demons.

    Collects the demons the player has beaten or verified (main + extended
    list) and returns their names as a list of strings.
    """
    start_chrome('https://pointercrate.com/demonlist/')
    # Scroll so the stats-viewer link becomes clickable, then open the profile.
    press(END)
    press(PAGE_UP)
    click('Open the stats viewer!')
    write(name, into='Enter to search...')
    click(name)
    time.sleep(1)
    # Probe each category one index at a time; an empty XPath result marks the
    # end of that category. (Fix: the original used 0/1 int flags and `== []`
    # comparisons — plain booleans and truthiness express the same logic.)
    raw_matches = []
    mainlist = extendedlist = verifications_main = verifications_extended = []
    scan_main = scan_extended = scan_main_verifs = scan_extended_verifs = True
    for counter in range(1, 76):
        if scan_main:
            mainlist = find_all(S('//*[@id="beaten"]/b[%d]/a' % (counter)))
        if scan_extended:
            extendedlist = find_all(
                S('//*[@id="beaten"]/span[%d]/a' % (counter)))
        if scan_main_verifs:
            verifications_main = find_all(
                S('//*[@id="verified"]/b[%d]/a' % (counter)))
        if scan_extended_verifs:
            verifications_extended = find_all(
                S('//*[@id="verified"]/span[%d]/a' % (counter)))
        if not mainlist:
            scan_main = False
        if not extendedlist:
            scan_extended = False
        if not verifications_main:
            scan_main_verifs = False
        if not verifications_extended:
            scan_extended_verifs = False
        raw_matches.extend(
            (mainlist, extendedlist, verifications_main,
             verifications_extended))
    # Each non-empty probe reprs like "[<a ...>Demon Name</a>]": the name sits
    # between the first '>' and the second '<'.
    demons = []
    for match in raw_matches:
        if not match:
            continue
        text = str(match)
        start = text.index(">")
        end = text.index("<", text.index("<") + 1)
        demons.append(text[start + 1:end])
    kill_browser()
    return demons
def start_and_login():
    """Start Chrome with Udemy session cookies and return a logged-in driver.

    Retries until Udemy serves a normal page (not the upgrade-your-browser
    interstitial). Raises InvalidCookiesException when a "Log in" button is
    still visible, i.e. the injected cookies did not authenticate us.
    """
    while True:
        opts = chromeOpts()
        opts.add_argument(f"user-agent={ua.random}")
        # Block image loading to speed page fetches up.
        opts.add_experimental_option(
            "prefs", {'profile.managed_default_content_settings.images': 2})
        driver = helium.start_chrome(
            "https://www.udemy.com/random_page_that_does_not_exist/",
            headless=False,
            options=opts)
        # Inject the session cookies before navigating to a real page.
        for cookie_name, cookie_value in (('client_id', client_id),
                                          ('access_token', access_token)):
            driver.add_cookie({
                'name': cookie_name,
                'value': cookie_value,
                'domain': udemy_domain
            })
        helium.go_to("https://www.udemy.com/?persist_locale=&locale=en_US")
        if 'upgrade-your-browser' not in driver.current_url:
            break
        driver.quit()
    try:
        # If a "Log in" button shows up, the injected cookies did not work.
        helium.wait_until(helium.Button('Log in').exists, timeout_secs=3.5)
        helium.kill_browser()
        raise InvalidCookiesException()
    except TimeoutException:
        # Timing out here is the success case: no login button, we are in.
        return driver
def test_invalid_credentials(self):
    """Logging in with bad credentials must surface the error banner."""
    helium.start_chrome('https://goantifraud.com/manager', headless=True)
    creds = TEST_DATA['invalid_credentials']
    helium.write(creds['log'], into='login')
    helium.write(creds['pass'], into='password')
    helium.press(helium.ENTER)
    # Capture the banner text before tearing the browser down.
    banner_text = helium.Text('Login or password is incorrect').value
    helium.kill_browser()
    self.assertEqual(banner_text, 'Login or password is incorrect')
def test_recover_pass_with_invalid_user(self):
    """Password recovery with an unknown login must show validation errors."""
    helium.start_chrome('https://goantifraud.com/manager', headless=True)
    helium.click('Forgot your password?')
    helium.write(TEST_DATA['invalid_credentials']['log'], into='login')
    helium.click('SEND')
    time.sleep(0.5)  # give the client-side validation time to render
    errors = helium.find_all(helium.S(".error.error_show"))
    helium.kill_browser()
    self.assertEqual(len(errors), 2)
def __init__(self, classe, firstname, lastname, email, password, birthdate=None, ecole="victor hugo", url="https://www.kartable.fr/inscription"):
    """Store the registration data and open the sign-up page in Chrome.

    birthdate, when given, is a "DD/MM/YYYY" string; it is stored as its
    [day, month, year] parts, or None when absent or not three parts.
    """
    self.url = url
    self.classe = classe
    self.firstname = firstname
    self.lastname = lastname
    self.email = email
    self.password = password
    self.ecole = ecole
    # Only a well-formed DD/MM/YYYY value is kept, split into its parts.
    parts = str(birthdate).split("/")
    self.birthdate = birthdate.split("/") if len(parts) == 3 else None
    self.driver = helium.start_chrome(self.url)
def superstore(item):
    """Search Real Canadian Superstore for *item* and return the first hit.

    Returns a dict with 'brand', 'name', 'pack', 'unit' and 'price'. When the
    search has no results, 'brand' carries the no-results message and the
    other fields are empty strings.
    """
    browser = helium.start_chrome(
        'https://www.realcanadiansuperstore.ca/search?search-bar=' + item,
        headless=True)
    time.sleep(2)  # let the page finish rendering before parsing
    page = BS(browser.page_source, 'html.parser')

    def text_of(css_class):
        # Stripped text of the first element carrying the given class.
        return page.find(class_=css_class).get_text(strip=True)

    no_results = page.find(class_="search-no-results__section-title")
    if no_results:
        # Empty search: surface the message in 'brand', blank out the rest.
        brand = no_results.get_text(strip=True)
        name = pack = unit = price = ''
    else:
        brand = text_of("product-name__item product-name__item--brand")
        name = text_of("product-name__item product-name__item--name")
        pack = text_of("product-name__item product-name__item--package-size")
        unit = text_of(
            "price__unit selling-price-list__item__price "
            "selling-price-list__item__price--now-price__unit")
        price = text_of(
            "price selling-price-list__item__price "
            "selling-price-list__item__price--now-price")
    return {
        'brand': brand,
        'name': name,
        'pack': pack,
        'unit': unit,
        'price': price
    }
def johns(item):
    """Search Your Independent Grocer for *item* (same page layout as
    Superstore) and return the first hit.

    Returns a dict with 'brand', 'name', 'pack', 'unit' and 'price'. On an
    empty search, 'brand' carries the no-results message and the rest are ''.
    """
    browser = helium.start_chrome(
        'https://www.yourindependentgrocer.ca/search?search-bar=' + item,
        headless=True)
    time.sleep(2)  # let the page finish rendering before parsing
    page = BS(browser.page_source, 'html.parser')
    no_results = page.find(class_="search-no-results__section-title")
    if no_results is not None:
        return {
            'brand': no_results.get_text(strip=True),
            'name': '',
            'pack': '',
            'unit': '',
            'price': '',
        }
    # Map each output field to the CSS class that holds it on the page.
    field_classes = {
        'brand': "product-name__item product-name__item--brand",
        'name': "product-name__item product-name__item--name",
        'pack': "product-name__item product-name__item--package-size",
        'unit': ("price__unit selling-price-list__item__price "
                 "selling-price-list__item__price--now-price__unit"),
        'price': ("price selling-price-list__item__price "
                  "selling-price-list__item__price--now-price"),
    }
    return {field: page.find(class_=cls).get_text(strip=True)
            for field, cls in field_classes.items()}
import helium
import hashlib

# Grab a throwaway address from temp-mail and derive a password from it.
driver = helium.start_chrome("https://temp-mail.org/en/", headless=True)
helium.wait_until(
    lambda: "@" in helium.TextField("Your Temporary email address").value)
email = helium.TextField("Your Temporary email address").value
password = hashlib.sha1(email.encode()).hexdigest()
print(email, password)
helium.kill_browser()

# Register a new student account on Kartable with that address.
driver = helium.start_chrome("https://www.kartable.fr/inscription")
for label in ("élève", "1ère", "s'inscrire avec un e-mail"):
    helium.click(label)
helium.write("Cld", into="prénom")
helium.write("Lokidod", into="nom")
helium.write(email, into="adresse e-mail")
helium.write(password, into="mot de passe")
helium.click("terminer")
helium.wait_until(helium.Text("plus tard").exists)
helium.click("plus tard")
# Browser intentionally left open after sign-up.
#helium.kill_browser()
def main(cls):
    """Start a headless Chrome instance, then hand control back to the
    parent process through the class's synchronization hook."""
    start_chrome(headless=True)
    cls.synchronize_with_parent_process()
import helium # 比较适合新手的自动化库 from time import sleep home_url = 'https://mail.163.com/' # 网站主地址 user_name = 'luobomc' # 登陆用户名 password = '******' # 密码 driver = helium.start_chrome(home_url) # 打开浏览器 并且打开home_url地址页面 sleep(5) # 睡眠5s等待页面加载 helium.write(user_name, helium.TextField('邮箱帐号或手机号码')) # 锁定'邮箱账号或手机号码'并且锁定 然后传入用户名 sleep(2) # 睡眠2秒 helium.write(password, helium.TextField('输入密码')) # 锁定密码输入框并且输入 helium.press(helium.ENTER) # 模拟按回车 登录 sleep(5) # 睡眠5秒等待页面刷新 helium.click(helium.Text('收 信')) # 点击收信按钮 查看邮件 helium.kill_browser() # 关闭浏览器
@author: gustavolibotte
"""
import json
import sys
from time import sleep
from helium import S, click, kill_browser, start_chrome, wait_until
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from proj_consts import ProjectConsts

# Chrome is configured to capture performance logs, so network traffic can be
# inspected later (presumably to recover the CSV download URL — see below).
opts = webdriver.ChromeOptions()
opts.set_capability("loggingPrefs", {"performance": "ALL"})
driver = start_chrome(ProjectConsts.BRAZIL_HEALTH_MINISTRY_URL, options=opts)
# Wait for the Ionic UI to appear, give it a moment, then trigger the export.
wait_until(S("ion-button").exists)
sleep(3)
click("Arquivo CSV")
# Module-level holder for the captured download URL (a `global` statement at
# module scope is a no-op; URL is simply a module attribute).
global URL
URL = None


def process_browser_log_entry(entry):
    """Unwrap one Chrome performance-log entry: the 'message' field is a JSON
    envelope whose inner 'message' object is returned."""
    response = json.loads(entry["message"])["message"]
    return response


def fetch_download_url():
    # NOTE(review): only the global declaration is visible here — the body
    # appears to continue beyond this chunk.
    global URL
def run(self, dispatcher: CollectingDispatcher, tracker: Tracker,
        domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
    """Custom-action entry point: open a Chrome window and confirm to the
    user. Returns no follow-up events (empty list)."""
    start_chrome()
    # Chinese confirmation message ("opened") sent back to the conversation.
    dispatcher.utter_message("已打开")
    return []
# Interactive helper: collect the journal date and bank from the operator,
# then log in to the web app and open the HR Expenses page.
DATE = input('Enter DDMM: ')
DATE = DATE[:2] + '/' + DATE[2:] + '/2021'  # expand DDMM to DD/MM/2021
BANK = input('Enter Bank (BCK=1/BOK=2): ')
if BANK == '1':
    BANK, JOURNAL = 'BANK BCA (JKT) OUT', 'BCK'
elif BANK == '2':
    BANK, JOURNAL = 'OPERASIONAL OUT', 'BOK'
    # BANK, JOURNAL = 'OCBC NISP (JKT) OUT', 'BOK'
# NOTE(review): any other input leaves JOURNAL undefined (the print below
# would raise NameError) and BANK holding the raw digit — confirm intended.
DESC = ''
#-------------------------------------------------------------------------------
driver = start_chrome('http://')  # NOTE(review): placeholder URL — confirm
h_click('Live1')
write(EMAIL, into='Email')
write(PASSWORD, into='Password')
h_click('Log in')
h_click('HR Expenses')
print('Current journal date: ' + DATE)
print('Current journal: ' + JOURNAL)
print('Current description:' + DESC)
#-------------------------------------------------------------------------------
TAX = True