def _neova_helper(self, driver: webdriver) -> None:
    """Download the consumption CSV for each Neova facility and rename it."""
    # Risky to name the files from their position in the list on the web page,
    # since the order can change. It would be better if the metadata were in
    # the CSV file itself.
    neova_anläggningar: Dict = {
        "Mimer_7": '//*[@id="app"]/div/div/div[1]/div[2]/div[1]/nav/div/div[2]/div/ul/li[2]/div[2]',
        "Läroverket_Stålhagskolan": '//*[@id="app"]/div/div/div[1]/div[2]/div[1]/nav/div/div[2]/div/ul/li[3]/div[2]',
        "Smeden_8": '//*[@id="app"]/div/div/div[1]/div[2]/div[1]/nav/div/div[2]/div/ul/li[4]/div[2]',
        "Solvändan_2": '//*[@id="app"]/div/div/div[1]/div[2]/div[1]/nav/div/div[2]/div/ul/li[5]/div[1]',
        "Läroverket_C_huset": '//*[@id="app"]/div/div/div[1]/div[2]/div[1]/nav/div/div[2]/div/ul/li[6]/div[2]',
    }
    for key, value in neova_anläggningar.items():
        driver.find_element_by_class_name("usage-place-bar__button").click()
        ts(5)
        driver.find_element_by_xpath(value).click()
        ts(5)
        driver.find_element_by_class_name("usage-place-menu__close").click()
        ts(2)
        # download the CSV
        driver.find_element_by_xpath(
            '//*[@id="base__content"]/div[2]/div[1]/div[7]/a'
        ).click()
        ts(5)
        self._rename_file(facility=key)
def parse_post_data(driver: webdriver, url):
    """Collect the like, view and comment counts for a post."""
    driver.get(url)
    try:
        likes = int(
            driver.find_elements_by_class_name(
                'sqdOP')[2].find_element_by_tag_name('span').text.replace(
                    ' ', ''))  # sqdOP yWX7d _8A5w5 vcOH2
        views = 0
    except NoSuchElementException:
        print('This is a video post')
        try:
            button = driver.find_element_by_class_name('vcOH2')
            views = int(
                button.find_element_by_tag_name('span').text.replace(' ', ''))
            button.click()
            likes = int(
                driver.find_element_by_class_name(
                    'vJRqr').find_element_by_tag_name('span').text.replace(
                        ' ', ''))
            button = driver.find_element_by_class_name('QhbhU')
            button.click()
        except NoSuchElementException:
            views = 0
            try:
                likes = int(
                    driver.find_element_by_class_name(
                        'vJRqr').find_element_by_tag_name('span').text.replace(
                            ' ', ''))
            except NoSuchElementException:
                likes = 0
                print(f'Could not find anything for post: {url}')
    comments = get_comments_count(driver=driver)
    return likes, views, comments
def filter_posts_list(driver: webdriver, months: list, posts_list: list) -> list:
    """Keep only the posts that fall in the months we want to analyse."""
    res = []
    for post_url in posts_list:
        driver.get(post_url)
        try:
            sleep(2)
            time = driver.find_element_by_class_name('Nzb55').get_attribute(
                'datetime')
            for month in months:
                if time[0:7] == month:
                    res.append(post_url)
        except NoSuchElementException:
            print(f'COULD NOT FIND THE DATE! POST {post_url}')
            driver.get(post_url)
            try:
                sleep(2)
                time = driver.find_element_by_class_name(
                    'Nzb55').get_attribute('datetime')
                for month in months:
                    if time[0:7] == month:
                        res.append(post_url)
            except NoSuchElementException:
                print(f'COULD NOT FIND THE DATE A SECOND TIME! POST {post_url}')
                res.append(post_url)
    return res
def go_to_cart(driver: webdriver):
    try:
        WebDriverWait(driver, 3).until(
            # As invoked in this sequence there should be no need to wait here,
            # but make no assumptions.
            expected_conditions.presence_of_element_located(
                (By.CLASS_NAME, 'shopping_cart_link')))
        driver.find_element_by_class_name('shopping_cart_link').click()
        print(str(datetime.datetime.now()) + ' go_to_cart PASSED')
    except:
        traceback.print_exc()
        raise
def search_cim(driver: webdriver, keyword: str) -> bool:
    """Wikiloc search box input."""
    cim_name = keyword
    search = driver.find_element_by_class_name("search-box__input")
    search.send_keys(cim_name)
    search.click()
    try:
        btn_select_first = driver.find_element_by_class_name(
            "search-box-item__first")
        btn_select_first.click()
        return True
    except ElementClickInterceptedException:
        # TODO: handle possible error here and logging
        print("ERROR")
        return False
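# Hypothetical usage sketch for search_cim above. It assumes a local Chrome
# driver and the Wikiloc start page; the URL and the peak name are illustrative
# only and do not come from the original code.
def _demo_search_cim():
    from selenium import webdriver as selenium_webdriver

    driver = selenium_webdriver.Chrome()
    try:
        driver.get("https://www.wikiloc.com/")
        if search_cim(driver, "Puigmal"):
            print("First search suggestion was clicked")
        else:
            print("Search suggestion could not be clicked")
    finally:
        driver.quit()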
def followAccountsFromList(browser: webdriver, account):
    """
    Follow the followers of a Twitter account.
    :param browser: webdriver
    """
    global remain_follow_count
    # open the followers list
    browser.get('https://twitter.com/' + account + '/followers')
    grid = browser.find_element_by_class_name("GridTimeline")
    profile_cards = grid.find_elements_by_class_name("js-actionable-user")
    limit = ACCOUNT_FOLLOW_LIMIT
    for profile_card in profile_cards:
        if remain_follow_count > 0 and limit > 0:
            is_follow = bool(
                profile_card.find_elements_by_class_name("not-following"))
            is_open = bool(
                profile_card.find_elements_by_class_name("js-protected"))
            description = profile_card.find_element_by_class_name(
                "ProfileCard-bio")
            description_text = description.text
            follow_btn = profile_card.find_elements_by_class_name(
                "follow-text")
            if len(description_text) > 20 and is_follow and bool(follow_btn):
                follow_btn[0].click()
                sleep(1)
                remain_follow_count = remain_follow_count - 1
                limit = limit - 1
        else:
            break
def get_atms(driver: webdriver, city_name: str, region_name: str):
    """Walk through all result pages and collect the list of ATMs.

    :driver: selenium.webdriver
    :returns: [{ ... }]
    """
    pages = 0
    rows = list()
    has_next_page = True
    while has_next_page:
        pages = pages + 1
        for row in driver.find_elements_by_class_name('page-atm__table_row'):
            rows.append({
                'region': region_name,
                'city': city_name,
                'bank': row.find_element_by_class_name('address-logo').text,
                'address_title':
                    row.find_element_by_class_name('address-title').text,
                'address_type':
                    row.find_element_by_class_name('address-type').text,
                'working_time': row.find_element_by_class_name(
                    'page-atm__table_col--time').text,
                'currency': row.find_element_by_class_name(
                    'page-atm__table_col--currency').text,
                'address_metro':
                    row.find_element_by_class_name('address-metro').text,
                'address_map': (row.find_element_by_class_name(
                    'address-map').find_element_by_link_text(
                        'Показать на карте').get_attribute("href"))
            })
        try:
            sleep(randint(1, 3))
            driver.find_element_by_class_name('pagination-arrow--next').click()
        except NoSuchElementException:
            has_next_page = False
    logging.info('{}: {} (pages: {}; atms: {})'.format(
        region_name, city_name, pages, len(rows)))
    return rows
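# A hedged sketch of persisting the rows returned by get_atms with the standard
# csv module. The output filename is an assumption; the field names simply
# mirror the keys built inside get_atms above.
def _demo_save_atms_to_csv(driver: webdriver, city_name: str, region_name: str):
    import csv

    rows = get_atms(driver, city_name, region_name)
    fieldnames = ['region', 'city', 'bank', 'address_title', 'address_type',
                  'working_time', 'currency', 'address_metro', 'address_map']
    with open('atms.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)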
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver,
                     sleep_between_interactions: int = 1):
    def scroll_to_end(wd):
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        sleep(sleep_between_interactions)

    # build the Google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"

    # load the page
    wd.get(search_url.format(q=query))
    sleep(0.7)

    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)

        # get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.rg_i")
        number_results = len(thumbnail_results)
        print(f"Found: {number_results} search results. "
              f"Extracting links from {results_start}:{number_results}")

        for img in thumbnail_results[results_start:number_results]:
            # try to click every thumbnail so we can get the real image behind it
            try:
                img.click()
                sleep(sleep_between_interactions)
            except Exception:
                continue

            # extract image urls
            actual_images = wd.find_elements_by_css_selector('img.rg_i')
            for actual_image in actual_images:
                if actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))
            image_count = len(image_urls)

            if len(image_urls) >= max_links_to_fetch:
                print(f"Found: {len(image_urls)} image links, done!")
                break
            else:
                print("Found:", len(image_urls), "image links, looking for more ...")
                sleep(0.8)
                # class names must not include the leading dot
                load_more_button = wd.find_element_by_class_name("ksb")
                # load_more_button = wd.find_element_by_id('n3VNCb')
                # load_more_button = wd.find_element_by_css_selector("")
                if load_more_button:
                    wd.execute_script("document.querySelector('.ksb').click();")

        # move the result start point further down
        results_start = len(thumbnail_results)
    return image_urls
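# Hedged usage sketch for fetch_image_urls above: it assumes the requests
# library is available and that writing files to ./downloads is acceptable.
# The query, count and filenames are illustrative, not part of the original.
def _demo_download_images(query: str, count: int):
    import os
    import requests
    from selenium import webdriver as selenium_webdriver

    wd = selenium_webdriver.Chrome()
    try:
        urls = fetch_image_urls(query, count, wd, sleep_between_interactions=1)
    finally:
        wd.quit()

    os.makedirs('downloads', exist_ok=True)
    for i, url in enumerate(urls):
        if not url.startswith('http'):
            continue  # skip data: URIs that the scraper may also collect
        response = requests.get(url, timeout=30)
        with open(os.path.join('downloads', f'{query}_{i}.jpg'), 'wb') as f:
            f.write(response.content)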
def carga_archivo_claro_drive(webdriver_test_ux: webdriver,
                              path_archivo_carga: str,
                              nombre_archivo_sin_ext: str, jsonEval):
    tiempo_step_inicio = Temporizador.obtener_tiempo_timer()
    fecha_inicio = Temporizador.obtener_fecha_tiempo_actual()
    try:
        # open the "create resource" menu and send the file to the upload input
        btn_crear = webdriver_test_ux.find_element_by_class_name(
            'button-create-resource')
        btn_crear.click()
        time.sleep(10)

        WebDriverWait(webdriver_test_ux, 20).until(
            EC.presence_of_element_located((By.ID, 'file_upload_start')))
        input_file = webdriver_test_ux.find_element_by_id('file_upload_start')
        time.sleep(4)
        input_file.send_keys(path_archivo_carga)

        # wait until the uploaded file name appears in the listing
        WebDriverWait(webdriver_test_ux, 720).until(
            EC.presence_of_element_located(
                (By.XPATH,
                 '//span[@class="name-without-extension"][text()="{} "]'.
                 format(nombre_archivo_sin_ext))))

        jsonEval["steps"][1]["output"][0]["status"] = jsonConst.SUCCESS
        jsonEval["steps"][1]["status"] = jsonConst.SUCCESS
        jsonEval["steps"][1]["output"][0][
            "output"] = 'Se realiza correctamente la carga del archivo'
    except selExcep.NoSuchElementException:
        jsonEval["steps"][1]["output"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][1]["status"] = jsonConst.FAILED
        jsonEval["steps"][1]["output"][0][
            "output"] = 'No fue posible realizar la carga del archivo'
    except selExcep.ElementClickInterceptedException:
        jsonEval["steps"][1]["output"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][1]["status"] = jsonConst.FAILED
        jsonEval["steps"][1]["output"][0][
            "output"] = 'No fue posible realizar la carga del archivo'

    tiempo_step_final = Temporizador.obtener_tiempo_timer(
    ) - tiempo_step_inicio
    fecha_fin = Temporizador.obtener_fecha_tiempo_actual()
    jsonEval["steps"][1]["time"] = FormatUtils.truncar_float_cadena(
        tiempo_step_final)
    jsonEval["steps"][1]["start"] = fecha_inicio
    jsonEval["steps"][1]["end"] = fecha_fin
    return jsonEval
def fetch_image_urls(search_keyword: str, download_number: int, wd: webdriver):
    print('------------------------------------------------')
    print('Start getting thumbnails')
    fetch_thumbnail_count = 0
    thumbnails = None
    # fetch thumbnails up to the number of images to download
    while fetch_thumbnail_count < download_number:
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(1)
        thumbnails = wd.find_elements_by_css_selector('img.Q4LuWd')
        fetch_thumbnail_count = len(thumbnails)

        # break if the number of fetched thumbnails exceeds the number of images to download
        if len(thumbnails) >= download_number:
            print('Success: Fetched thumbnails count', download_number)
            break
        else:
            # load more thumbnails when load_more_button appears
            load_more_button = wd.find_element_by_css_selector(".mye4qd")
            if load_more_button:
                wd.execute_script("document.querySelector('.mye4qd').click();")
            # break when end_text appears (this is the limit of thumbnails that can be fetched)
            end_text = wd.find_element_by_class_name('OuJzKb')
            if end_text and end_text.text == 'Looks like you\'ve reached the end':
                print('Success: Fetched maximum thumbnails count', len(thumbnails))
                break

    print('Start getting image urls')
    image_urls = []
    # extract the image url from the elements displayed by clicking the thumbnails
    for thumbnail in thumbnails[:download_number]:
        try:
            thumbnail.click()
            time.sleep(1)
        except Exception:
            continue
        # extract only the original image url because there are several urls
        thumbnail_alt = thumbnail.get_attribute('alt')
        images = wd.find_elements_by_css_selector('img.n3VNCb')
        for image in images:
            image_alt = image.get_attribute('alt')
            if thumbnail_alt == image_alt and 'http' in image.get_attribute('src'):
                image_urls.append(image.get_attribute('src'))

    print('Success: Fetched image urls count', len(image_urls))
    return image_urls
def openUnipos(browser: webdriver):
    browser.get('https://unipos.me/login')

    # input login id
    login_id_xpath = '//*[@id="content"]/div/div/div[2]/input[1]'
    browser.find_element_by_xpath(login_id_xpath).send_keys(
        os.getenv('LOGIN_ID'))

    # input login password
    login_password_xpath = '//*[@id="content"]/div/div/div[2]/input[2]'
    browser.find_element_by_xpath(login_password_xpath).send_keys(
        os.getenv('LOGIN_PASSWORD'))

    # click login button
    browser.find_element_by_class_name('login_btn').click()

    # give the page time to load
    sleep(10)

    # clap for the top n posts
    for clap_count in range(1, 2):
        clap_pass_xpath = '//*[@id="content"]/div[7]/div/div/div/div[2]/div[5]/div[{}]/div[1]/div[3]/div[3]/div[2]/a'.format(
            clap_count)
        browser.find_element_by_xpath(clap_pass_xpath).click()
def save_entry_pictures(driver: webdriver, entry_id: str):
    boy_page_url = "https://boyawards.secure-platform.com/a/gallery/rounds/12/details/" + entry_id
    driver.get(boy_page_url)
    li_webelements = driver.find_element_by_class_name(
        "slides").find_elements_by_tag_name("li")
    for i, li in enumerate(li_webelements):
        url = li.find_element_by_tag_name("a").get_attribute("href")
        ext = Path(urlparse(url).path).suffix
        path = "./images/" + entry_id + "/"
        Path(path).mkdir(parents=True, exist_ok=True)
        urlretrieve(url, (path + str(i + 1) + ext))
    with open("log.txt", "a") as f:
        f.writelines(
            " ".join([entry_id, "got", str(len(li_webelements)), "images"]) + "\n")
def get_statistics_screenshot(driver: webdriver) -> Image.Image:
    # Resize the window so that all of the statistics fit
    driver.set_window_size(1920, 600)

    # The area containing the statistics
    summary = driver.find_element_by_class_name('summary--emotion')
    statistics_location = summary.location_once_scrolled_into_view
    statistics_size = summary.size

    # Screenshot of the whole window
    statistics = Image.open(BytesIO(driver.get_screenshot_as_png()))

    # Compute the ROI
    left = int(statistics_location['x'])
    top = int(statistics_location['y'])
    right = left + int(statistics_size['width'])
    bottom = top + int(statistics_size['height'])
    roi = (left, top, right, bottom)

    return statistics.crop(roi)
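# Minimal sketch of saving the cropped statistics screenshot to disk. The
# target page URL and the output filename are assumptions for illustration.
def _demo_save_statistics(driver: webdriver, page_url: str):
    driver.get(page_url)
    screenshot = get_statistics_screenshot(driver)
    screenshot.save('statistics.png')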
def execSearch(browser: webdriver):
    """
    Access the Kyoto University assignment site and report the assignments
    that have not been done yet.
    """
    browser.get('https://cas.ecs.kyoto-u.ac.jp/cas/login?service=https%3A%2F%2Fpanda.ecs.kyoto-u.ac.jp%2Fsakai-login-tool%2Fcontainer')
    sleep(1)

    # log in
    user_id = browser.find_element_by_name("username")
    user_id.clear()
    user_id.send_keys(settings.USN)
    password = browser.find_element_by_name("password")
    password.clear()
    password.send_keys(settings.PWD)
    login = browser.find_element_by_class_name("btn-submit")
    login.click()
    sleep(1)

    # navigate to each course page
    base_url = browser.current_url
    links = {
        "弾性体の力学解析": "2020-110-3200-000",
        "流体力学": "2020-110-3165-000",
        "一般力学": "2020-110-3010-100",
        "基礎有機化学I": "2020-888-N347-014",
        "地球環境学のすすめ": "2020-888-Y201-001",
        "社会基盤デザインI": "2020-110-3181-000",
        "工業数学B2": "2020-110-3174-000",
        "確率統計解析及び演習": "2020-110-3003-000",
        "水文学基礎": "2020-110-3030-000",
        "地球工学基礎数理": "2020-110-3005-000",
    }
    # nav = browser.find_element_by_id("2020-110-3165-000")
    for subject, link_id in links.items():
        unkadai(base_url, subject, link_id)
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver,
                     sleep_between_interactions: int = 1):
    # NOTE: a fresh Chrome driver is created here, shadowing the wd argument
    DRIVER_PATH = '/usr/local/bin/chromedriver'
    wd = webdriver.Chrome(executable_path=DRIVER_PATH)
    wd.get('https://google.com')

    def scroll_to_end(wd):
        for _ in range(1000):
            wd.execute_script("window.scrollTo(0, 1000000);")

    # search query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"

    # load the page
    wd.get(search_url.format(q=query))

    image_urls = set()
    image_count = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)
        actual_images = wd.find_elements_by_css_selector('img.rg_i')
        for actual_image in actual_images:
            if actual_image.get_attribute('src'):
                image_urls.add(actual_image.get_attribute('src'))
        image_count = len(image_urls)

        if len(image_urls) >= max_links_to_fetch:
            print(f"Found: {len(image_urls)} image links, done!")
            break
        else:
            print("Found:", len(image_urls), "image links, looking for more ...")
            time.sleep(1)
            load_more_button = wd.find_element_by_class_name("mye4qd")
            if load_more_button:
                wd.execute_script("document.querySelector('.mye4qd').click();")
    return image_urls
def get_posts_list_and_subs(driver: webdriver, url: str, months: list):
    """Load a user's profile page and return a list of most of their recent posts."""
    driver.get(url)
    sleep(2)
    try:
        subs = int(
            driver.find_elements_by_class_name('g47SY')[1].get_attribute(
                'title').replace(' ', ''))
        print(f'{subs} followers')
    except IndexError:
        subs = 0
        print(f'Could not collect the follower count for account {url}')

    driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
    sleep(2)
    posts = driver.find_elements_by_class_name('v1Nh3')
    posts_links = []
    for post in posts:
        posts_links.append(
            post.find_element_by_tag_name('a').get_attribute('href'))

    try:
        more_button = driver.find_element_by_class_name('xLCgt')
        more_button.click()
    except NoSuchElementException:
        pass
    except ElementClickInterceptedException:
        print('The "more" button cannot be clicked')

    for _ in range(5):
        driver.execute_script(
            'window.scrollTo(0, document.body.scrollHeight);')
        sleep(2)
        posts = driver.find_elements_by_class_name('v1Nh3')
        for post in posts:
            posts_links.append(
                post.find_element_by_tag_name('a').get_attribute('href'))

    posts_links = delete_dublicates_from_list(posts_links)
    res = filter_posts_list(driver=driver, months=months,
                            posts_list=posts_links)
    return res, subs
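# A hedged sketch tying the profile scraper above to the per-post parser
# parse_post_data defined earlier in this module. The profile URL and month
# filter are placeholders, not values from the original code.
def _demo_collect_account_stats(driver: webdriver):
    months = ['2021-01', '2021-02']
    posts, subs = get_posts_list_and_subs(
        driver, 'https://www.instagram.com/instagram/', months)
    print(f'{subs} followers, {len(posts)} posts in the selected months')
    for post_url in posts:
        likes, views, comments = parse_post_data(driver, post_url)
        print(post_url, likes, views, comments)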
def test_guest_should_see_login_link(self, browser: webdriver,
                                     wait: WebDriverWait, link):
    browser.get(link)
    input = wait.until(
        EC.visibility_of_element_located((By.TAG_NAME, "textarea")))
    answer = math.log(int(time.time()))
    input.send_keys(str(answer))
    submit_btn = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, ".submit-submission")))
    submit_btn.click()
    wait.until(
        EC.element_to_be_clickable(
            (By.XPATH, './/button[text()="Решить снова"]')))
    black_line = browser.find_element_by_class_name(
        "smart-hints__feedback")
    if black_line.text != "Correct!":
        with open("answer_6_3.txt", "a") as f:
            f.write(black_line.text)
    assert black_line.text == "Correct!", 'Text in optional feedback is not "Correct!"'
def delete_actions(browser: webdriver, number: int) -> bool:
    browser.find_element_by_class_name(
        "timeline-comment-action.btn-link").click()
    sleep(1)
    browser.find_element_by_class_name(
        "dropdown-item.btn-link.menu-item-danger ").click()
    sleep(1)
    try:
        browser.find_element_by_class_name("btn.btn-block.btn-danger").click()
        print("Delete " + str(number) + " Workflow Successfully! ")
        return True
    except Exception as exception:
        print_traceback_error(exception)
        return False
def updateGameBoardHtml(current_session: webdriver):
    # read the board state directly from the website
    board = current_session.find_element_by_class_name('board')
    cells = board.find_elements_by_xpath('./*')
    gameBoard = [None] * 9
    for i, cell in enumerate(cells):
        val = str(
            cell.find_element_by_xpath('./*').get_attribute('class')).lower()
        if val == 'x':
            gameBoard[i] = 'X'
        elif val == 'o':
            gameBoard[i] = 'O'
        else:
            gameBoard[i] = None
    return gameBoard
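# Illustrative helper for printing the list returned by updateGameBoardHtml as
# a 3x3 grid; purely an assumption about how the board might be consumed.
def _demo_print_board(current_session: webdriver):
    board = updateGameBoardHtml(current_session)
    for row_start in range(0, 9, 3):
        row = board[row_start:row_start + 3]
        print(' | '.join(cell if cell else ' ' for cell in row))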
def toggle_relative(self, driver: webdriver) -> None:
    driver.find_element_by_class_name(
        constants.CLASS_TOGGLE_RELATIVE).click()
def CLR_Html(browser: webdriver, Name: list) -> str:
    """
    Try every Selenium locator strategy for each name and report which ones matched.

    Example:
        a = CLR_Html(self.b, ['page_name', 'profile_message_send',
                              'profile_action_btn', 'profile_msg_split'])
        print(a)
        # a = Soport_webdriver.CLR_Html(b, ['mail_box_send'])

    :param Name: list of locator strings to probe
    :param browser: webdriver instance
    :return: one line per hit: "<name> |-| <strategy> |-| <text>"
    """
    strategies = [
        'find_element_by_id',
        'find_element_by_name',
        'find_element_by_xpath',
        'find_element_by_link_text',
        'find_element_by_partial_link_text',
        'find_element_by_tag_name',
        'find_element_by_class_name',
        'find_element_by_css_selector',
    ]
    res = []
    all_res = []
    matched_names = []
    for name in Name:
        for strategy in strategies:
            try:
                all_res.append(getattr(browser, strategy)(name).text)
                res.append(strategy)
                matched_names.append(name)
            except Exception:
                pass
    io = ''
    for x in range(len(res)):
        io += '{} |-| {} |-| {}\n'.format(
            str(matched_names[x]), str(res[x]), str(all_res[x]))
    return io
def get_restaurant_details_from_a_page(driver: webdriver, r_url: str) -> dict:
    restaurant = {}
    soup = get_soup_using_selenium(r_url, driver, 15)

    # get the restaurant's name
    r_name = soup.find('h1', attrs={'data-test-target': 'top-info-header'})
    restaurant['name'] = r_name.string

    try:
        div_rating_review = soup.find('div', class_='Ct2OcWS4')
        rating_str = div_rating_review.find('span', class_='r2Cf69qf').text
        restaurant['rating'] = get_ratings(rating_str)
        num_review_str = div_rating_review.find('a', class_='_10Iv7dOs').string
        restaurant['num_reviews'] = get_number_of_reviews(num_review_str)
    except AttributeError:
        # no ratings and reviews
        restaurant['rating'] = -1
        restaurant['num_reviews'] = -1

    cuisine_str = ''
    try:
        # e.g. 'https://www.tripadvisor.com/Restaurant_Review-g293916-d15776288-Reviews-1826_Mixology_Rooftop_Bar-Bangkok.html'
        div_details = soup.find('div', class_='_3UjHBXYa').find_all(
            'div', class_='_1XLfiSsv')
        if 'THB' in str(div_details[0]):
            cuisine_str = div_details[1].string
        else:
            cuisine_str = div_details[0].string
    except AttributeError:
        try:
            # e.g. 'https://www.tripadvisor.com/Restaurant_Review-g293916-d3715466-Reviews-Calderazzo_On_31-Bangkok.html'
            div_detail = soup.find('div', attrs={'data-tab': 'TABS_DETAILS'})
            div_cuisines = div_detail.find_all('div', class_='ui_column')
            for d in div_cuisines:
                if 'CUISINES' in str(d):
                    cuisine_str = d.find('div', class_='_2170bBgV').string
                    # print(cuisine_str)
        except AttributeError:
            # the details div is not shown, e.g.
            # https://www.tripadvisor.com/Restaurant_Review-g293916-d873174-Reviews-Le_Normandie-Bangkok.html
            sub_heading = soup.find('span', class_='_13OzAOXO _34GKdBMV')
            a_cuisines = list(sub_heading)[1:-1]
            cuisine_str = ','.join([a.string for a in a_cuisines])
            # print(cuisine_str)
    cuisines = ','.join([c.strip() for c in cuisine_str.split(',')])
    restaurant['cuisines'] = cuisines

    span_address = soup.find('span', class_='_2saB_OSe')
    restaurant['address'] = span_address.string

    span_opening_hrs = soup.find('span', class_='_1h0LGVD2').find_all('span')
    for s in span_opening_hrs:
        if 18 <= len(s.text) < 20:
            opening_hrs_str = s.text.replace(u'\xa0', u' ')
            restaurant['opening_hour'] = opening_hrs_str
    try:
        hr = restaurant['opening_hour']
    except KeyError:
        see_all_hours = driver.find_element_by_class_name('ct1ZIzQ6')
        see_all_hours.click()
        sun_hrs = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, '_2W4n9Mwo')))
        restaurant['opening_hour'] = sun_hrs.text
    return restaurant
def inicio_sesion_claro_drive(webdriver_test_ux: webdriver, jsonEval, jsonArgs):
    tiempo_step_inicio = Temporizador.obtener_tiempo_timer()
    fecha_inicio = Temporizador.obtener_fecha_tiempo_actual()
    try:
        # open the portal and the login form
        webdriver_test_ux.get('https://www.clarodrive.com/')
        time.sleep(5)
        btn_inicio_sesion = webdriver_test_ux.find_element_by_id('login')
        btn_inicio_sesion.click()
        time.sleep(4)

        # enter the credentials
        input_email = webdriver_test_ux.find_element_by_class_name(
            'InputEmail')
        input_email.send_keys(jsonArgs['user'])
        time.sleep(4)
        input_password = webdriver_test_ux.find_element_by_class_name(
            'InputPassword')
        input_password.send_keys(jsonArgs['password'])
        time.sleep(4)
        btn_ingreso_cuenta = webdriver_test_ux.find_element_by_xpath(
            '//button[text()="INICIAR SESI\u00D3N"]')
        btn_ingreso_cuenta.click()

        # wait for the home folder and open it
        carpeta_folder = WebDriverWait(webdriver_test_ux, 60).until(
            EC.element_to_be_clickable(
                (By.XPATH,
                 '//span[text()="Folder "][@class="name-without-extension"]')))
        carpeta_folder.click()
        time.sleep(6)

        jsonEval["steps"][0]["output"][0]["status"] = jsonConst.SUCCESS
        jsonEval["steps"][0]["status"] = jsonConst.SUCCESS
        jsonEval["steps"][0]["output"][0][
            "output"] = 'Se ingresa correctamente al portal Claro Drive'
    except selExcep.ElementNotInteractableException:
        jsonEval["steps"][0]["output"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["output"][0][
            "output"] = 'fue imposible ingresar al portal Claro Drive'
    except selExcep.NoSuchElementException:
        jsonEval["steps"][0]["output"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["output"][0][
            "output"] = 'fue imposible ingresar al portal Claro Drive'
    except selExcep.TimeoutException:
        jsonEval["steps"][0]["output"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["status"] = jsonConst.FAILED
        jsonEval["steps"][0]["output"][0][
            "output"] = 'fue imposible ingresar al portal Claro Drive'

    tiempo_step_final = Temporizador.obtener_tiempo_timer(
    ) - tiempo_step_inicio
    fecha_fin = Temporizador.obtener_fecha_tiempo_actual()
    jsonEval["steps"][0]["time"] = FormatUtils.truncar_float_cadena(
        tiempo_step_final)
    jsonEval["steps"][0]["start"] = fecha_inicio
    jsonEval["steps"][0]["end"] = fecha_fin
    return jsonEval
def bypass_captcha(driver: webdriver, xpath_locator: str, locator_value: str):
    driver.switch_to.frame(driver.find_element_by_xpath(xpath_locator))
    driver.find_element_by_class_name(locator_value).click()
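# Hypothetical call site for bypass_captcha: the iframe XPath and checkbox
# class below are illustrative reCAPTCHA-style locators, not values taken from
# the source. Switching back to the default content afterwards is assumed to
# be the caller's responsibility.
def _demo_bypass_captcha(driver: webdriver):
    bypass_captcha(
        driver,
        xpath_locator='//iframe[contains(@src, "recaptcha")]',
        locator_value='recaptcha-checkbox-border',
    )
    driver.switch_to.default_content()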