Example no. 1
1
 def apply_tags(self):
     """Push "apply" button with trailing page reloading."""
     old_url = self.browser.current_url
     self.browser.find_element_by_class_name(self.apply_btn).click()
     self.wait.until(EC.url_changes(old_url))
     self.wait_page_loading()
Example no. 2
0
    def location(self, url, data=None, headers=None, params=None, method=None, json=None):
        """Change current url of the browser.

        Warning: unlike other requests-based weboob browsers, this function does not block
        until the page is loaded; it is completely asynchronous.
        To use the new page content, it is necessary to wait, either implicitly (e.g. with the
        context manager :any:`implicit_wait`) or explicitly (e.g. using the method
        :any:`wait_until`).
        """
        assert method is None
        assert params is None
        assert data is None
        assert json is None
        assert not headers
        self.logger.debug('opening %r', url)
        self.driver.get(url)

        try:
            WebDriverWait(self.driver, 1).until(EC.url_changes(self.url))
        except TimeoutException:
            pass
        return FakeResponse(page=self.page)
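A minimal usage sketch for the method above, assuming a Selenium-based weboob browser instance exposing this location() together with the implicit_wait context manager and wait_until method named in the docstring (their exact signatures are assumptions here, and EC is the usual expected_conditions alias):

# Hedged sketch, not part of the original browser class.
browser.location('https://example.org/account')  # returns immediately; the page may still be loading

# either wait implicitly while the new page is inspected...
with browser.implicit_wait(10):
    print(browser.page)

# ...or wait explicitly for a concrete condition first
browser.wait_until(EC.title_contains('Account'))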
Example no. 3
0
 def make_order(self):
     self.submit_button.click()
     self.driver.wait.until(EC.url_changes(self.path))
Example no. 4
0
 def apply_tags(self):
     """Push "apply" button with trailing page reloading."""
     old_url = self.browser.current_url
     self.browser.find_element_by_class_name(self.apply_btn).click()
     self.wait.until(EC.url_changes(old_url))
     self.wait_page_loading()
Example no. 5
# wait until sign-in fields are visible
wait = WebDriverWait(driver, browser_action_timeout)
wait.until(
    ec.frame_to_be_available_and_switch_to_it(
        ("id", "gauth-widget-frame-gauth-widget")))
wait.until(ec.presence_of_element_located(("id", "username")))

#write login info to fields, then submit
print("Signing in to connect.garmin.com")
element = driver.find_element_by_id("username")
element.send_keys(user_name)
element = driver.find_element_by_id("password")
element.send_keys(password)
element.send_keys(Keys.RETURN)

wait.until(ec.url_changes(signin_url))  #wait until landing page is requested
driver.switch_to.default_content()  #get out of iframe

#get dummy webpage to obtain all request headers
print("Loading dummy page to obtain headers")
driver.get(sleep_url_base + start_date)
request = driver.wait_for_request(sleep_url_base + start_date,
                                  timeout=browser_action_timeout)

#close the Firefox browser
driver.close()
print("Headers obtained and Firefox has been closed")

#print("The request headers are:")
#print(request.headers)
#transfer request headers
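The original snippet stops after capturing the request; a hedged sketch of the hinted "transfer request headers" step, assuming the selenium-wire capture's request.headers is copied onto a requests session (the session object and the follow-up call are illustrative only):

import requests

# Hedged sketch: reuse the captured browser headers for plain HTTP requests.
session = requests.Session()
session.headers.update({k: v for k, v in request.headers.items()})
# e.g. response = session.get(sleep_url_base + start_date)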
Example no. 6
0
 def wait_page_load(self, old_url, time=10):
     WebDriverWait(self.driver, time).until(EC.url_changes(old_url))
 def test_submitting_to_interviewdb(self):
     self.browser.get(
         'http://*****:*****')
     self.browser.find_element_by_xpath(
         '//*[@id="root"]/section/div/main/nav/nav/button[3]').click()
         wait.until(
             EC.element_to_be_clickable((By.TAG_NAME, 'li')))
         self.browser.find_element_by_tag_name('input').clear()
         self.browser.find_element_by_tag_name('input').send_keys('365')
         self.browser.find_element_by_xpath(
             '//*[@id="react-tabs-1"]/div/div/div[1]/div/div/div[1]/div[2]/div/div[1]/select/option[7]').click()
         wait.until(EC.visibility_of_element_located(
             (By.XPATH, '//*[@id="react-tabs-1"]/div/div/div[1]/div/div/div[1]/div[3]/div[1]/div/div[2]/div')))
         time.sleep(2)
         fullTitleIsPresent = self.browser.page_source.find(
             jobDetails) != -1
         halfTitleIsPresent = halfJobDetails in self.browser.page_source
         if not halfTitleIsPresent or not fullTitleIsPresent:
             time.sleep(1)
             self.browser.find_element_by_xpath('//*[@id="root"]/section/div/nav/a[1]').click()
             wait.until(EC.invisibility_of_element((By.CSS_SELECTOR, '.sc-lcpuFF.eOXROa')))
             if wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'btn-add'))):
                 self.browser.find_elements_by_class_name('btn-add')[5].click()
                 title = self.browser.find_element_by_id(
                     'root_applications_0_jobTitle')
                 title.click()
                 title.send_keys(
                     jobTitle)
                 actions = ActionChains(self.browser)
                 companyButton = self.browser.find_elements_by_class_name(
                     'css-1hwfws3')[0]
                 actions.click(companyButton)
                 actions.send_keys(
                     jobCompany)
                 actions.pause(2)
                 # actions.send_keys(Keys.UP)
                 actions.send_keys(Keys.ENTER)
                 # actions.send_keys(Keys.TAB)
                 actions.perform()
                 actions.reset_actions()
                 #
                 # find the link
                 # find the source field and create or select
                 sourceButton = self.browser.find_elements_by_class_name(
                     'css-1hwfws3')[1]
                 actions = ActionChains(self.browser)    
                 actions.reset_actions()
                 actions.click(sourceButton)
                 actions.send_keys(
                     jobWebsite)
                 actions.pause(2)
                 # actions.send_keys(Keys.UP)
                 actions.send_keys(Keys.ENTER)
                 actions.perform()
                 actions.reset_actions()
                 #
             self.browser.find_elements_by_tag_name('button')[9].click()
             wait.until(EC.url_matches(
                 'https://www.interview-db.com/profile/job-search'))
             # time.sleep(10)
         else:
             skipCount += 1
             
             if skipCount > 29:
                 multipleSkips = True
         self.browser.get(
             'http://localhost:3000/cover-letter-generator/all-jobs/')
         allJobs = self.browser.find_elements_by_tag_name('a')
         if halfTitleIsPresent or fullTitleIsPresent:
             print('Skipped Job #', i/2, ' skip count is -', skipCount)
         else:
             print('Finished Job #', (i/2))
         i += 2
Example no. 8
0
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager

browser = webdriver.Chrome(ChromeDriverManager().install())

browser.get("https://repl.it/login")

username_input = browser.find_element_by_xpath(
    "/html/body/div/div/div[3]/div[2]/div[1]/div[2]/form/div[1]/div/div/input")

password_input = browser.find_element_by_xpath(
    "/html/body/div/div/div[3]/div[2]/div[1]/div[2]/form/div[2]/div/div/div/input"
)

# login_btn = browser.find_element_by_partial_link_text("Log") #only look for a link tag
login_btn = browser.find_element_by_xpath(
    "/html/body/div/div/div[3]/div[2]/div[1]/div[2]/form/button")

# username_input.send_keys("haneulee")
# password_input.send_keys(input("what is your password?"))
# login_btn.click()

github_btn = browser.find_element_by_xpath(
    "/html/body/div/div/div[3]/div[2]/div[1]/div[1]/a[2]")
github_btn.click()
WebDriverWait(browser, 10).until(EC.url_changes("https://repl.it/~"))

title = WebDriverWait(browser, 10).until(
    EC.presence_of_element_located((By.CLASS_NAME, "username")))

print(title.text)
Example no. 9
0
def boat_db_scraper(engine, boat_id):
    """
    Scrapes preliminary text from boat database.
    Parameters:
    engine: Selenium WebDriver
        Local chrome webdriver for Selenium scraping
    boat_id: string
        Unique boat MMSI value
    Returns:
    boat_text_list: list
        Preliminary fields from boat db scrape
    """

    # Initialize list to hold scraped text
    boat_text_list = []

    # Navigate to website for boat_id
    engine.get(
        'https://www.marinetraffic.com/en/data/?asset_type=vessels&columns=flag,shipname,imo,mmsi,ship_type,show_on_live_map,time_of_latest_position,status,year_of_build,length,width,dwt,callsign&quicksearch|begins|quicksearch='
        + str(boat_id))

    # Save url and wait for load
    current_url = engine.current_url
    time.sleep(1.3)

    # Try to click boat link
    try:
        boat_link = engine.find_element_by_xpath(
            '//*[@id="borderLayout_eGridPanel"]/div[1]/div/div/div[3]/div[1]/div/div[1]/div[3]/div/div/a')
        boat_link.click()

    # Boat link wasn't present
    except NoSuchElementException:

        # Try to find no boats on page response
        try:
            no_boat_text = engine.find_element_by_xpath('//*[@id="borderLayout_eGridPanel"]/div[2]/div/div/span').text

            # If no boats, return
            if no_boat_text[0:2] == 'No':
                boat_text_list.append('MMSI:' + str(boat_id))

                return boat_text_list

            # Absent boat warning NOT present: Something else is wrong
            else:

                # Try to reload page and click with larger grace period
                try:
                    engine.get(
                        'https://www.marinetraffic.com/en/data/?asset_type=vessels&columns=flag,shipname,imo,mmsi,ship_type,show_on_live_map,time_of_latest_position,status,year_of_build,length,width,dwt,callsign&quicksearch|begins|quicksearch='
                        + str(boat_id))

                    current_url = engine.current_url
                    time.sleep(3)

                    boat_link = engine.find_element_by_xpath(
                        '//*[@id="borderLayout_eGridPanel"]/div[1]/div/div/div[3]/div[1]/div/div[1]/div[3]/div/div/a')

                    boat_link.click()

                # All methods have failed, boat not in database
                except NoSuchElementException:
                    boat_text_list.append('MMSI:' + str(boat_id))
                    return boat_text_list

        # No boat message is missing
        except NoSuchElementException:

            # Try to reload page and click with larger grace period
            try:
                engine.get(
                    'https://www.marinetraffic.com/en/data/?asset_type=vessels&columns=flag,shipname,imo,mmsi,ship_type,show_on_live_map,time_of_latest_position,status,year_of_build,length,width,dwt,callsign&quicksearch|begins|quicksearch='
                    + str(boat_id))

                current_url = engine.current_url
                time.sleep(3)

                boat_link = engine.find_element_by_xpath(
                    '//*[@id="borderLayout_eGridPanel"]/div[1]/div/div/div[3]/div[1]/div/div[1]/div[3]/div/div/a')

                boat_link.click()

            # All methods have failed, boat not in database
            except NoSuchElementException:
                boat_text_list.append('MMSI:' + str(boat_id))
                
                return boat_text_list

    # Pause until url changes with load
    WebDriverWait(engine, 15).until(ec.url_changes(current_url))
    time.sleep(1)

    # Scrape left data column
    try:
        boat_text_list.extend(engine.find_element_by_class_name('col-xs-6').text.split('\n'))
    except NoSuchElementException:
        pass

    # Scrape right data column
    try:
        boat_text_list.extend(
            engine.find_element_by_xpath('/html/body/main/div/div/div[1]/div[6]/div[1]/div[1]/div/div[2]').text.split(
                '\n'))
    except NoSuchElementException:
        pass

    # Scrape general tab
    try:
        boat_text_list.extend(engine.find_element_by_id('vessel_details_general').text.split('\n'))
    except NoSuchElementException:
        pass

    # Scrape extra name field as fall-back
    try:
        boat_text_list.append('Name: ' + engine.find_element_by_xpath(
            '/html/body/main/div/div/div[1]/div[5]/div/div[2]/div[1]/div[1]/h1').text)
    except NoSuchElementException:
        pass

    # Scrape photo hyperlink if present
    try:
        boat_text_list.append(engine.find_element_by_xpath('//*[@id="big-image"]/img').get_attribute('src'))
    except NoSuchElementException:
        boat_text_list.append('Photo:NULL')

    return boat_text_list
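A minimal, hypothetical call of the scraper above (the MMSI value is illustrative only, and a local chromedriver is assumed to be on PATH):

from selenium import webdriver

engine = webdriver.Chrome()                     # local Chrome webdriver, as the docstring assumes
fields = boat_db_scraper(engine, '123456789')   # illustrative MMSI
print(fields)
engine.quit()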
Example no. 10
def login():
    with open (FileFullPathInfo, 'a', encoding='utf-8') as txt:

        print('Clicking the "Log in to personal account" button')

        log_txt_1 = f'=== Signing in on the site ===\n'
        log_txt_2 = f'{datetime.now()}: Clicking the "Log in to personal account" button\n'
        logfile = log_txt_1 + log_txt_2
        txt.write(logfile)

        try:
            browser.find_element_by_xpath('/html/body/div/header/div/div/div[4]/a[1]').click()
            timer.sleep(2)
        except ElementNotInteractableException:
            browser.find_element_by_xpath('/html/body/div/header/a').click()
            browser.find_element_by_xpath('/html/body/div/header/div/div/div[4]/a[1]').click()

        print('ESIA confirmation')
        logfile = f'{datetime.now()}: ESIA confirmation\n'
        txt.write(logfile)

        try:
            esia_check = browser.find_element_by_xpath('//*[@id="esia_agree"]')
            esia_check.click()
            timer.sleep(4)
        except ElementNotInteractableException:
            browser.find_element_by_xpath('//*[@id="esia"]/div/div/div[2]/div/label').click()
            WebDriverWait(browser, 60).until(EC.url_changes(browser.current_url))

        print('Clicking the "Confirm ESIA terms" button')
        logfile = f'{datetime.now()}: Clicking the "Confirm ESIA terms" button\n'
        txt.write(logfile)

        browser.find_element_by_xpath('//*[@id="esia_login"]').click()
        timer.sleep(4)

        try:
            print('Entering ESIA login and password')
            logfile = f'{datetime.now()}: Entering ESIA login and password\n'
            txt.write(logfile)
            login_form = browser.find_element_by_xpath('//*[@id="mobileOrEmail"]')
            login_form.send_keys(LOGIN)
            pass_form = browser.find_element_by_xpath('//*[@id="password"]')
            pass_form.send_keys(PASSWORD)
        except ElementNotInteractableException:
            print('Entering the ESIA password')
            pass_form = browser.find_element_by_xpath('//*[@id="password"]')
            pass_form.send_keys(PASSWORD)

        try:
            print('Clicking the "Sign in" button')
            logfile = f'{datetime.now()}: Clicking the "Sign in" button\n'
            txt.write(logfile)

            browser.find_element_by_xpath('//*[@id="loginByPwdButton"]/span').click()
            timer.sleep(5)
        except TimeoutException:
            alert_phone = browser.find_element_by_xpath('//*[@id="authnFrm"]/div[1]/div[3]/dl[1]/dd/div/div/span')
            alert = browser.find_element_by_xpath('//*[@id="authnFrm"]/div[1]/div[3]/div[2]/div/span')
            logfile = f'{datetime.now()}: Error: {alert.text if alert.text else alert_phone.text}\nExiting the program.'
            txt.write(logfile)
            browser.close()
            quit()

        print('Navigating to the personal account, Workspace section')
        logfile = f'=== Sign-in complete ===\n'
        txt.write(logfile)
        logfile = f'{datetime.now()}: Navigating to the personal account, Workspace section\n'
        txt.write(logfile)
Example no. 11
0
    def go_back(self):
        current_url = self.selenium.current_url
        self.selenium.back()

        WebDriverWait(self.selenium,
                      5).until(expected_conditions.url_changes(current_url))
Example no. 12
0
def login_valid_user(selenium, user_infos):
    login(selenium, user_infos)
    WebDriverWait(selenium, 10).until(EC.url_changes(f"{URL}login"))
    return user_infos
Example no. 13
0
    def getBusinesses(self, oldUrl):
        try:
            # wait until url is changed
            WebDriverWait(self.driver, 30).until(EC.url_changes(oldUrl))
            time.sleep(2)

            businesses = []
            businesses = self.driver.find_elements_by_xpath("//div[@class='section-result-content']")

            for business in businesses:
                business_dict = {
                    "Business Name": "",
                    "Location": self.city,
                    "Business Website": "",
                }

                try:
                    company_name = business.find_element_by_xpath(".//h3[@class='section-result-title']/span").text
                    if company_name:
                        business_dict["Business Name"] = company_name.strip()

                        try:
                            business_dict["Business Website"] = business.find_element_by_xpath(".//div[@class='section-result-action-container']/div[1]/a[@href]").get_attribute("href")
                        except:
                            pass
                        
                        self.businessList.append(business_dict)

                        if self.useApiFlag:
                            ownerName, linkedin_url, jobTitle = self.getOwnerName(business_dict)

                            if ownerName:
                                ownerEmail = self.getOwnerInfo(ownerName, business_dict)
                            else:
                                ownerEmail = ""

                        else:
                            ownerName, linkedin_url, jobTitle, ownerEmail = "", "", "", ""

                        final_dict = business_dict.copy()
                        final_dict["Business Website"] = self.convertURLToDomain(final_dict["Business Website"])

                        if ownerName:
                            final_dict["First Name"] = ownerName.rsplit(" ", 1)[1]
                            final_dict["Last Name"] = ownerName.rsplit(" ", 1)[0]
                        else:
                            final_dict["First Name"] = ""
                            final_dict["Last Name"] = ""

                        final_dict["Job Title"] = jobTitle
                        final_dict["Contact Website"] = linkedin_url
                        final_dict["Email"] = ownerEmail

                        if ownerName:
                            final_dict["Source"] = "API"
                        else:
                            final_dict["Source"] = "Google"

                        # insert data to csv file.
                        file_exist = os.path.isfile(output_csv_path)
                        with open(output_csv_path, 'a', newline="", encoding="utf-8") as output_file:
                            fieldnames = ["First Name", "Last Name", "Job Title", "Location", "Email", "Source", "Business Name", "Business Website", "Contact Website"]
                            writer = csv.DictWriter(output_file, fieldnames=fieldnames)

                            # write the header fields if the file does not exist yet
                            if not file_exist:
                                writer.writeheader()

                            writer.writerow(final_dict)

                        print("-----------------------------------")
                        print("Found `{}` Business".format(final_dict["Business Name"]))
                        print("Domain: ", final_dict["Business Website"])

                        if self.useApiFlag:
                            print("Name: ", final_dict["First Name"], final_dict["Last Name"])
                            print("Job Title: ", final_dict["Job Title"])
                            print("Email: ", final_dict["Email"])

                except:
                    traceback.print_exc()
                    continue
                
        except:
            traceback.print_exc()
            pass

        
        # pagination
        try:
            pagination_element = self.driver.find_element_by_xpath("//button[@aria-label=' Next page ']")
            
            if pagination_element:
                if pagination_element.is_enabled() and pagination_element.is_displayed():
                    current_url = self.driver.current_url
                    pagination_element.click()
                    print("----- Clicked Page Next Button -----")
                    return True, current_url
            else:
                print("Can not Find Pagination Button.")
            
            return False, ""
        except NoSuchElementException:
            print("Can not Find Pagination Button.")
            return False, ""
        except WebDriverException:
            print("Next Button Not Clickable.")
            return False, ""
        except:
            traceback.print_exc()
            return False, ""
Example no. 14
0
def test_carts_add(baseurl_option, browser):
    browser.get(baseurl_option)
    buttons = browser.find_elements_by_xpath('//div[3]/button[1]')
    for button in buttons:
        button.click()
    WebDriverWait(browser, 5).until(EC.url_changes(baseurl_option))
Example no. 15
0
import random
import time

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome('chromedriver.exe')

actions = ActionChains(driver)

driver.get("https://discord.com/login")
time.sleep(1)
inputName = driver.find_element_by_name("email")
inputName.clear()
inputName.send_keys(conf['email'])
inputPass = driver.find_element_by_name("password")
inputPass.clear()
inputPass.send_keys(conf['password'])
inputPass.send_keys(Keys.RETURN)

wait = WebDriverWait(driver, 10)
element = wait.until(EC.url_changes("https://discord.com/login"))

driver.get(conf['channel'])
time.sleep(7)

theBody = driver.find_element_by_tag_name('body')

count = 1

try:
    while True:
        if conf['customMessage'] != "":
            text = conf['customMessage']
        else:
            text = random.choice(sentences)
        textArea = driver.find_elements_by_css_selector(
Example no. 16
0
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = Options()
options.add_argument("--headless")
first = "9"
second = "10"
driver = webdriver.Firefox(executable_path='/usr/local/bin/geckodriver',
                           options=options)
#driver = webdriver.Firefox(firefox_options=options);
driver.get("http://192.168.1.48:8070/websample/index.jsp")
current_url = driver.current_url
assert "Addition" in driver.title
elem = driver.find_element_by_name("first")
elem.send_keys(first)
elem = driver.find_element_by_name("second")
elem.send_keys(second)
elem.send_keys(Keys.RETURN)

# wait for URL to change with 15 seconds timeout
WebDriverWait(driver, 15).until(EC.url_changes(current_url))

# print new URL
new_url = driver.current_url
print(new_url)
element = driver.find_element_by_id("result")
print(element.text)
assert element.text == 'Sum is 19'
driver.close()