def hover_and_click(driver, hover_selector, click_selector,
                    hover_by=By.CSS_SELECTOR, click_by=By.CSS_SELECTOR,
                    timeout=settings.SMALL_TIMEOUT):
    """ Fires the hover event for a specified element by a given selector, then
    clicks on another element specified. Useful for dropdown hover based menus,
    where the click target only appears after the hover event fires.
    @Params
    driver - the webdriver object (required)
    hover_selector - the css selector to hover over (required)
    click_selector - the css selector to click on (required)
    hover_by - the method to search by (Default: By.CSS_SELECTOR)
    click_by - the method to search by (Default: By.CSS_SELECTOR)
    timeout - number of seconds to wait for click element to appear after hover
    Returns the clicked WebElement.
    Raises NoSuchElementException if the click target never appears.
    """
    start_ms = time.time() * 1000.0
    stop_ms = start_ms + (timeout * 1000.0)
    element = driver.find_element(by=hover_by, value=hover_selector)
    hover = ActionChains(driver).move_to_element(element)
    hover.perform()
    # Poll for the click target; hover-revealed menus can take a moment to
    # render, so retry every 0.1s until the timeout elapses.
    for x in range(int(timeout * 10)):
        try:
            element = driver.find_element(by=click_by, value=click_selector)
            element.click()
            # BUG FIX: the original returned the result of .click(), which is
            # always None; return the element itself so callers can use it.
            return element
        except Exception:
            now_ms = time.time() * 1000.0
            if now_ms >= stop_ms:
                break
            time.sleep(0.1)
    raise NoSuchElementException(
        "Element {%s} was not present after %s seconds!" % (
            click_selector, timeout))
def _drag_marker_on_map(self, endx, endy):
    """Drag the first leaflet map marker by the given pixel offsets."""
    markers = self.driver.find_elements_by_css_selector(
        '.leaflet-marker-pane img')
    chain = ActionChains(self.driver)
    chain.drag_and_drop_by_offset(markers[0], endx, endy)
    chain.perform()
def test_patches_hover_still_works_when_a_seleciton_is_preselcted(output_file_url, selenium):
    """Regression test: hovering a Patches glyph must not produce JS console
    errors when the driving ColumnDataSource already carries a selection.
    """
    # This tests an edge case interaction when Patches (specifically) is used
    # with a tool that requires hit testing e.g. HitTool AND a selection is
    # pre-made on the data source driving it.
    plot = Plot(
        x_range=Range1d(0, 100), y_range=Range1d(0, 100), min_border=0
    )
    # Two square patches side by side, each with its own fill color.
    source = ColumnDataSource(dict(
        xs=[[0, 50, 50, 0], [50, 100, 100, 50]],
        ys=[[0, 0, 100, 100], [0, 0, 100, 100]],
        color=['pink', 'blue']
    ))
    # Pre-select the second patch ('1d' indices) before rendering.
    source.selected = {
        '0d': {'glyph': None, 'indices': []},
        '1d': {'indices': [1]},
        '2d': {}
    }
    plot.add_glyph(source, Patches(xs='xs', ys='ys', fill_color='color'))
    plot.add_tools(HoverTool())  # HoverTool forces hit testing on hover
    plot.add_layout(LinearAxis(), 'below')
    plot.add_layout(LinearAxis(), 'left')

    save(plot)
    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Hover plot and test no error
    canvas = selenium.find_element_by_tag_name('canvas')
    actions = ActionChains(selenium)
    actions.move_to_element_with_offset(canvas, 100, 100)
    actions.perform()

    # If this assertion fails then there were likely errors on hover
    assert has_no_console_errors(selenium)
def goto_options_item(self, link):
    """Click a group-management option link in the options dropdown.

    @Params
    link - key into self.locators naming the dropdown option to click

    Raises ValueError if `link` is not one of the known link names.
    """
    if link not in self._links:
        # BUG FIX: the original passed `link` as a second positional
        # argument to ValueError instead of %-formatting it into the
        # message, so the exception text was never interpolated.
        raise ValueError("invalid link name: '%s'" % link)
    base = self.owner.find_element(self.locators["base"])
    # hover mouse over the group manager toolbar to expand it
    actionProvider = ActionChains(self.owner._browser).move_to_element(base)
    self.logger.debug("moving mouse over options dropdown")
    actionProvider.perform()
    # move the mouse to the correct link and click it
    loc = self.locators[link]
    e = self.owner.find_element(loc, base)
    # moving to the element does not work in this case
    # I think because the browser's popup window for the url
    # blocks the element? either way, we can click the element
    # just by having the options menu open.
    # actionProvider = ActionChains(self.owner._browser)\
    #                      .move_to_element(e)
    # actionProvider.perform()
    self.owner.wait_for_page_element_displayed(loc)
    self.logger.debug("clicking drowdown menu option '%s': %s" % (link, loc))
    e.click()
def test_copy_from_language(self):
    """Copy plugins from the English page into the (empty) Italian page via
    the structure board's "copy from language" submenu entry, then verify
    exactly one text plugin with body 'test' was copied.
    """
    self._login()
    self.driver.get('%s/it/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))

    # check if there are no plugins in italian version of the page
    italian_plugins = self.page.placeholders.all()[0].get_plugins_list('it')
    self.assertEqual(len(italian_plugins), 0)

    # Switch the toolbar into structure ("build") mode.
    build_button = self.driver.find_element_by_css_selector(
        '.cms-toolbar-item-cms-mode-switcher a[href="?%s"]' % get_cms_setting('CMS_TOOLBAR_URL__BUILD'))
    build_button.click()

    # The copy-from-language entry lives in a hover-activated submenu.
    submenu = self.driver.find_element_by_css_selector('.cms-dragbar .cms-submenu')
    hov = ActionChains(self.driver).move_to_element(submenu)
    hov.perform()

    submenu_link_selector = '.cms-submenu-item a[data-rel="copy-lang"][data-language="en"]'
    WebDriverWait(self.driver, 10).until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, submenu_link_selector)))
    copy_from_english = self.driver.find_element_by_css_selector(submenu_link_selector)
    copy_from_english.click()

    # Done, check if the text plugin was copied and it is only one
    WebDriverWait(self.driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, '.cms-draggable:nth-child(1)')))
    italian_plugins = self.page.placeholders.all()[0].get_plugins_list('it')
    self.assertEqual(len(italian_plugins), 1)
    plugin_instance = italian_plugins[0].get_plugin_instance()[0]
    self.assertEqual(plugin_instance.body, 'test')
def _drag_page_el_2_page_el(step, elemant_name_src, element_name_target):
    """Drag one visible page element onto another, both looked up by name."""
    source_el = get_visible_page_element(elemant_name_src)
    target_el = get_visible_page_element(element_name_target)
    chain = ActionChains(world.browser.driver)
    chain.drag_and_drop(source_el, target_el).perform()
def mouse_over(self):
    """Scroll this element into view, then hover the mouse over it."""
    LOG.debug('Mouse over element %s' % str(self.locator))
    self.scroll_into_view()
    target = self.get_element()
    ActionChains(self.driver).move_to_element(target).perform()
def edit_environment(self, old_name, new_name):
    """Rename an environment row inline: hover the name cell to reveal the
    pencil button, click it, type the new name, and submit the inline form.
    """
    el_td = self.driver.find_element_by_css_selector(
        'tr[data-display="{0}"] '.format(old_name) +
        'td[data-cell-name="name"]')
    el_pencil = el_td.find_element_by_css_selector(
        'button.ajax-inline-edit')
    # hover to make pencil visible
    hover = ActionChains(self.driver).move_to_element(el_td)
    hover.perform()
    el_pencil.click()
    # fill in inline input
    el_inline_input = self.driver.find_element_by_css_selector(
        'tr[data-display="{0}"] '.format(old_name) +
        'td[data-cell-name="name"] .inline-edit-form input')
    el_inline_input.clear()
    el_inline_input.send_keys(new_name)
    # click submit
    el_submit = self.driver.find_element_by_css_selector(
        'tr[data-display="{0}"] '.format(old_name) +
        'td[data-cell-name="name"] .inline-edit-actions' +
        ' button[type="submit"]')
    el_submit.click()
def _drag_page_el_2_thing(step, element_name, pick, find_pattern):
    """Drag a visible page element onto a target located by (pick, find_pattern).

    NOTE(review): `finder_function` below is not defined in this scope and
    is not a parameter, so calling this step raises NameError. It looks
    like it should be passed in or resolved from `pick`; compare with the
    sibling step `_drag_page_el_2_page_el`. TODO confirm intended fix.
    """
    src = get_visible_page_element(element_name)
    target = _get_visible_element(finder_function, pick, find_pattern)
    action_chain = ActionChains(world.browser.driver)
    action_chain.drag_and_drop(src, target)
    action_chain.perform()
def move_to_element(loc, **kwargs):
    """ Moves to an element.

    Args:
        loc: A locator, expects either a string, WebElement, tuple.
    Returns:
        The resolved WebElement that was moved to, so the call can be
        chained when the element is needed immediately afterwards.
    """
    # Page-header logo; used below to detect/undo the case where scrolling
    # the target into view scrolled the whole page away.
    brand = "//div[@id='page_header_div']//div[contains(@class, 'brand')]"
    wait_for_ajax()
    el = element(loc, **kwargs)
    move_to = ActionChains(browser()).move_to_element(el)
    try:
        move_to.perform()
    except MoveTargetOutOfBoundsException:
        # ff workaround: scroll manually, then retry the move
        execute_script("arguments[0].scrollIntoView();", el)
        if elements(brand) and not is_displayed(brand):
            # If it does it badly that it moves whole page, this moves it back
            try:
                execute_script("arguments[0].scrollIntoView();", element(brand))
            except MoveTargetOutOfBoundsException:
                pass
        move_to.perform()
    return el
def hover(self):
    """Hover the pointer over this item's name element."""
    name_el = self._root_element.find_element(*self._name_locator)
    chain = ActionChains(self.selenium).move_to_element(name_el)
    # Workaround for Firefox: also nudge the pointer down by the element's
    # height before performing the chain.
    if "firefox" in self.selenium.desired_capabilities["browserName"]:
        chain.move_by_offset(0, name_el.size['height'])
    chain.perform()
def search_field_focus(self):
    """Click into the search field, then wait for the platform submenu
    trigger to disappear."""
    submenu_trigger = self.find_element(*self._platform_submenu_trigger_locator)
    search_field = self.find_element(*self._search_field_locator)
    ActionChains(self.selenium).move_to_element(search_field).click().perform()
    # this can be a bit flaky sometimes
    self.wait.until(lambda s: not submenu_trigger.is_displayed())
def test_create_delete_user(self):
    """Log in, create a user (with tenant and role) via the Settings menu,
    verify it appears in the users table, then delete it and verify it is
    gone.
    """
    user_name = "test_user_ui_%s" % str(random.randint(1, 100))
    driver = self.driver
    driver.maximize_window()
    driver.get(self.base_url + "/")
    driver.find_element_by_name("username").send_keys(config.username)
    driver.find_element_by_name("password").send_keys(config.password)
    driver.find_element_by_css_selector("input.loginSubmit").click()
    # "Users" lives in the hover-activated Settings menu.
    Move = ActionChains(driver).move_to_element(driver.find_element_by_link_text("Settings"))
    Move.perform()
    driver.find_element_by_link_text("Users").click()
    # Creation
    driver.find_element_by_link_text("Create User").click()
    driver.find_element_by_id("name").send_keys(user_name)
    # NOTE(review): this email literal looks redacted; "..." % user_name
    # with no %s placeholder raises TypeError at runtime. The original was
    # presumably something like "%s@example.com" -- TODO confirm/restore.
    driver.find_element_by_id("email").send_keys("*****@*****.**" % user_name)
    driver.find_element_by_id("password").send_keys("password")
    driver.find_element_by_id("confirm_password").send_keys("password")
    driver.find_element(By.XPATH, "//input[@id='select_tenant_id']").click()
    driver.find_element_by_link_text("openstack").click()
    driver.find_element(By.XPATH, "//input[@id='select_role_id']").click()
    driver.find_element_by_link_text("Member").click()
    driver.find_element_by_name("_action_save").click()
    time.sleep(3)
    self.assertTrue(driver.find_element_by_xpath(
        "//tbody/tr/td[text()='%s']" % user_name).is_displayed())
    # deletion
    driver.find_element_by_xpath(
        "//*[text()='%s']/../td/input[@type='checkbox']" % user_name).click()
    driver.find_element_by_name("_action_delete").click()
    driver.find_element_by_id("btn-confirm").click()
    time.sleep(1)
    self.assertFalse(self.is_element_present(
        By.XPATH, "//tbody/tr/td[text()='%s']" % user_name))
def parse(self, response):
    """Open the listing page in Selenium, yield a scrapy Request per
    property link found, then advance via the 'newpagination' control;
    stops when the wait times out, no links are found, or paging fails.
    """
    self.driver.get(response.url)
    while True:
        time.sleep(1)
        try:
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located(
                    (By.XPATH, '//p[@class="propertyName"]/a')))
        except TimeoutException:
            return
        resp = TextResponse(url=self.driver.current_url,
                            body=self.driver.page_source, encoding='utf-8')
        urls = resp.xpath('//p[@class="propertyName"]/a/@href').extract()
        pprint(urls)
        if len(urls) == 0:
            return
        for url in urls:
            abs_url = 'http://www.squareyards.com' + url
            yield scrapy.Request(abs_url, callback=self.parse_property_info)
        try:
            link = self.driver.find_element_by_xpath(
                '//ul[@class="newpagination"]/li[2]')
            actions = ActionChains(self.driver)
            actions.click(link)
            actions.perform()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit. Any pagination failure still
            # means "no next page": stop, as before.
            return
def fetch_data(graph, data, f, t):
    """Hover each bar in `graph` and record {unix-timestamp-ms: price} into
    `data`, skipping "No results found." tooltips and duplicates.

    NOTE(review): uses the module-global `driver` rather than a parameter,
    and the `f`/`t` parameters are unused in this body -- presumably
    from/to bounds handled by the caller. TODO confirm.
    """
    bars = graph.find_elements_by_xpath('./*')
    for bar in bars:
        hov = ActionChains(driver).move_to_element(bar)
        hov.perform()
        date = get_date(graph)
        timeout = 20
        # Re-hover and re-read until the tooltip settles (~10s max).
        while date == 'Loading...':
            if timeout <= 0:
                return
            else:
                time.sleep(0.5)
                timeout -= 1
            print 'Waiting for bars to load...'
            hov.perform()
            date = get_date(graph)
        if date != "No results found." and date not in data and date:
            # multiply unix timestamp by 1000 since highcharts is in ms
            timestamp = get_unix_timestamp(date) * 1000
            data[timestamp] = get_price(graph)
            print 'Scraped: ' + str(timestamp) + ' ' + str(date)
def bot_mitigation(webdriver):
    """ performs three optional commands for bot-detection mitigation when
    getting a site """
    # bot mitigation 1: move the mouse randomly around a number of times
    window_size = webdriver.get_window_size()
    num_moves = 0
    num_fails = 0
    while num_moves < NUM_MOUSE_MOVES + 1 and num_fails < NUM_MOUSE_MOVES:
        try:
            if num_moves == 0:
                # move to the center of the screen
                # BUG FIX: x/y were computed from height/width respectively,
                # i.e. swapped -- x is horizontal (width), y vertical (height).
                x = int(round(window_size['width']/2))
                y = int(round(window_size['height']/2))
            else:
                # move a random amount in some direction
                move_max = random.randint(0, 500)
                x = random.randint(-move_max, move_max)
                y = random.randint(-move_max, move_max)
            action = ActionChains(webdriver)
            action.move_by_offset(x, y)
            action.perform()
            num_moves += 1
        except MoveTargetOutOfBoundsException:
            # Offset left the viewport; count the failure and try another.
            num_fails += 1
            pass
    # bot mitigation 2: scroll in random intervals down page
    scroll_down(webdriver)
    # bot mitigation 3: randomly wait so that page visits appear at irregular intervals
    time.sleep(random.randrange(RANDOM_SLEEP_LOW, RANDOM_SLEEP_HIGH))
def move_to_element(loc, **kwargs):
    """ Moves to an element.

    Args:
        loc: A locator, expects either a string, WebElement, tuple.
    Returns:
        Returns the element it was moved to to enable chaining.
    Raises:
        exceptions.CannotScrollException if every workaround fails.
    """
    # Page-header logo; used below to detect/undo the case where scrolling
    # the target into view scrolled the whole page away.
    brand = "//div[@id='page_header_div']//div[contains(@class, 'brand')]"
    wait_for_ajax()
    el = element(loc, **kwargs)
    if el.tag_name == "option":
        # Instead of option, let's move on its parent <select> if possible
        parent = element("..", root=el)
        if parent.tag_name == "select":
            move_to_element(parent)
            return el
    move_to = ActionChains(browser()).move_to_element(el)
    try:
        move_to.perform()
    except MoveTargetOutOfBoundsException:
        # ff workaround: scroll manually, then retry the move
        execute_script("arguments[0].scrollIntoView();", el)
        if elements(brand) and not is_displayed(brand):
            # If it does it badly that it moves whole page, this moves it back
            try:
                execute_script("arguments[0].scrollIntoView();", element(brand))
            except MoveTargetOutOfBoundsException:
                pass
        try:
            move_to.perform()
        except MoveTargetOutOfBoundsException:
            # This has become desperate now.
            raise exceptions.CannotScrollException(
                "Despite all the workarounds, scrolling to `{}` was unsuccessful.".format(loc))
    return el
def step_impl(context):
    """Drag the slider's bottom marker up by the slider's full height."""
    slider = context.browser.find_element_by_xpath(
        "//body/div[@id='slider-range']")
    slider_height = slider.size['height']
    marker = context.browser.find_element_by_xpath(
        "//body/div[@id='slider-range']/span[1]")
    drag = ActionChains(context.browser)
    drag.click_and_hold(marker).move_by_offset(0, -slider_height).release().perform()
    time.sleep(1)
def initial_unpledge(self):
    """Hover over the pledge button, then click it via JavaScript
    (the JS click works around the button not being directly clickable).
    """
    # NOTE: find_element_* raises NoSuchElementException rather than
    # returning None, so the original `if pledge_button is None` guard
    # was dead code and has been removed.
    pledge_button = self._webd_wrap._driver.find_element_by_class_name(
        "l-710px").find_element_by_xpath("section/div[2]/a/img")
    hover = ActionChains(self._webd_wrap._driver).move_to_element(pledge_button)
    hover.perform()
    self._webd_wrap._driver.execute_script(
        '(arguments[0]).click()', pledge_button)
def test_help_modal_opens_and_closes(self):
    """ The Help modal opens on click and closes on exit or blur """
    # Jane has some questions about the tool and sees a 'help' button in the menu. She
    # clicks it and is shown a window describing the Video Concept browser and a
    # point of contact at OEIT.
    self.student_log_in_complete()
    class_name = self.new_class_session.class_name
    side_nav_menu = self.browser.find_elements_by_class_name('nav-header')
    self.assertTrue(class_name, [nav_header.text for nav_header in side_nav_menu])
    # Check the help modal is present but not viewable
    help_modal = self.browser.find_element_by_id('help_modal')
    self.assertFalse(help_modal.is_displayed())
    help_link = self.browser.find_element_by_class_name('nav-help')
    help_link.click()
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.visibility_of_element_located((By.ID, 'help_modal')))
    self.assertTrue(help_modal.is_displayed())
    modal_background = self.browser.find_element_by_class_name('reveal-modal-bg')
    self.assertTrue(modal_background.is_displayed())
    # Close via the explicit close control.
    close_modal_link = self.browser.find_element_by_class_name('close-reveal-modal')
    close_modal_link.click()
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.invisibility_of_element_located((By.ID, 'help_modal')))
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.invisibility_of_element_located((By.CLASS_NAME, 'reveal-modal-bg')))
    self.assertFalse(help_modal.is_displayed())
    # Repeat but test close by clicking outside of the modal
    help_link.click()
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.visibility_of_element_located((By.ID, 'help_modal')))
    self.assertTrue(help_modal.is_displayed())
    self.assertTrue(modal_background.is_displayed())
    builder = ActionChains(self.browser)
    # Move to the backdrop, offset well outside the dialog, and click.
    click_outside_modal = builder.move_to_element(modal_background) \
        .move_by_offset(-500, -300) \
        .click()
    click_outside_modal.perform()
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.invisibility_of_element_located((By.ID, 'help_modal')))
    modal_wait = WebDriverWait(self.browser, 5).until(
        EC.invisibility_of_element_located((By.CLASS_NAME, 'reveal-modal-bg')))
    self.assertFalse(help_modal.is_displayed())
def _grab_current_hours():
    """
    This method will open a chrome window, log the user in to okta with
    their workiva email, open ADP Workforce, go to their timecard, and
    return their current hours.
    """
    driver = webdriver.Chrome()
    driver.get(se.ADP["okta"])
    wait = WebDriverWait(driver, 10)
    # Okta sign-in with stored credentials.
    wait.until(EC.element_to_be_clickable((By.ID, "signin-button")))
    username = driver.find_element_by_id("user-signin")
    username.send_keys(se.ADP["username"])
    password = driver.find_element_by_id("pass-signin")
    password.send_keys(se.ADP["password"])
    sign_in = driver.find_element_by_id("signin-button")
    sign_in.click()
    # Jump straight to the ADP Workforce Now app tile URL
    # (note: % binds tighter than +, so only the first literal is formatted).
    driver.get("%shome/adp_workforce_now/" % se.ADP["okta"] + \
               "0oac91hvcoOAQQBOYUYZ/3311")
    element = wait.until(EC.element_to_be_clickable((By.ID, "Myself_navItem")))
    nav_item = driver.find_element_by_id("Myself_navItem")
    # Hover to show menu buttons.
    action = ActionChains(driver)
    action.move_to_element(nav_item)
    action.perform()
    driver.find_element_by_id("Myself_ttd_Time&Attendance_MyTimeEntry").click()
    driver.implicitly_wait(7)
def test_t_511_links_transporting_a_boat(self, windows_handles=None):
    """Links -> 'Transporting a Boat' opens the PSMFC site in a new window."""
    print '\n' + "Verifying: Links -> Transporting a Boat"
    driver = self.driver
    driver.maximize_window()
    actions = ActionChains(driver)
    # Hover the Links menu to reveal the dropdown entries.
    menu = driver.find_element_by_xpath("//*[contains(text(), 'Links')]")
    actions.move_to_element(menu).perform()
    time.sleep(3)
    contactIDLink = driver.find_element_by_xpath("//*[contains(text(), 'Transporting a Boat')]")
    actions.move_to_element(contactIDLink).click().perform()
    # The link opens a new window; switch to the most recent handle.
    driver.switch_to.window(driver.window_handles[-1])
    transABoatPageVerification = WebDriverWait(driver, 20).until(EC.presence_of_element_located(
        (By.XPATH, "//*[contains(text(), 'Pacific States Marine Fisheries Commission')]")))
    transBoatTitle = driver.title
    assert transBoatTitle == 'Pacific States Marine Fisheries Commission', 'The Transporting a Boat page link did not load properly'
def test_o_511_links_roadway_weather_forecasts(self, windows_handles=None):
    """Links -> 'Roadway Weather Forecasts' opens the NWS Pocatello page
    in a new window."""
    print '\n' + "Verifying: Links -> National Roadway Weather Forecasts"
    driver = self.driver
    driver.maximize_window()
    actions = ActionChains(driver)
    # Hover the Links menu to reveal the dropdown entries.
    menu = driver.find_element_by_xpath("//*[contains(text(), 'Links')]")
    actions.move_to_element(menu).perform()
    time.sleep(3)
    contactIDLink = driver.find_element_by_xpath("//*[contains(text(), 'Roadway Weather Forecasts')]")
    actions.move_to_element(contactIDLink).click().perform()
    # The link opens a new window; switch to the most recent handle.
    driver.switch_to.window(driver.window_handles[-1])
    roadwayWeatherPageVerification = WebDriverWait(driver, 20).until(EC.presence_of_element_located(
        (By.XPATH, "//*[contains(text(), 'National Weather Service - NWS Pocatello')]")))
    roadwayWeatherTitle = driver.title
    assert roadwayWeatherTitle == 'National Weather Service - NWS Pocatello', 'The National Roadway Weather Forecasts page link did not load properly'
def test_f_511_tourist_info_rest_areas(self, window_handles=None): ### 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 ### RELATIVE X-PATH ### 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 print '\n' + "Verifying: Tourist Info -> Rest Areas" driver = self.driver driver.maximize_window actions = ActionChains(driver) menu = driver.find_element_by_xpath("//*[contains(text(), 'Tourist Info')]") actions.move_to_element(menu).perform() time.sleep(3) restAreasLink = driver.find_element_by_xpath("//*[@id='ddsubmenu2']/li[2]/a") actions.move_to_element(restAreasLink).click().perform() driver.switch_to.window(driver.window_handles[-1]) restAreasTitle = driver.title assert restAreasTitle == 'Road Maintenance | Idaho Transportation Department', 'The Rest Areas page link did not work correctly'
def test_n_511_links_national_weather_service(self, windows_handles=None):
    """Links -> 'National Weather Service' opens the NWS Western Region
    Headquarters page in a new window."""
    print '\n' + "Verifying: Links -> National Weather Service"
    driver = self.driver
    driver.maximize_window()
    actions = ActionChains(driver)
    # Hover the Links menu to reveal the dropdown entries.
    menu = driver.find_element_by_xpath("//*[contains(text(), 'Links')]")
    actions.move_to_element(menu).perform()
    time.sleep(3)
    contactIDLink = driver.find_element_by_xpath("//*[contains(text(), 'National Weather Service')]")
    actions.move_to_element(contactIDLink).click().perform()
    # The link opens a new window; switch to the most recent handle.
    driver.switch_to.window(driver.window_handles[-1])
    nationalWeatherPageVerification = WebDriverWait(driver, 20).until(EC.presence_of_element_located(
        (By.XPATH, "//*[contains(text(), 'Western Region Headquarters')]")))
    nationalWeatherServiceTitle = driver.title
    assert nationalWeatherServiceTitle == 'Western Region Headquarters', 'The National Weather Service page link did not load properly'
def init_browser(self):
    """Create self.browser for the configured browser type.

    NOTE(review): the first branch is hard-coded to `if True:` (the real
    condition is commented out), so the Edge and PhantomJS branches below
    are currently unreachable -- Firefox is always used. TODO confirm this
    override is intentional and not leftover debugging.
    """
    if True:  # self.browser_type == "firefox".upper():
        # adding adblock to the system
        # binary = FirefoxBinary(r"Drivers/firefox/bin/firefox.exe")
        # firefox_profile = FirefoxProfile()
        # if self.adblock is True:
        #     adblock_path = os.path.abspath('Drivers/adblock_plus-2.7.3-sm+tb+fx+an.xpi')
        #     firefox_profile.add_extension(extension=adblock_path)
        binary = FirefoxBinary(self.browser_path)
        self.browser = Firefox(firefox_binary=binary)
        # muting the browser via its keyboard shortcut (Ctrl+M)
        action = ActionChains(self.browser)
        action.send_keys(Keys.CONTROL + "m")
        action.perform()
    elif self.browser_type == "Edge".upper():
        edge_driver = "Drivers/EdgeDriver"
        os.environ["webdriver.edge.driver"] = edge_driver
        self.browser = Edge(edge_driver)
    elif self.browser_type == "PHANTOMJS":
        # Spoof a desktop Chrome user agent for PhantomJS.
        user_agent = (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) " +
            "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36"
        )
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        dcap["phantomjs.page.settings.userAgent"] = user_agent
        self.browser = PhantomJS("Drivers/phantomjs.exe", desired_capabilities=dcap)
def test_filter_tenants(self):
    """Create a tenant, verify the list filter shows only matching rows
    (and the row-count label agrees), then delete the tenant.
    """
    driver = self.driver
    driver.maximize_window()
    driver.get(self.base_url + "/")
    driver.find_element_by_name("username").send_keys(config.username)
    driver.find_element_by_name("password").send_keys(config.password)
    driver.find_element_by_css_selector("input.loginSubmit").click()
    # "Tenants" lives in the hover-activated Settings menu.
    Move = ActionChains(driver).move_to_element(driver.find_element_by_link_text("Settings"))
    Move.perform()
    driver.find_element_by_link_text("Tenants").click()
    driver.find_element_by_link_text("Create Tenant").click()
    driver.find_element_by_id("name").send_keys("Test_tenant")
    driver.find_element_by_id("description").send_keys("Test_description")
    driver.find_element_by_id("enabled").click()
    driver.find_element_by_name("_action_save").click()
    # Type into the filter box and verify only the new tenant stays visible.
    driver.find_element_by_xpath("//div/input[@type='text']").send_keys("Test_tenant")
    time.sleep(1)
    self.assertTrue(driver.find_element_by_xpath("//tbody/tr/td[text()='Test_tenant']").is_displayed())
    self.assertFalse(driver.find_element_by_xpath("//tbody/tr/td[text()='services']").is_displayed())
    # Visible-row count must match the count label next to the filter.
    _all_elem = len(driver.find_elements_by_xpath("//tbody/tr"))
    _elem_disp = len(driver.find_elements_by_xpath("//tbody/tr[@style='display: none;']"))
    self.assertTrue(_all_elem - _elem_disp == int(driver.find_elements_by_xpath("//div/label")[1].text))
    # Open the first displayed tenant row and delete it.
    elements = driver.find_elements_by_xpath("//tbody/tr/td/a")
    time.sleep(1)
    for i in elements:
        if i.is_displayed() != 0:
            i.click()
            break
    time.sleep(5)
    driver.find_element_by_xpath('//*[@id="delete"]/span/div').click()
    driver.find_element_by_xpath('//*[@id="btn-confirm"]/span').click()
    # alert = driver.switch_to_alert()
    # alert.accept()
    self.assertFalse(self.is_element_present(By.XPATH, "//tbody/tr/td[text()='Test_tenant']"))
def test_p_511_links_I15_construction(self, windows_handles=None):
    """Links -> 'I-15 Construction' opens the ITD construction page in a
    new window."""
    print '\n' + "Verifying: Links -> I15 Construction"
    driver = self.driver
    driver.maximize_window()
    actions = ActionChains(driver)
    # Hover the Links menu to reveal the dropdown entries.
    menu = driver.find_element_by_xpath("//*[contains(text(), 'Links')]")
    actions.move_to_element(menu).perform()
    time.sleep(3)
    contactIDLink = driver.find_element_by_xpath("//*[contains(text(), 'I-15 Construction')]")
    actions.move_to_element(contactIDLink).click().perform()
    # The link opens a new window; switch to the most recent handle.
    driver.switch_to.window(driver.window_handles[-1])
    i15PageVerification = WebDriverWait(driver, 20).until(EC.presence_of_element_located(
        (By.XPATH, "//*[contains(text(), 'I-15 Construction | Idaho Transportation Department')]")))
    i5Title = driver.title
    assert i5Title == 'I-15 Construction | Idaho Transportation Department', 'The I15 Construction page link did not load properly'
def put_element_on_grid(workspace_page, element_str):
    '''find and get the 'assembly', and the div for the grid object'''
    browser = workspace_page.browser
    # The library button can go stale while the page refreshes, so the
    # find-and-drag is retried up to 3 times before re-raising.
    for retry in range(3):
        try:
            assembly = workspace_page.find_library_button(element_str)
            chain = ActionChains(browser)
            chain.click_and_hold(assembly)
            chain.move_by_offset(-100, 0).perform()
        except StaleElementReferenceException:
            if retry < 2:
                logging.warning('put_element_on_grid %s:'
                                ' StaleElementReferenceException', element_str)
            else:
                raise
        else:
            break
    grid = browser.find_element_by_xpath('//div[@id="-dataflow"]')
    check_highlighting(grid, True, "Grid")
    release(chain)  # drop the held element onto the grid
    # deal with the modal dialog
    name = NameInstanceDialog(workspace_page).create_and_dismiss()
    # make sure it is on the grid
    ensure_names_in_workspace(workspace_page, [name],
                              "Dragging '" + element_str +
                              "' to grid did not produce a new element on page")
    return name
def test_page_elements(self):
    """Open mobile PubMed, check the title, and hover the search box."""
    go_to('http://www.ncbi.nlm.nih.gov/m/pubmed')
    assert_title('PubMed Mobile')
    search_box = self.browser.find_element_by_id('srch')
    ActionChains(self.browser).move_to_element(search_box).perform()
# Baidu search-settings smoke script: hover the settings menu entry to open
# its dropdown, enter search settings, and select the third results-per-page
# option, printing the option's selected state before and after.
from common.base import Base
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select

driver = webdriver.Firefox()
driver.get('https://www.baidu.com')
zentao = Base(driver)
# Hover the settings link to reveal its dropdown menu.
loc1 = ('link text', '设置')
mouse = zentao.findElement(loc1)
ActionChains(driver).move_to_element(mouse).perform()
loc2 = ('link text', '搜索设置')
zentao.click(loc2)
# Third <option> of the results-per-page <select>; initially unselected.
loc3 = ('xpath', ".//*[@id='nr']/option[3]")
r1 = zentao.findElement(loc3).is_selected()
print(r1)
# Select it by index and confirm it is now selected.
loc4 = ('id', 'nr')
select = zentao.findElement(loc4)
Select(select).select_by_index(2)
r2 = zentao.findElement(loc3).is_selected()
print(r2)
# Hover each product tile on the category page to reveal its "Add to cart"
# button, click it, then dismiss the cart popup via "Continue Shopping".
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome()
url = 'http://automationpractice.com/index.php?id_category=3&controller=category'
driver.get(url)
product_containers = driver.find_elements_by_class_name('product-container')
for index, product_container in enumerate(product_containers):
    # The add-to-cart button only appears while the tile is hovered.
    hover = ActionChains(driver).move_to_element(product_container)
    hover.perform()
    # click on add to cart (xpath li positions are 1-based, hence index + 1)
    driver.find_element_by_xpath(
        '//*[@id="center_column"]/ul/li[%s]/div/div[2]/div[2]/a[1]/span' % (index + 1)).click()
    time.sleep(1)
    # click on Continue Shopping
    driver.find_element_by_xpath(
        '//*[@id="layer_cart"]/div[1]/div[2]/div[4]/span').click()
    time.sleep(0.5)
def fill_form(self):
    """Drive the station-data download form end to end: select city and
    station, time unit ('1 Saat'), pollutant/weather parameters and the date
    range, run the query, then scrape the result tables into a CSV file.

    Side effects: appends the clicked station name to ``self.names`` and
    writes a uuid-named CSV under ``l_final_path + code``.
    """
    #city
    #<ul unselectable="on" class="k-list k-reset" tabindex="-1" aria-hidden="false" id="CityId_listbox" aria-live="polite" data-role="staticlist" role="listbox">
    #<li tabindex="-1" role="option" unselectable="on" class="k-item" data-offset-index="6">Ankara</li></ul>
    # print("el1")
    self.select_city()
    el11 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id11 ' + el11.get_attribute("id"))
    print('class11 ' + el11.get_attribute("class"))

    #SELECT STATIONS
    print("stations dropdown open")
    print("el2")
    el2 = self.driver.find_element_by_xpath(
        """//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[1]/div[4]/div/div/div/div"""
    )  #//div[@class='k-multiselect-wrap k-floatwrap']
    actions = ActionChains(self.driver)
    actions.move_to_element(el2)
    actions.pause(3)
    actions.click(el2)
    actions.perform()
    el22 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id22 ' + el22.get_attribute("id"))
    print('class22 ' + el22.get_attribute("class"))

    #click stations dropdown
    print("stations dropdown")
    wait = WebDriverWait(self.driver, 10)  #Changed
    try:
        element = wait.until(
            EC.text_to_be_present_in_element((
                By.XPATH,
                "//div[@id='StationIds-list']//ul[@id='StationIds_listbox']/li[text()='0107000 Antalya- (Antalya-Muratpaşa)']"
            ), 'Antalya'))
    except Exception as e:
        print(e)
        # Retry: reselect the city and wait longer for the station list.
        self.select_city()
        wait = WebDriverWait(self.driver, 20)
        # NOTE(review): the XPath below is missing the closing "]" after the
        # text() predicate — it will raise an invalid-selector error; confirm
        # and fix.
        element = wait.until(
            EC.text_to_be_present_in_element((
                By.XPATH,
                "//div[@id='StationIds-list']//ul[@id='StationIds_listbox']/li[text()='0107000 Antalya- (Antalya-Muratpaşa)'"
            ), 'Antalya'))

    #to click all the stations use this code
    """
    stitems = self.driver.find_elements_by_xpath("//div[@id='StationId-list']//ul[@id='StationId_listbox']/li")
    for item in stitems:
        try:
            if 'Antalya' in item.text:
                # print ('item '+item.text+ " : "+item.get_attribute("class"))
                print ('aqui')
                actions = ActionChains(self.driver)
                actions.move_to_element(item)
                #actions.pause(1)
                actions.click(item)
                actions.perform()
        except Exception as e:
            print(e)
    """
    #at this moment, just one station is giving values, so just click that one
    #XPATH: to div where the list of stations; THIS has changed
    sitem = self.driver.find_element_by_xpath(
        "//div[@id='StationIds-list']//ul[@id='StationIds_listbox']/li[text()='0107000 Antalya- (Antalya-Muratpaşa)']"
    )
    actions = ActionChains(self.driver)
    actions.move_to_element(sitem)
    actions.pause(1)
    actions.click(sitem)
    actions.perform()
    # Remember the station label; it becomes the CSV's station_id later on.
    self.names.append(sitem.text)
    #print ('item1 '+liitems_1.text+ " : "+liitems_1.get_attribute("class"))
    #liitems_2 = self.driver.find_element_by_xpath("//div[@id='StationId-list']//ul[@id='StationId_listbox']/li[2]")
    #print ('item1 '+liitems_2.text+ " : "+liitems_2.get_attribute("class"))
    el33 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id33 ' + el33.get_attribute("id"))
    print('class33 ' + el33.get_attribute("class"))
    time.sleep(5)
    #in case the dropdown does not hide. PROBLEM: focus goes to body
    #self.driver.execute_script("document.getElementById('StationIds-list').style.display = 'none';")
    #time.sleep(5)

    #SELECT HOUR
    #XPATH //*[@id="StationDataDownloadForm"]/fieldset[1]/div[2]/div[1]/div/div/span/span
    #OLD XPATH: //*[@id='page-wrapper']/div[2]/form/fieldset[1]/div[2]/div[4]/div/div/span[1]/span
    print("hours dropdown")
    el5 = self.driver.find_element_by_xpath(
        """//*[@id='page-wrapper']/div[2]/form/fieldset[1]/div[2]/div[1]/div/div/span/span"""
    )  #//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[2]/div[4]/div/div/span/span/span[2]""")
    actions = ActionChains(self.driver)
    actions.move_to_element(el5)
    actions.pause(3)
    actions.click(el5)
    actions.perform()
    el55 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id55 ' + el55.get_attribute("id"))
    print('class55 ' + el55.get_attribute("class"))
    #WebDriverWait(self.driver, 30).until(wait_for_display((By.XPATH, '//body/div[11]')))
    WebDriverWait(self.driver, 30).until(
        wait_for_display((By.XPATH, """//*[@id="TimeUnit-list"]""")))
    houritem = self.driver.find_element_by_xpath(
        """//ul[@id='TimeUnit_listbox']/li[text()='1 Saat']""")
    #for item in paritems:
    #try:
    #print ('item '+item.text)
    # NOTE(review): `is` compares identity, not string equality — this test
    # is unreliable (CPython may or may not intern the values); `==` was
    # almost certainly intended.
    if houritem.text is '1 Saat':
        # print ('item '+item.text+ " : "+item.get_attribute("class"))
        print('hour ')
    else:
        # Fall back to the first entry of the time-unit listbox.
        houritem = self.driver.find_element_by_xpath(
            """//div[@id='TimeUnit-list']//ul[@id='TimeUnit_listbox']/li[1]"""
        )
    actions = ActionChains(self.driver)
    actions.move_to_element(houritem)
    actions.pause(2)
    actions.click(houritem)
    actions.perform()
    # break
    #except Exception as e:
    #    print(e)

    #SELECT PARAMETERS
    #Click on parameters dropdown
    print("parameters dropdown open")
    #XPATH: to div where the list of parameters; THIS has changed
    #OLDel4 = self.driver.find_element_by_xpath("""//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[2]/div[1]/div/div/div""")#//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[1]/div[4]/div/div/div/div')
    param_dd = self.driver.find_element_by_xpath(
        """//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[2]/div[4]/div/div/div/div"""
    )  #//*[@id="StationDataDownloadForm"]/fieldset[1]/div[2]/div[4]/div/div/div/div""")
    print('idparam_dd' + param_dd.get_attribute("id"))
    print('classparam_dd ' + param_dd.get_attribute("class"))
    actions = ActionChains(self.driver)
    actions.move_to_element(param_dd)
    actions.pause(3)
    actions.click(param_dd)
    actions.perform()
    el44 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id44 ' + el44.get_attribute("id"))
    print('class44 ' + el44.get_attribute("class"))
    # #To check element by element (not working everytime)
    """
    paritems = self.driver.find_elements_by_xpath("//div[@id='SelectedParameters-list']//ul[@id='SelectedParameters_listbox']/li")
    i=0
    for item in paritems:
        try:
            #print ('item '+item.text)
            if item.text in params:
                print ('item '+item.text)#+ " : "+item.get_attribute("class"))
                print ('param ' +str(i))
                actions = ActionChains(self.driver)
                actions.move_to_element(item)
                actions.pause(3)
                actions.click(item)
                actions.perform()
                i+=1
        except Exception as e:
            print(e)
    """
    # Click each desired parameter in the listbox, one hover+click per item.
    #XPATH: to div where the list of parameters; THIS has changed
    #OLD: //div[@id='SelectedParameters-list']//ul[@id='SelectedParameters_listbox']/li[text()='PM10']
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='PM10']"""
    )  #li[1]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='SO2']"""
    )  #li[2]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='Hava Basinci']"""
    )  #li[19]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='Hava Sicakligi']"""
    )  #li[20]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='Ruzgar Hizi']"""
    )  #li[39]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()
    paritem = self.driver.find_element_by_xpath(
        """//div[@id='Parameters-list']//ul[@id='Parameters_listbox']/li[text()='Ruzgar Yönü']"""
    )  #li[40]")
    actions = ActionChains(self.driver)
    actions.move_to_element(paritem)
    actions.pause(3)
    actions.click(paritem)
    actions.perform()

    #SEST START DATE AND END DATE
    #print(datetime.date.today().timestamp())
    """
    now = datetime.datetime.now().replace(hour=0, minute=0,second=0)
    day =now.strftime("%Y-%m-%d")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
    print(now.timestamp())
    b = int(now.timestamp()) * 1000
    print (b)
    date = datetime.datetime.fromtimestamp(1543356000000 / 1e3)
    print (date)
    """
    """yesterday = datetime.date.fromordinal(datetime.date.today().toordinal()-1)
    DAY = 24*60*60 # POSIX day in seconds
    timestamp = (yesterday.toordinal() - date(1970, 1, 1).toordinal()) * DAY
    print (timestamp)
    """
    yesterday = datetime.datetime.now() - timedelta(
        days=1)  # NOTE(review): now() returns *local* time, not UTC
    yesterday.strftime('%m%d%y')
    yesterdayDate = yesterday.strftime("%Y-%m-%d")
    # Millisecond epoch timestamp expected by the page's hidden inputs.
    yesterdayts = int(yesterday.timestamp()) * 1000
    print(yesterdayts)
    datet = datetime.datetime.fromtimestamp(yesterdayts / 1e3)
    print(datet)
    ts01012007 = 1167602400000  #timestamp for first day of 2007
    """first date <input type="hidden" name="start_TimeStamp" value="1167602400000"> """
    self.driver.execute_script(
        "document.getElementsByName('start_TimeStamp')[0].value='" +
        str(ts01012007) + "'")
    """last date <input type="hidden" name="end_TimeStamp" value="1543356000000">"""
    self.driver.execute_script(
        "document.getElementsByName('end_TimeStamp')[0].value='" +
        str(yesterdayts) + "'")
    """value: datetime.date.today().timestamp() """
    #print(datetime.date.today().timestamp())

    #SELECT HOUR <-- JUST REPEAT FOR BEING ABLE TO CLICK THE BUTTON (the properties dropdown hides it and clicking to close change focus to body)
    #XPATH //*[@id="StationDataDownloadForm"]/fieldset[1]/div[2]/div[1]/div/div/span/span
    #OLD XPATH: //*[@id='page-wrapper']/div[2]/form/fieldset[1]/div[2]/div[4]/div/div/span[1]/span
    print("hours dropdown")
    el5 = self.driver.find_element_by_xpath(
        """//*[@id='page-wrapper']/div[2]/form/fieldset[1]/div[2]/div[1]/div/div/span/span"""
    )  #//*[@id="page-wrapper"]/div[2]/form/fieldset[1]/div[2]/div[4]/div/div/span/span/span[2]""")
    actions = ActionChains(self.driver)
    actions.move_to_element(el5)
    actions.pause(3)
    actions.click(el5)
    actions.perform()
    el55 = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('id55 ' + el55.get_attribute("id"))
    print('class55 ' + el55.get_attribute("class"))
    #WebDriverWait(self.driver, 30).until(wait_for_display((By.XPATH, '//body/div[11]')))
    WebDriverWait(self.driver, 30).until(
        wait_for_display((By.XPATH, """//*[@id="TimeUnit-list"]""")))
    houritem = self.driver.find_element_by_xpath(
        """//ul[@id='TimeUnit_listbox']/li[text()='1 Saat']""")
    #for item in paritems:
    #try:
    #print ('item '+item.text)
    # NOTE(review): same `is` vs `==` issue as above.
    if houritem.text is '1 Saat':
        # print ('item '+item.text+ " : "+item.get_attribute("class"))
        print('hour ')
    else:
        houritem = self.driver.find_element_by_xpath(
            """//div[@id='TimeUnit-list']//ul[@id='TimeUnit_listbox']/li[1]"""
        )
    actions = ActionChains(self.driver)
    actions.move_to_element(houritem)
    actions.pause(2)
    actions.click(houritem)
    actions.perform()
    # break
    #except Exception as e:
    #    print(e)

    """ Button"""
    print("button")
    button = self.driver.find_element_by_xpath(
        """//button[text()='Sorgula']""")
    #button.click()
    actions = ActionChains(self.driver)
    actions.move_to_element(button).click(button)
    actions.perform()
    elbut = self.driver.switch_to.active_element  #self.driver.find_element_by_xpath('//*[@id="CityId-list"]/span/input')
    print('idbut ' + elbut.get_attribute("id"))
    print('classbut ' + elbut.get_attribute("class"))
    time.sleep(5)
    # Poll (up to ~20 x 10s waits) for the result grid's export link to show
    # up, which signals the query finished.
    notdone = True
    i = 0
    wait = WebDriverWait(self.driver, 10)
    while notdone:
        try:
            element = wait.until(
                EC.presence_of_element_located(
                    (By.XPATH, """//*[@id="grid"]/div[1]/a""")))
            notdone = False
        except:
            if (i < 20):
                i += 1
            else:
                print('info took too long to load; not today')
                notdone = False
    #To download excel click on: //*[@id="grid"]/div[1]/a"""
    """
    print ("excel")
    excel = self.driver.find_element_by_xpath(" ""//*[@id="grid"]/div[1]/a"" "")
    actions = ActionChains(self.driver)
    actions.move_to_element(excel).click(excel)
    actions.perform()
    """
    #Scroll to the end to be able to press "show all results"
    html = self.driver.find_element_by_tag_name('html')
    html.send_keys(Keys.END)
    print("showall")
    #Show all drop down
    ## OPEN DROPDOWN
    #XPath //*[@id="grid"]/div[5]/span[1]/span/span
    # (OLD XPATH //*[@id="grid"]/div[4]/span[1]/span/span/span[2]/span
    showalldrop = self.driver.find_element_by_xpath(
        """//*[@id="grid"]/div[5]/span[1]/span/span""")
    actions = ActionChains(self.driver)
    actions.move_to_element(showalldrop).click(showalldrop)
    actions.perform()
    #To show all option click on :
    #OLD XPATH /html/body/div[11]/div/div[2]/ul/li[1]
    #XPATH: //body/div[11]/div/div[2]/ul/li[4]
    showall = self.driver.find_element_by_xpath(
        """//body/div[11]/div/div[2]/ul/li[4]""")
    actions = ActionChains(self.driver)
    actions.move_to_element(showall)
    actions.pause(3)
    actions.click(showall)
    actions.perform()

    #Now CLICK refresh
    #XPATH //*[@id="grid"]/div[5]/a[5]
    print("refresh")
    refresh = self.driver.find_element_by_xpath(
        """//*[@id="grid"]/div[5]/a[5]""")
    actions = ActionChains(self.driver)
    actions.move_to_element(refresh)
    actions.pause(3)
    actions.click(refresh)
    actions.perform()
    time.sleep(5)

    #Parse the tables
    soup_level2 = BeautifulSoup(self.driver.page_source, 'lxml')
    tables = soup_level2.find_all('table')
    # Table layout observed on the page: [5] headers, [6] timestamps,
    # [7] measured values — assumes the grid structure is stable; verify.
    headers_table = tables[5]  #0
    times_table = tables[6]
    values_table = tables[7]  #1
    df_headers = pd.read_html(str(headers_table), header=[0, 1])
    #print (df_headers[0].columns.tolist)
    hnames = df_headers[0].columns.get_level_values(level=1)
    df_values = pd.read_html(str(values_table), header=None, decimal=',',
                             thousands='.')
    df_times = pd.read_html(str(times_table), header=None)
    print(df_values[0].columns.tolist)
    print(df_times[0].columns.tolist)
    values = df_values[0]
    #Change columns names
    #values.columns = ["DateTime", hnames[0], hnames[1],hnames[2],hnames[3],hnames[4],hnames[5]]
    values.columns = [
        hnames[0], hnames[1], hnames[2], hnames[3], hnames[4], hnames[5]
    ]
    values["DateTime"] = df_times[0]
    values["DateTime"] = values["DateTime"].astype(str)
    #Clean columns names
    values.columns = values.columns.str.replace(
        r"\(.*\)", "")  #remove all braces and data inside
    #translate column names
    values.rename(columns={
        'PM10 ': 'PM10',
        'SO2 ': 'SO2',
        'HavaSicakligi ': 'air_temperature',
        'HavaBasinc ': 'air_preassure',
        'RuzgarHizi ': 'wind_speed_ms',
        'RuzgarYon ': 'wind_from_direction'
    },
                  inplace=True)
    #oldvalues.rename(columns={'Hava Sicakligi':'air_temperature','Hava Basinci':'air_preassure','Ruzgar Hizi':'wind_speed_ms', 'Ruzgar Yönü':'wind_from_direction'},inplace=True)
    #reformat date
    values['DateTime'] = pd.to_datetime(
        values['DateTime'],
        format='%d.%m.%Y %H:%M').dt.strftime('%Y-%m-%dT%H:%M+03')
    #add necessary columns
    values['station_id'] = self.names[0]
    values['Latitude'] = 36.887500
    values['Longitude'] = 30.726667
    try:
        #values['wind_speed'] = 1.943844 * values['wind_speed_ms'].astype(float)
        # 1.943844 converts m/s to knots.
        values['wind_speed'] = 1.943844 * pd.to_numeric(
            values['wind_speed_ms'], errors='coerce')
    except:
        print('Cannot transform to knots')
    print(values.dtypes)
    df_final = pd.DataFrame()
    df_final = df_final.append(values, ignore_index=True)
    #create file
    touterdir = l_final_path
    if not os.path.exists(touterdir):
        os.mkdir(touterdir)
    toutdir = touterdir + code
    if not os.path.exists(toutdir):
        os.mkdir(toutdir)
    #no subindex folder
    #ttoutdir = toutdir +'/'+ code+'_1'
    #if not os.path.exists(ttoutdir):
    #    os.mkdir(ttoutdir)
    csvfile = str(uuid.uuid4()) + ".csv"
    tfilename = os.path.join(
        toutdir, csvfile)  #change to ttoutdir if subindex folder
    #copy to
    #fpath = l_final_path+code+'/'
    #filname = fpath + csvfile
    #create the file with just new values
    df_final.to_csv(tfilename, mode='w', encoding='utf-8-sig', index=False)
# Script: fill and submit the suninjuly "find_xpath_form" training page.
from selenium import webdriver
import time
import math
from selenium.webdriver.common.action_chains import ActionChains

link = "http://suninjuly.github.io/find_xpath_form"
# NOTE(review): the matching except/finally for this try is not present in
# this chunk — it appears to be truncated; confirm against the full file.
try:
    browser = webdriver.Chrome()
    actions = ActionChains(browser)
    #open the URL
    browser.get(link)
    #wait till next page is loaded
    # time.sleep(1)
    #find all text fields and button of the form
    first_name = browser.find_element_by_name("first_name")
    last_name = browser.find_element_by_name("last_name")
    city = browser.find_element_by_class_name("city")
    country = browser.find_element_by_id("country")
    button = browser.find_element_by_xpath('//button[@type="submit"]')
    #filling the form
    first_name.send_keys('Ivan')
    last_name.send_keys('Petrov')
    city.send_keys('Smolensk')
    country.send_keys('Russia')
    #scroll to and click button: moving to the element scrolls it into view
    actions.move_to_element(
        browser.find_element_by_css_selector('button.btn')).perform()
    button.click()
def spider_index_year(browser, wait):
    """Crawl the Baidu Index trend chart for every keyword listed in the
    input spreadsheet, saving per-year screenshots and an index file.

    For each keyword: search it, then for each of 5 years (2013..2017) drive
    the date-range selects twice (Jan-Jun, Jul-Dec) and save a screenshot of
    the trend chart each time. Keywords that raise are appended to the
    "invalid keywords" file.
    """
    # Read the keyword list (column '关键字' = "keyword") from the Excel sheet.
    df = pd.read_excel(fpath1 + '/采集关键词.xlsx')
    # if os.path.isfile(filepath+'/无效关键词.xlsx'):
    #     os.remove(filepath+'/无效关键词.xlsx')
    # Append-mode log of keywords that failed ('无效关键词' = invalid keywords).
    fff = open(filepath + '/无效关键词.xlsx', 'a', encoding='utf-8')
    for value in df['关键字'].values:
        try:
            # Type the keyword into the search box and submit.
            input2 = wait.until(
                EC.presence_of_element_located((
                    By.CSS_SELECTOR,
                    '#header > div.wrap.clearfix > div.search > form > input[type="text"]'
                )))
            input2.clear()
            input2.send_keys(value)
            browser.find_element_by_css_selector(
                '#header > div.wrap.clearfix > div.search > form > button'
            ).click()
            time.sleep(2)
            # Anchor element on the SVG trend chart (hover target fallback).
            element1 = wait.until(
                EC.presence_of_element_located((
                    By.CSS_SELECTOR,
                    '#trend_wrap > svg > g:nth-child(5) > g:nth-child(4) > text:nth-child(2)'
                )))
            # Create the per-keyword output folders.
            if not os.path.exists(filepath + value):
                os.mkdir(filepath + value)
            if not os.path.exists(filepath + value + '/whole'):
                os.mkdir(filepath + value + '/whole')
            # if not os.path.exists(filepath+value+'/pc'):
            #     os.mkdir(filepath+value+'/pc')
            # if not os.path.exists(filepath+value+'/mobile'):
            #     os.mkdir(filepath+value+'/mobile')
            if not os.path.exists(filepath + value + '/whole_值'):
                os.mkdir(filepath + value + '/whole_值')
            # if not os.path.exists(filepath+value+'/pc_值'):
            #     os.mkdir(filepath+value+'/pc_值')
            # if not os.path.exists(filepath+value+'/mobile_值'):
            #     os.mkdir(filepath+value+'/mobile_值')
            # Start the index file fresh for this keyword.
            if os.path.isfile(filepath + value + '/baidu_index.txt'):
                os.remove(filepath + value + '/baidu_index.txt')
            ff = open(filepath + value + '/baidu_index.txt',
                      'a',
                      encoding='utf-8')
            for i in range(5):  # one pass per year, 2013 + i
                element = wait.until(
                    EC.presence_of_element_located((
                        By.CSS_SELECTOR,
                        '#trend_wrap > svg > g:nth-child(5) > g:nth-child(4) > text:nth-child(2) > tspan'
                    )))
                # Hover the chart so the date-range controls become active.
                if element:
                    ActionChains(browser).move_to_element(element).perform()
                elif element1:
                    ActionChains(browser).move_to_element(element1).perform()
                time.sleep(2)
                # First half of the year: start = year i, month 1 .. month 6.
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[1]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[1]/option['
                    + str(i + 1) + ']').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[2]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[2]/option[1]'
                ).click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[1]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[1]/option['
                    + str(i + 1) + ']').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[2]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[2]/option[6]'
                ).click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/p/button[1]').click()
                time.sleep(3)
                # Screenshot for the first half of the year ('-1' suffix).
                if os.path.isfile(filepath + value + '/whole/' + value +
                                  '_wholeTrence_' + str(i) + '-1' + ".png"):
                    os.remove(filepath + value + '/whole/' + value +
                              '_wholeTrence_' + str(i) + '-1' + ".png")
                browser.save_screenshot(filepath + value + '/whole/' + value +
                                        '_wholeTrence_' + str(i) + '-1' +
                                        ".png")
                ff.write(
                    str(2013 + i) + ':' + ' ' + value + '_wholeTrence_' +
                    str(i) + '-1' + ".png" + '\n')
                element = wait.until(
                    EC.presence_of_element_located((
                        By.CSS_SELECTOR,
                        '#trend_wrap > svg > g:nth-child(5) > g:nth-child(4) > text:nth-child(2) > tspan'
                    )))
                # browser.refresh()
                # time.sleep(3)
                element2 = wait.until(
                    EC.presence_of_element_located((
                        By.CSS_SELECTOR,
                        '#trend_wrap > svg > g:nth-child(5) > g:nth-child(4) > g:nth-child(8) > text > tspan'
                    )))
                ActionChains(browser).move_to_element(element2).perform()
                ActionChains(browser).move_to_element(element).perform()
                time.sleep(2)
                # Second half of the year: month 7 .. month 12.
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[2]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[1]/dd/select[2]/option[7]'
                ).click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[2]').click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/dl[2]/dd/select[2]/option[12]'
                ).click()
                browser.find_element_by_xpath(
                    '//*[@id="trend_wrap"]/div[2]/p/button[1]').click()
                time.sleep(3)
                # Screenshot for the second half of the year ('-2' suffix).
                if os.path.isfile(filepath + value + '/whole/' + value +
                                  '_wholeTrence_' + str(i) + '-2' + ".png"):
                    os.remove(filepath + value + '/whole/' + value +
                              '_wholeTrence_' + str(i) + '-2' + ".png")
                browser.save_screenshot(filepath + value + '/whole/' + value +
                                        '_wholeTrence_' + str(i) + '-2' +
                                        ".png")
                ff.write(
                    str(2013 + i) + ':' + ' ' + value + '_wholeTrence_' +
                    str(i) + '-2' + ".png" + '\n')
            ff.close()
            # Randomized delay between keywords to look less bot-like.
            time.sleep(random.randint(2, 6))
        except Exception as e:
            # Record the keyword as invalid and move on.
            fff.write(value + '\n')
            print(e)
            print(value)
    fff.close()
def click(target):
    """Click *target* 5px inside its top-left corner, then wait for the
    resulting Dash callbacks to finish."""
    chain = ActionChains(dash_duo.driver)
    chain.move_to_element_with_offset(target, 5, 5)
    chain.click()
    chain.perform()
    dash_duo._wait_for_callbacks()
def doCommentRound(self):
    """Walk the follower list (index ``j``) and try to leave comments on up
    to 5 accounts (counter ``i``). Commented usernames are persisted to
    ``self.previousComments`` so they are skipped in future rounds.
    """
    j = 1
    i = 0
    scrolled = False  # NOTE(review): set below but never read afterwards
    ac = ActionChains(self.driver)
    dms = self.driver.find_element_by_class_name("xWeGp")
    while i < 5:
        try:
            if (j > 5):
                scrolled = True
            # Scroll the j-th follower row into view inside the dialog.
            target = self.driver.find_element_by_css_selector(
                "body > div.RnEpo.Yx5HN > div > div > div.isgrP > ul > div > li:nth-child("
                + str(j) + ")")
            self.driver.execute_script(
                'arguments[0].scrollIntoView(true);', target)
            # Check if user was previously commented on
            username = self.driver.find_elements_by_class_name(
                "_0imsa")[j].text
            print("Got username text: " + username)
            if (self.alreadyCommented(username)):
                j += 1
                continue
            # Click on follower from list of followers
            self.getFollower(j)
            try:
                # NOTE(review): the original source is corrupted in this
                # region — a redaction marker ("******") swallowed part of
                # the code that read the username from the profile header
                # and the condition guarding the first branch. The
                # commented-out lines below are preserved verbatim; the
                # active control flow is the closest faithful reading and
                # must be confirmed against the original file.
                # try:
                # username = self.driver.find_element_by_css_selector("#react-root > section > main > div > header > section > div.nZSzR > h2").text
                # print("Got username: "******"#react-root > section > main > div > header > section > div.nZSzR > h1").text
                # print("Got username: "******"Comment number " + str(i) + " on account number " + str(j))
                # self.previousComments.write(username + " ")
                # else:
                if (self.comment(randomCommentText())):
                    i += 1
                    print('Commented on ' + username)
                    print("Comment number " + str(i) +
                          " on account number " + str(j))
                    self.previousComments.write(username + " ")
                    #i+=1
                    j += 1
                else:
                    j += 1
                self.driver.back()
            except:
                print('Unable to comment')
                self.driver.back()
                j += 1
        except:
            print('Overall comment round failed.')
            # posts = self.driver.find_element_by_xpath("//*[contains(text(), 'Follow')]")
            # ac.move_to_element(posts).click().perform()
            #dms = self.driver.find_element_by_class_name("xWeGp")
            # Click the followers heading to reset focus before retrying.
            followers_heading = self.driver.find_element_by_class_name(
                "m82CD")
            ac.move_to_element(followers_heading).click().perform()
            print('Moved to avoid comment failure.')
            j += 1
        # Safety valve: give up after walking 150 list entries.
        if (j > 150):
            return True
    self.previousComments.close()
    return True
driver.find_element_by_xpath(
    "/html/body/div[1]/div[2]/div[1]/div[5]/a").click()
time.sleep(2)
driver.switch_to.frame(0)
# If a slider-image captcha appears, we must press-and-hold the slider,
# drag it, and finally release it.
if elementExists(driver, '//*[@id="slideBkg"]'):
    # elementExists is a project helper that checks whether the captcha
    # element is present on the page.
    print("有滑块验证码,需要处理")
    # Locate the draggable slider thumb.
    rollBar = driver.find_element_by_xpath(
        '//*[@id="tcaptcha_drag_thumb"]')
    # click_and_hold presses the slider down; perform() executes the action.
    ActionChains(driver).click_and_hold(
        on_element=rollBar).perform()
    # URL of the large background image (the one with the gap cut out).
    bgImg = driver.find_element_by_xpath('//*[@id="slideBkg"]').get_attribute(
        "src")
    # URL of the small puzzle piece that must be fitted into the gap.
    smImg = driver.find_element_by_xpath(
        '//*[@id="slideBlock"]').get_attribute("src")
    # URL of the original, gap-free image (last path char swapped for '0').
    yImg = bgImg[0:-1] + str(0)
    print(yImg)
    # Download the full image bytes (certificate verification disabled).
    yImgData = request.urlopen(
        yImg, context=ssl._create_unverified_context()).read()
    # Create the folder where captcha images are stored.
    if not os.path.exists("./yanzhengma"):
        os.mkdir("./yanzhengma")
    yImgSaveName = "./yanzhengma/" + str(int(time.time() * 10000)) + ".jpg"
    with open(yImgSaveName, "wb") as fd:
        fd.write(yImgData)
def get_one_day(browser, plant_tz, menu_text, fill_NA, graph_num, go_back_one_day=False):
    '''This function retrieves high-resolution data for the current day or
    the pevious day depending the 'go_back_one_day' parameter.

    Parameters
    ----------
    For documentation of most parameters, see documentation of the "get_data"
    function.
    'broswer' is the Selenium web browser object; it is assumed that the
        browswer object just loaded the main page for the target PV plant.
    'go_back_one_day': If True, the prior days readings are returned instead
        of the current day's readings.

    Return Value
    ------------
    The return value from this function is a two-tuple: a list of Unix Epoch
    timestamps and a list of Power Production values in kW.
    '''
    # Find and Click the proper menu item
    # NOTE(review): switch_to_window is the legacy API (deprecated in newer
    # Selenium in favor of switch_to.window) — confirm the pinned version.
    browser.switch_to_window(browser.window_handles[0])
    browser.find_element_by_xpath(POWER_MENU_ITEM % menu_text).click()

    # Make control ID substrings to search for, depending on whether there are
    # multiple graphs on the page
    if graph_num is not None:
        day_prior_id = 'UserControl%s_btn_prev' % graph_num
        date_id = 'UserControl%s__datePicker' % graph_num
        cog_id = 'UserControl%s_OpenButtonsDivImg' % graph_num
        detail_id = 'UserControl%s_ImageButtonValues' % graph_num
    else:
        day_prior_id = 'btn_prev'
        date_id = 'datePicker'
        cog_id = 'OpenButtonsDivImg'
        detail_id = 'ImageButtonValues'

    if go_back_one_day:
        browser.find_element_by_xpath(INPUT_XPATH % day_prior_id).click()
        # need a delay here, otherwise the next find element statement will find
        # the *old* datePicker text box.
        time.sleep(5)

    # Read out the day the data applies to
    the_day = browser.find_element_by_xpath(INPUT_XPATH % date_id).get_attribute("value")

    # Hover over the cog icon in the lower right of the graph
    element = browser.find_element_by_xpath(IMG_XPATH % cog_id)
    hov = ActionChains(browser).move_to_element(element)
    hov.perform()

    # Need to wait for the "Details" icon to show before clicking it.
    detail_object = WebDriverWait(browser, 7).until(EC.element_to_be_clickable((By.XPATH, INPUT_XPATH % detail_id)))
    detail_object.click()

    # Switch to the Details window that popped up and get the HTML
    # from it.
    browser.switch_to_window(browser.window_handles[1])
    WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.XPATH, "//table[contains(@id, 'Table1')]")))
    result_html = browser.page_source
    browser.close()

    # Have Pandas read the HTML and extract the data from the table within
    # it. The first (and only) table contains the data we want.
    df = pd.read_html(result_html)[0]
    df.columns = ['Time', 'Power']

    # Convert Power values to numeric. Handle rows that don't have power
    # values according to the fill_NA parameter.
    # that aren't present yet.
    df.Power = pd.to_numeric(df.Power, errors='coerce')
    if fill_NA:
        # drop first row as it contains no data
        df = df.drop(0)
        # fills NA Power values with 0.0
        df = df.fillna(0.0)
    else:
        # drops all rows with no kW data and the first garbage row.
        df = df.dropna()

    # get the time strings and create full date/time timestamps in a
    # numpy array. Also create an array of kW values
    ts_strings = (the_day + ' ' + df.Time).values
    vals = df.Power.values

    # convert the timestamps to Unix Epoch values.
    tz = pytz.timezone(plant_tz)
    ts_unix = []
    for ts in ts_strings:
        dt = dateutil.parser.parse(ts)
        dt_aware = tz.localize(dt)
        ts_unix.append(calendar.timegm(dt_aware.utctimetuple()))

    # With the Sunny Portal, a Midnight value is reported at the end of the
    # day, but really should have a date for the next day. Look for this
    # problem and correct it.
    if len(ts_unix) > 1:
        if ts_unix[-1] < ts_unix[-2]:
            ts_unix[-1] += 3600 * 24  # add a day to the point mislabeled

    # eliminate any entries past the current time. This may happen if 'fill_NA'
    # is set to True.
    cur_ts = time.time()
    for i in range(len(ts_unix)):
        if ts_unix[i] >= cur_ts:
            ts_unix = ts_unix[:i]
            vals = vals[:i]
            break

    return list(ts_unix), list(vals)
def click_item(self, item):
    """Click *item* via a low-level ActionChains gesture."""
    ActionChains(self.driver).click(item).perform()
def scrape_espn(url="http://fantasy.espn.com/football/players/projections",
                out=RAW_PROJECTIONS):
    """Scrape ESPN projections.

    Tricky because it's a React app without routing. Have to load the app
    and click through the buttons. Was reason for using Selenium.

    Walks the paginated projection tables, parsing one dict per player, then
    writes a normalized CSV to ``out``.
    """
    print("scraping ESPN")
    # set this to the p
    DRIVER.get(url)
    time.sleep(4)  # wait for JS app to render

    players = []
    current_button = 1
    while True:
        scroll()
        soup = BeautifulSoup(
            DRIVER.execute_script("return document.body.innerHTML"),
            "html.parser")
        for player in soup.select("div.full-projection-table"):
            name = player.select(".pointer")[0].get_text()
            assert name
            pos = ""
            team = ""
            if player.select(".position-eligibility"):  # D/ST don't have these
                pos = player.select(
                    ".position-eligibility")[0].get_text()  # ex RB
                team = player.select(
                    ".player-teamname")[0].get_text()  # ex Bears
            table = player.select(".player-stat-table")[0]
            # Row 1 of the body is the projection row (row 0 is last season).
            projection_row = table.find("tbody").find_all("tr")[1]
            headers = [
                e.find("div").get("title")
                for e in projection_row.find_all("td")
            ][1:]
            headers = [column(h) for h in headers]
            data = [e.get_text().lower() for e in projection_row][1:]
            p_data = {}
            p_data["name"] = name.strip()
            p_data["pos"] = pos.strip()
            team = team.strip()
            if team == "FA":
                team = "Washington"
            p_data["team"] = NAME_TEAM_MAP[team] if team else ""
            if "D/ST" in name:
                # Defenses are named like "Bears D/ST"; normalize to the team.
                p_data["name"] = p_data["name"].replace(" D/ST", "").strip()
                if p_data["name"] == "FA":
                    p_data["name"] = "Washington"
                p_data["team"] = NAME_TEAM_MAP[p_data["name"]]
                p_data["pos"] = "DST"
            for h, d in zip(headers, data):
                if h in p_data:
                    continue  # oddly there are duplicate columns
                if "/" in d:
                    # Compound cells like "C/A" or "made/attempted" split
                    # into two columns.
                    if "&" in h:
                        h1, h2 = h.split("&")
                    else:
                        h1, h2 = h.split("/")
                    d1, d2 = d.split("/")
                    if d1 != "--":
                        p_data[column(h1)] = float(d1)
                    if d2 != "--":
                        p_data[column(h2)] = float(d2)
                elif "-" in d:
                    p_data[h] = np.NaN
                else:
                    p_data[h] = float(d)
            players.append(p_data)
        # Advance pagination: hover the next page-number link, then press
        # ENTER on it. Either failure means we ran out of pages.
        try:
            next_button = DRIVER.find_element_by_link_text(
                str(current_button + 1))
            actions = ActionChains(DRIVER)
            actions.move_to_element(next_button).perform()
        except Exception as err:
            break
        try:
            current_button += 1
            next_button.send_keys(Keys.ENTER)
            time.sleep(1.5)
        except Exception as err:
            print(err)
            break

    df = pd.DataFrame(players)
    # Columns ESPN doesn't provide; filled so unify_columns sees them.
    df["fumbles"] = np.nan
    df["two_pts"] = np.nan
    df["kick_0_19"] = np.nan
    df["kick_20_29"] = np.nan
    df["kick_30_39"] = np.nan
    df["df_points_allowed_per_game"] = df["points_allowed"].astype(
        float) / 16.0
    df["df_safeties"] = np.nan
    df = unify_columns(df)
    df.to_csv(os.path.join(out, f"ESPN-Projections-{YEAR}.csv"), index=False)
    validate(df)
def setUp(inst):
    """Attach an ActionChains helper and wait for the 'creat-meet' element."""
    inst.action = ActionChains(driver)
    locator = (By.ID, 'creat-meet')
    waiter = WebDriverWait(driver, 20)
    inst.element = waiter.until(EC.presence_of_element_located(locator))
    t.sleep(2)
def sSeleccionarElemento(self, element):
    """Select an item in an options box: verify it first, then hover and click."""
    if not self.Verificar(element):
        return
    chain = ActionChains(self.driver)
    chain.move_to_element(element).click(element)
    chain.perform()
def scrape_nfl(out=RAW_PROJECTIONS):
    """Scrape NFL.com season projections into ``NFL-Projections-<YEAR>.csv``.

    Routes are static but the URLs are massive with query parameters
    (offset/position/sort/statCategory/statSeason/statType/statWeek plus an
    URL-encoded fragment), e.g.::

        https://fantasy.nfl.com/research/projections?offset=1&position=O&...

    Rather than compute page offsets, this simulates clicking the ">" next
    button until there is no next button.

    @Params
    out - directory the resulting CSV is written to

    Side effects: drives the module-level DRIVER, writes a CSV, runs
    validate() on the resulting frame.
    """
    print("scraping NFL")
    # list of page urls and expected stat headers on that page;
    # one entry per position group (offense, kickers, defenses)
    pages = [
        (  # QB/WR/RB/TE
            f"https://fantasy.nfl.com/research/projections?offset=1&position=O&sort=projectedPts&statCategory=projectedStats&statSeason={YEAR}&statType=seasonProjectedStats&statWeek=1",
            [
                "Passing_Yds",
                "Passing_TD",
                "Passing_Int",
                "Rushing_Yds",
                "Rushing_TD",
                "Receiving_Rec",
                "Receiving_Yds",
                "Receiving_TD",
                "Ret_TD",
                "FumTD",
                "2PT",
                "Lost",
                "Points",
            ],
        ),
        (  # K
            f"https://fantasy.nfl.com/research/projections?offset=1&position=O&sort=projectedPts&statCategory=projectedStats&statSeason={YEAR}&statType=seasonProjectedStats&statWeek=1#researchProjections=researchProjections%2C%2Fresearch%2Fprojections%253Fposition%253D7%2526statCategory%253DprojectedStats%2526statSeason%253D{YEAR}%2526statType%253DseasonProjectedStats%2526statWeek%253D1%2Creplace",
            ["Made", "0-19", "20-29", "30-39", "40-49", "50+", "Points"],
        ),
        (  # DST
            f"https://fantasy.nfl.com/research/projections?offset=1&position=O&sort=projectedPts&statCategory=projectedStats&statSeason={YEAR}&statType=seasonProjectedStats&statWeek=1#researchProjections=researchProjections%2C%2Fresearch%2Fprojections%253Fposition%253D8%2526statCategory%253DprojectedStats%2526statSeason%253D{YEAR}%2526statType%253DseasonProjectedStats%2526statWeek%253D1%2Creplace",
            [
                "Sack",
                "Int",
                "Fum_Rec",
                "Saf",
                "TD",
                "Def_2pt_Ret",
                "Ret_TD",
                "Pts Allow",
                "Points",
            ],
        ),
    ]

    players = []
    for page_index, (page_url, headers) in enumerate(pages):
        DRIVER.get(page_url)
        time.sleep(1)
        scroll()
        time.sleep(1)

        # normalize stat headers and prepend identity columns
        headers = [column(h) for h in headers]
        headers = ["name", "pos", "team"] + headers

        while True:  # loop over result pages; broken when ">" disappears
            soup = BeautifulSoup(
                DRIVER.execute_script("return document.body.innerHTML"),
                "html.parser")
            table = soup.find("tbody")
            for row in table.find_all("tr"):  # for each player
                if isinstance(row, NavigableString):
                    continue
                if not len(row.find_all("td")):
                    continue

                # get name, position and team from the first cell
                name_cell = row.find_all("td")[0]
                name = name_cell.select(".playerNameFull")[0].get_text()
                pos_team = name_cell.find("em").get_text()
                pos_team = [v.strip() for v in pos_team.split("-")]
                if page_index == 2:  # is DST
                    # defense rows are "<City> <Team>"; key map on last word
                    name = name.split(" ")[-1]
                    team = NAME_TEAM_MAP[name]
                    data = [name, "DST", team]
                elif len(pos_team) == 1:
                    continue  # player not on a team
                else:
                    team = pos_team[1]
                    # normalize NFL.com team codes to this project's codes
                    if team == "LA":
                        team = "LAR"
                    if team == "WAS":
                        team = "WSH"
                    if team == "JAC":
                        team = "JAX"
                    data = [name, pos_team[0], team]

                # remaining cells are the stat values; "-" means missing
                data += [
                    td.get_text().strip()
                    if "-" not in td.get_text() else np.nan
                    for td in row.find_all("td")[3:]
                ]

                player_data = {}
                for k, v in zip(headers, data):
                    player_data[k] = v
                players.append(player_data)

            # find and click the next button
            try:
                next_button = DRIVER.find_element_by_link_text(">")
                actions = ActionChains(DRIVER)
                actions.move_to_element(next_button).click().perform()
                time.sleep(1)
                scroll()
                time.sleep(1)
            except:
                break  # no ">" link -> last page of this position group

    df = pd.DataFrame(players)
    df["two_pts"] = df["2pt"]
    df["df_points_allowed_per_game"] = df["pts_allow"].astype(float) / 16.0
    df = unify_columns(df)
    df.to_csv(os.path.join(out, f"NFL-Projections-{YEAR}.csv"), index=False)
    validate(df)
def scrape_timeline_as_articles_lxml(driver, n_tweets=50):
    '''
    Collect at least *n_tweets* tweets from the currently loaded timeline.

    lxml is WAY faster than selenium parsing - vroom vroom: Selenium is only
    used to locate <article> elements and scroll; each article's innerHTML is
    handed to tweet_article_to_dict_lxml() for parsing.

    @Params
    driver   - webdriver with a timeline page already loaded
    n_tweets - minimum number of tweets to collect

    Returns a list of tweet dicts (as produced by tweet_article_to_dict_lxml).
    Side effect: scrolls the page, then returns to the top.
    '''
    # initialize the return object
    all_tweets = []
    all_tweet_links = []

    # get the first tweet
    #first_article=driver.find_element_by_tag_name('article')
    first_article = WebDriverWait(driver, WAIT_TIME).until(
        EC.presence_of_element_located((By.TAG_NAME, "article")))
    a_html = first_article.get_attribute('innerHTML')
    to_add = tweet_article_to_dict_lxml(a_html)
    all_tweets.append(to_add)
    all_tweet_links.append(to_add['tweet_link'])
    # y-coordinate of the first tweet: anything above it (pinned header,
    # etc.) is skipped on later passes
    minimum_y = first_article.location['y']

    # add tweets until there are n_tweets in the return object
    while len(all_tweets) < n_tweets:
        # collect article elements
        _ = WebDriverWait(driver, WAIT_TIME).until(
            EC.presence_of_element_located((By.TAG_NAME, "article")))
        articles = driver.find_elements_by_tag_name('article')
        tmp_tweets = []
        tmp_tweet_links = []
        for a in articles:
            try:
                # check that article is beyond the first tweet
                if a.location.get('y') < minimum_y:
                    continue
                a_html = a.get_attribute('innerHTML')
                to_add = tweet_article_to_dict_lxml(a_html)
                tmp_tweets.append(to_add)
                tmp_tweet_links.append(to_add['tweet_link'])
                '''
                # skip if already collected
                if ((to_add.get('tweet_link')==None) or
                    (to_add['tweet_link'] in tweet_links)) and to_add.get('promoted')==None:
                    continue
                tweet_links.append(to_add['tweet_link'])
                tweets.append(to_add)
                '''
            except StaleElementReferenceException as e:
                # DOM re-rendered under us; drop this article and move on
                print("Error a: stale element")
            except Exception as e:
                print("Error b: {}".format(str(e)))

        # add new articles
        # can't just check if tweet is in the list already,
        # since tweets can appear twice in a timeline,
        # so add based on non-overlap with most recent tweet in all_tweets
        new_index = 0
        if all_tweet_links[-1] in tmp_tweet_links:
            new_index = tmp_tweet_links.index(all_tweet_links[-1]) + 1
        all_tweets += tmp_tweets[new_index:]
        all_tweet_links += tmp_tweet_links[new_index:]
        print("\nAdding {} fresh tweets out of {} parsed".format(
            len(tmp_tweets[new_index:]),
            len(tmp_tweets)))
        print("{} / {} tweets collected".format(len(all_tweets), n_tweets))
        if len(all_tweets) >= n_tweets:
            print("Continuing...")
            continue  # early break if possible

        print("scrolling...")
        # move down the page to last article examined
        _ = WebDriverWait(driver, WAIT_TIME).until(
            EC.presence_of_element_located((By.TAG_NAME, "article")))
        articles = driver.find_elements_by_tag_name('article')
        ActionChains(driver).move_to_element(articles[-1]).perform()
        # let things load a bit and don't be predictable!
        time.sleep(SCROLL_TIME + (randint(10, 50) / 100))

    # scroll back to top of page when done
    driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.HOME)
    return all_tweets
def scrape(self, driver, page=None):
    """Log into the utility portal, select a section/month/year, and scrape
    banking field values into self.bankingUnits.

    @Params
    driver - webdriver instance (quit before every return path)
    page   - section key looked up in the module-level ``sections`` dict;
             also selects which year input id is used ('reading' vs others)

    Returns the page source on success, or "Error1"/"Error2"/"Error3"
    depending on which stage timed out (outermost to innermost).
    """
    driver.get(self.url)
    try:
        # --- stage 1: login and open the requested section ---
        login = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "mat-input-0")))
        login.send_keys(self.serviceName)
        password = driver.find_element_by_id("mat-input-1")
        password.send_keys(self.servicePass)
        submit = driver.find_element_by_css_selector(
            ".mat-raised-button").click()
        reading = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, sections.get(page))))
        reading.click()
        try:
            # --- stage 2: open the month dropdown and pick month/year ---
            menu_selector = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR,
                     "#mat-select-1 .mat-select-arrow"))).click()
            element = driver.find_element_by_css_selector(
                ".ng-trigger-transformPanel")
            actions = ActionChains(driver)
            actions.move_to_element(element).perform()
            # month_dict maps month name -> CSS selector of its option
            select_month = driver.find_element_by_css_selector(
                month_dict.get(self.serviceMonth)).click()
            if page == 'reading':
                # the 'reading' section renders a different input id
                enter_year = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.ID, "mat-input-5")))
                enter_year.clear()
                enter_year.send_keys(self.serviceYear)
            else:
                enter_year = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.ID, "mat-input-4")))
                enter_year.clear()
                enter_year.send_keys(self.serviceYear)
            try:
                # --- stage 3: submit, fetch results, collect fields, log out ---
                enter_button = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located(
                        (By.CSS_SELECTOR,
                         ".primary > .mat-button-wrapper"))).click()
                fetch_results = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located(
                        (By.CSS_SELECTOR,
                         ".mr-1 > .mat-button-wrapper"))).click()
                results = driver.page_source
                # pull each banking field's value straight from the DOM
                for element in banking:
                    javaScript = "return document.getElementById('{}').value;".format(
                        element)
                    self.bankingUnits.append(
                        driver.execute_script(javaScript))
                logout_menu = driver.find_element_by_css_selector(
                    ".ml-xs .mat-icon").click()
                logout = WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((
                        By.CSS_SELECTOR,
                        ".mat-menu-item:nth-child(5) > .mat-menu-ripple")))
                logout.click()
                driver.quit()
                return results
            except selenium.common.exceptions.TimeoutException as e:
                driver.quit()
                status = "Error3"
                return status
        except selenium.common.exceptions.TimeoutException as e:
            driver.quit()
            status = "Error2"
            return status
    except (selenium.common.exceptions.WebDriverException,
            selenium.common.exceptions.TimeoutException) as e:
        driver.quit()
        status = "Error1"
        return status
def download_files(): # ignore the notification message if shown action = ActionChains(driver) message_box = driver.find_element_by_id('NotificationsAskMsg') action.move_to_element(message_box).perform() inside_msg = driver.find_element_by_tag_name('p') action.move_to_element(inside_msg).perform() dismiss = driver.find_element_by_class_name('NotificationIgnore') action.move_to_element(dismiss).perform() dismiss.click() # Ads handling here window_before = driver.window_handles[0] try: window_after = driver.window_handles[1] if window_after: driver.switch_to.window(window_after) driver.implicitly_wait(3) driver.close() # back to original window driver.switch_to.window(window_before) driver.implicitly_wait(2) # Getting user movie choice search_for = input( str('What you want to search for today ? : \n Search for movies only : ')) # Search bar field top_tag = driver.find_element_by_id("head") form = top_tag.find_element_by_tag_name('form') search_bar = form.find_element_by_class_name('autoComplete') search_bar.send_keys(search_for) driver.implicitly_wait(2) dropdown_search = form.find_element_by_tag_name('div') items = dropdown_search.find_elements_by_tag_name('a') all_movies = [] # for movie title movies_links = [] # movie links for item in items: text = item.text all_movies.append(text) link = item.get_attribute('href') movies_links.append(link) print('Available Movies for this Search are : ', all_movies) # Getting user movie year movie_year = input(str('On what Year : ')) # return matched title and link typed by user match_title = re.compile( search_for + '\s' + '\(%s\)+'%str(movie_year), re.I) title_rs = list(filter(match_title.search, all_movies)) link_match = re.compile('/' + search_for + '-' + movie_year, re.I) link_rs = list(filter(link_match.search, movies_links)) if title_rs and link_rs: print('Found Title match : ', title_rs, '\n', 'Url match is : ', link_rs) else: print('Nothing Found ') else: print('noo ads shown !!!! ') pass except: Exception()
def tMouse(self, element): if(self.Verificar()): print("Moviendo mouse sobre el elemento \"%s\""%element.get_attribute("name")) ActionChains(self.driver).move_to_element(element)
def lookup_commissions(driver, sd, ed):
    """Log into the Slumbercloud affiliate portal and scrape all commission
    transactions, following the "Next" pagination link until it disappears.

    @Params
    driver - webdriver instance
    sd, ed - start/end dates (currently unused by this implementation)

    Returns a dict of parallel lists: number, date, product_name,
    total_amount, commission, status.

    SECURITY NOTE(review): credentials are hard-coded below — move them to
    a config file or environment variables.
    """
    # Log in first
    driver.get('https://www.slumbercloud.com/affiliates/account/login/')
    mouse = webdriver.ActionChains(driver)
    user = driver.find_element_by_xpath("//input[@name='login[username]']")
    password = driver.find_element_by_xpath("//input[@name='login[password]']")
    mouse.move_to_element(user).perform()
    user.clear()
    mouse.click(user).perform()
    mouse.send_keys("*****@*****.**").perform()
    time.sleep(8)
    driver.save_screenshot('now.png')
    mouse = ActionChains(driver)
    mouse.move_to_element(password).perform()
    mouse.click(password).perform()
    password.clear()
    mouse.send_keys("pIZeO8ec#3ld").perform()
    driver.save_screenshot('now1.png')
    time.sleep(8)
    mouse.click(
        driver.find_element_by_xpath("//button[@id='send2']/span")).perform()
    driver.save_screenshot('now2.png')
    time.sleep(8)

    # Go to commissions
    driver.get('https://www.slumbercloud.com/affiliate/account/')

    # now get conversion data; column mapping old site -> new site:
    #   date         -> created date
    #   product_name -> title
    #   total_amount -> gone from new site, filled with zero
    #   commission   -> amount
    #   status       -> unchanged
    post_dict = {
        'number': [],
        'date': [],
        'product_name': [],
        'total_amount': [],
        'commission': [],
        'status': []
    }
    has_next = True
    while has_next:
        rows = driver.find_element_by_xpath(
            '//table[@id="affiliate-transactions-history"]/tbody'
        ).find_elements_by_tag_name('tr')
        for tr in rows:
            # BUG FIX: the original wrapped this body in
            # `for j in range(len(row))`, appending every row once per
            # cell (5x duplication). Each table row is appended once now.
            row = tr.find_elements_by_tag_name('td')
            number = row[0].get_attribute('textContent')
            product_name = row[1].get_attribute('textContent')
            total_amount = '$0'
            commission = row[2].get_attribute('textContent')
            status = row[3].get_attribute('textContent')
            date = parse(row[4].get_attribute('textContent'))
            post_dict['number'].append(number)
            post_dict['date'].append(date)
            post_dict['product_name'].append(product_name)
            post_dict['total_amount'].append(total_amount)
            post_dict['commission'].append(commission)
            post_dict['status'].append(status)
        try:
            driver.find_element_by_xpath('//a[@title="Next"]').click()
        except Exception:
            has_next = False
    return post_dict
from helper import list_actions # DIR='/home/bruhh/workspace/bmc/html_reports/automation/HTML_20Report' DIR='/home/bruhh/workspace/bmc/html_reports/automation/html_r' STATUS='/html/body/div[2]/div[1]/section/div/div[1]/a' FAIL='/html/body/div[2]/div[1]/section/div/div[1]/ul/li[2]' fields = ['test-name', 'test-status', 'test-time', 'test-author'] driver = webdriver.Chrome() driver.get(f'file://{DIR}/report.html') action = ActionChains(driver) # select all fail status s = driver.find_element_by_xpath(STATUS) action.move_to_element(s).perform() s = driver.find_element_by_xpath(FAIL) action.move_to_element(s).perform() s.click() lst = driver.find_element_by_id('test-collection') d = lst.find_elements_by_class_name('test-heading') action = ActionChains(driver) for row in d: if row.is_displayed():
def _enter_chat_room(self, room_name): self.driver.get(self.live_server_url + '/chat/') ActionChains(self.driver).send_keys(room_name + '\n').perform() WebDriverWait(self.driver, 2).until(lambda _: room_name in self.driver.current_url)
# -*- coding: utf-8 -*- # @Time : 2019/5/22 22:20 # @Author : l7 # @Email :[email protected] # @File : l7_11_鼠标操作.py # @Software : PyCharm from selenium.webdriver.common.action_chains import ActionChains as AC from selenium import webdriver from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import Select #打开浏览器Chrome driver = webdriver.Firefox() #打开网址 driver.get("http://www.baidu.com") #鼠标悬浮设置,出现拉下列表,悬浮出现的列表,使用定位的快捷键,按住shift+ctrl+c,鼠标点击需要定位的元素 #实例化 ac = AC(driver) #鼠标行为,最后调用perform()来执行鼠标操作 ele = driver.find_element_by_xpath("设置按钮的定位") ac.move_to_element(ele).perform() #等待下拉菜单出现,点击 WebDriverWait(driver, 10).until( EC.visibility_of_element_located((By.XPATH, "下拉列表的元素-高级设置"))) driver.find_element_by_xpath("下拉列表的元素-高级设置").click() #鼠标拖拽 AC.drag_and_drop()
browser = webdriver.Safari() browser.get('http://sso.yf.dasouche.net/login.htm?') browser.maximize_window() sleep(2) # 登陆 login_username = browser.find_element_by_id('username').send_keys('15310011102') login_pw = browser.find_element_by_id('password').send_keys('aa123123') sleep(2) login_button = browser.find_element_by_id('submit-btn') login_button.click() sleep(5) #对ActionChains实例化 actionChains=ActionChains(browser) # 客户管理 customerbtn = browser.find_element_by_xpath( '//div[@class="first-level-title"]//a[contains(text(),"客户")]') # 根据父子节点结合包裹标签定位 customerbtn.click() sleep(2) # 点击二级菜单的客户管理 custbtn = browser.find_element_by_xpath( '//a[@href="https://dev.yf.dasouche.net/index.html?url=https://f2e.yf.dasouche.net/infiniti/new-customer-management/list.html"]').click() sleep(3) # 订单管理 order = browser.find_element_by_xpath('//div[@class="first-level-title"]//a[contains(text(),"订单")]') order.click()
def _post_message(self, message): ActionChains(self.driver).send_keys(message + '\n').perform()
""" Created on Sat Jul 4 19:09:14 2020 @author: shruti """ from selenium import webdriver import time from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains import logindata1 options = webdriver.ChromeOptions() options.add_argument("--start-maximized") driver = webdriver.Chrome('C:\chromedriver.exe') action = ActionChains(driver) time.sleep(1) driver.get('http://www.flipkart.com') time.sleep(3) sign = driver.find_element_by_xpath( '/html/body/div[2]/div/div/div/div/div[2]/div/form/div[1]/input') sign.send_keys(logindata1.USERNAME) time.sleep(3) passwordelement = driver.find_element_by_xpath( '/html/body/div[2]/div/div/div/div/div[2]/div/form/div[2]/input') passwordelement.send_keys(logindata1.PASSWORD) time.sleep(3)
def click_on_the_checkbox(self, locator): element = self.element_is_displayed(locator) ActionChains(self.wd).move_to_element(element).perform()
all_pages = driver.find_element_by_class_name("jPag-pages") pages = all_pages.find_elements_by_tag_name("li") num_pages = len(pages) except: num_pages = 1 page_num = 1 num_tries = 0 while (page_num <= num_pages): # wait for up to 3 seconds for the page to be clickable, if there is more than 1 page if (num_pages > 1): page_string = "ul.jPag-pages li:nth-child(" + \ str(page_num) + ")" wait.until( EC.visibility_of_element_located( (By.CSS_SELECTOR, page_string))) actions = ActionChains(driver) cur_page = driver.find_element_by_css_selector(page_string) actions.move_to_element(cur_page).click().perform() # wait for the results to load try: wait.until( EC.visibility_of_all_elements_located( (By.XPATH, "//div[@class='row-fluid class-title']"))) except: break # wait for up to 5 seconds for the expand all classes to be clickable, then click it wait.until( EC.element_to_be_clickable((By.XPATH, "//a[@id='expandAll']"))) actions = ActionChains(driver) expand_button = driver.find_element_by_xpath("//a[@id='expandAll']") actions.move_to_element(expand_button).click().perform()
# 外框的坐标 square = browser.find_element_by_xpath( '//*[@id="app"]/div/section/div[3]/center/div/div/div[2]') # 滑块的长度 drag_button = browser.find_element_by_xpath( '//*[@id="app"]/div/section/div[3]/center/div/div/div[3]') drag_button_len = drag_button.size.get('width') # 滑动条的长度 square_len = square.size.get('width') # 终止x坐标 end_x = int(square_len - drag_button_len) time.sleep(1) # 定义模拟动作 action = ActionChains(browser) # 拖拽滑块 action.drag_and_drop_by_offset(drag_button, end_x, 0) action.perform() time.sleep(1) submit = browser.find_element_by_xpath( '//*[@id="app"]/div/section/div[4]/button') submit.click()
def login():
    """Log into cnblogs and solve the Geetest slider captcha by screenshot
    diffing: capture the puzzle image with and without the gap, locate the
    gap x-offset, then drag the slider along a generated track.

    Recurses (calls login() again) on failure. Relies on module-level
    helpers get_diff_location() and get_tracks1() and on PIL's Image.
    NOTE(review): crop offsets (+160/+55/+225/-30) are display-dependent
    magic numbers — confirm against the target screen resolution.
    """
    # Start the browser
    driver = webdriver.Chrome()
    # Request the login page
    driver.get('https://account.cnblogs.com/signin?returnUrl=https%3A%2F%2Fwww.cnblogs.com%2F')
    # Maximize the browser window
    driver.maximize_window()
    # Enter the account name (#mat-input-0)
    # driver.find_element_by_xpath('//*[@id="mat-input-0"]').send_keys('poort')
    # Enter the password
    driver.find_element_by_xpath('//*[@id="mat-input-1"]').send_keys('110112119zxq')
    # Click the login button
    driver.find_element_by_xpath('//div/button/span[1]').click()
    # Wait 2s for the captcha popup to finish loading
    time.sleep(2)
    # Locate the slider knob
    slider = driver.find_element_by_xpath('//div[@class="geetest_slider_button"]')
    # Press and hold the left mouse button on the knob
    ActionChains(driver).click_and_hold(slider).perform()
    # Drag all the way right, to simplify the later comparison
    ActionChains(driver).move_by_offset(xoffset=198, yoffset=0).perform()
    # Locate the captcha popup element
    y_element = driver.find_element_by_xpath('/html/body/div[2]/div[2]')
    # print(y_element.location)
    # print(y_element.size)
    # Use top-left/right/bottom coordinates to bound the puzzle image
    left = y_element.location['x']
    top = y_element.location['y']
    right = left + y_element.size['width']
    bottom = top + y_element.size['height']
    # Full-window screenshot
    driver.save_screenshot('a.png')
    # Open the screenshot
    im = Image.open('a.png')
    # Crop to the puzzle area
    im = im.crop((left + 160, top + 55, right + 225, bottom - 30))
    # Save the captcha image WITH the gap
    im.save('b.png')
    # Release the mouse
    ActionChains(driver).release(slider).perform()
    time.sleep(2)
    # Locate the canvas that can show the gap-free image
    block = driver.find_element_by_xpath('/html/body/div[2]/div[2]/div[6]/div/div[1]/div[1]/div/a/div[1]/canvas')
    # Force its style so the gap-free image is displayed
    driver.execute_script('arguments[0].style = "display: block; opacity: 1;"', block)
    time.sleep(2)
    # Full-window screenshot again
    driver.save_screenshot('a.png')
    im = Image.open('a.png')
    # Crop to the same puzzle area
    im = im.crop((left + 160, top + 55, right + 225, bottom - 30))
    # Save the captcha image WITHOUT the gap
    im.save('c.png')
    time.sleep(0.5)
    # Open both captured images
    imageb = Image.open('b.png')
    imagec = Image.open('c.png')
    # Find the gap's x-offset by diffing the two images
    visualstack = get_diff_location(imagec, imageb)
    # Subtract the blank pixels at the image's left edge
    print(visualstack - 10)
    # Press and hold the knob again
    ActionChains(driver).click_and_hold(slider).perform()
    # First drag quickly to roughly the middle
    ActionChains(driver).move_by_offset(xoffset=visualstack/2, yoffset=0).perform()
    # Then follow a human-like track for the remaining distance
    track_list = get_tracks1((visualstack/2 - 48))
    for track in track_list:
        ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform()
    # Release the knob
    time.sleep(0.8)
    ActionChains(driver).release(slider).perform()
    print(driver.page_source)
    time.sleep(4)
    # "你的昵称" ("your nickname") appears only when logged in
    if '你的昵称' in driver.page_source:
        print('登录成功')
        print(driver.get_cookies())
    else:
        # Failed: close this browser and retry from scratch
        driver.close()
        login()
def F_RunAccounts(index, list_contas):
    """Download the PDF statement ("extrato") for account *index* of
    *list_contas*, move it into the processing folder, then recurse to the
    next account with a freshly fetched account list.

    @Params
    index       - position of the account to process in list_contas
    list_contas - list of account WebElements from F_GetListAccounts

    Relies on module-level: browser, prd_folder, download_folder,
    current_filename, max_err_count and the global error counter num_erros.
    Log messages (F_WriteLog) are intentionally kept in Portuguese.
    """
    try:
        list_down = False
        browser.implicitly_wait(2)
        conta = list_contas[index]
        # account title and number shown in the search-results card
        conta_title = conta.find_element_by_xpath('.//*[@class="resultadoBusca--left-title"]').text.strip()
        conta_number = conta.find_element_by_xpath('.//*[@class="resultadoBusca--left-conta"]').text.strip()
        conta_number = re.search("\\d+", conta_number).group(0).strip()
        F_WriteLog('Download do extrato da conta: ' + conta_title + ' - ' + conta_number)
        acc_filename = str(index + 1) + '_' + conta_number + '.pdf'
        if F_CheckIfExistsFile(acc_filename, prd_folder) is False:
            # expand the account card and open it
            conta.find_elements_by_css_selector('span.resultadoBusca--right-expandirMenu')[0].click()
            browser.implicitly_wait(1)
            conta.find_elements_by_css_selector('.resultadoBusca--bottom.ativo button')[0].click()
            browser.implicitly_wait(3)
            # hover the "digital account" menu to reveal its submenu
            menu_conta_digital = browser.find_element_by_xpath('//*[@id="Menu"]/div/div/nav/ul/li[2]')
            actions = ActionChains(browser)
            actions.move_to_element(menu_conta_digital).perform()
            browser.implicitly_wait(1)
            browser.find_element_by_xpath('//*[@id="Menu"]/div/div/nav/ul/li[2]/div[2]/div/div[1]/ul/li/a').click()  # Hover
            browser.implicitly_wait(5)
            browser.find_element_by_xpath('//*[@id="frm:periodoExtrato"]/option[4]').click()  # last 60 days
            browser.implicitly_wait(3)
            browser.find_element_by_xpath('//*[@id="frm"]/div[2]/div/div[7]/input').click()  # "Consultar" (query)
            browser.implicitly_wait(10)
            browser.find_element_by_xpath('//*[@id="j_idt90"]/div[1]/div[2]/a').click()  # download the PDF
            F_WriteLog('Esperando o download do arquivo de PDF...')
            # poll until the browser finishes downloading the file
            while F_CheckIfExistsFile(current_filename, download_folder) is False:
                sleep(1)
            F_WriteLog('Movendo arquivo para a pasta de processamento')
            is_moved = F_MoveAndRenameFile(current_filename, acc_filename, download_folder, prd_folder)
            if is_moved is None:
                F_WriteLog('Erro ao mover arquivo da conta ' + conta_title + ' - ' + conta_number + '\n')
            else:
                F_WriteLog('Extrato da conta baixado e movido com sucesso\n')
                list_down = False
        else:
            # statement already present in the processing folder; skip it
            F_WriteLog('Extrato da conta já existe na pasta de processamento.\n')
            browser.implicitly_wait(3)
            list_down = True
        index = index + 1
        browser.implicitly_wait(2)
        if index >= len(list_contas):
            F_WriteLog('Fim do download das contas\n')
            return
        # recurse into the next account with a refreshed account list
        F_RunAccounts(index, F_GetListAccounts(list_down))
    except:
        logging.error('Erro:', exc_info=True)
        F_WriteLog('Erro inesperado ao baixar conta\n')
        # stop retrying after max_err_count consecutive failures
        global num_erros
        num_erros += 1
        if num_erros == max_err_count:
            return