from selenium import webdriver  # BUG FIX: was missing — webdriver.Chrome below raised NameError
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options

# Fill and submit a demo form, either with direct element calls (method 1,
# kept commented out) or with a single ActionChains gesture (method 2).
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(executable_path='/usr/bin/chromedriver',
                          options=chrome_options)
driver.get('http://pythonscraping.com/pages/files/form.html')
firstnameField = driver.find_element_by_name('firstname')
lastnameField = driver.find_element_by_name('lastname')
submitButton = driver.find_element_by_id('submit')
### METHOD 1 ###
#firstnameField.send_keys('Philip')
#lastnameField.send_keys('Tung')
#submitButton.click()
#####################
### METHOD 2 ####
# Queue click/type actions on both fields, finish with RETURN to submit.
actions = ActionChains(driver).click(firstnameField).send_keys('Philip').click(
    lastnameField).send_keys('Tung').send_keys(Keys.RETURN)
actions.perform()
#####################
print(driver.find_element_by_tag_name('body').text)
driver.close()
from selenium import webdriver
from selenium.webdriver import ActionChains

# Demo: hover over one of Baidu's top-bar entries so its dropdown opens,
# then click the "高级搜索" (advanced search) link inside the dropdown.
browser = webdriver.Chrome()
browser.get('http://www.baidu.com')
set_node = browser.find_element_by_xpath('//*[@id="u1"]/a[8]')
mouse_obj = ActionChains(browser)
mouse_obj.move_to_element(set_node)  # queue the hover gesture
mouse_obj.perform()  # execute it — the dropdown stays open
browser.find_element_by_link_text('高级搜索').click()
# browser.quit()
def get_constituency_candidates_table(constituency_object, year):
    """Scrape and cache the candidates table HTML for one constituency.

    Does nothing when the table file already exists. The 2014 site lists
    constituency links in upper case, so the link text is matched
    accordingly; an exact link-text match is tried first, then a partial
    match. On failure to render the table, the page is reloaded once and
    the table hovered (some tables render lazily) before giving up.
    """
    print(constituency_object.candidate_table_savepath)
    if Path(constituency_object.candidate_table_savepath).exists():
        logging.info('loading from archive')
        logging.info(f'{constituency_object.name} already exists')
        return

    logging.info(f'current constituency :{constituency_object.name}')
    browser = Scraper().launch_browser()
    # open homepage
    browser.get(constituency_object.page_url)
    # 2014 pages show constituency names in upper case
    link_name = (constituency_object.name.upper()
                 if year == 2014 else constituency_object.name)
    # open given constituency candidates table page
    try:
        # if constituency name matches completely
        constituency_url = browser.find_element_by_link_text(
            link_name).get_attribute('href')
        browser.get(constituency_url)
    except Exception as e:
        logging.info(e)
        try:
            # if constituency name matches partially
            constituency_url = browser.find_element_by_partial_link_text(
                link_name).get_attribute('href')
            browser.get(constituency_url)
        except Exception as e2:
            logging.info(e2)
            # constituency is missing
            logging.info(
                f'missing data for constituency {constituency_object.name}\n')
            browser.close()
            # BUG FIX: original fell through here and kept using the closed
            # browser (and an unbound constituency_url).
            return
    try:
        element = browser.find_element_by_xpath(
            constituency_object.candidate_table_xpath)
        candidates_table_html = element.get_attribute('outerHTML')
        with open(constituency_object.candidate_table_savepath, 'w') as handle:
            handle.write(candidates_table_html)
        browser.close()
    except Exception as e:
        logging.info(e)
        try:
            # retry once: reload the page and hover the table to force rendering
            browser.refresh()
            browser.get(constituency_url)
            time.sleep(5)
            element = browser.find_element_by_xpath(
                constituency_object.candidate_table_xpath)
            mouse = ActionChains(browser)
            mouse.move_to_element(element)
            mouse.perform()
            candidates_table_html = element.get_attribute('outerHTML')
            with open(constituency_object.candidate_table_savepath, 'w') as handle:
                handle.write(candidates_table_html)
            browser.close()
        except Exception as e2:
            logging.info(e2)
            logging.info(
                f'missing data for constituency {constituency_object.name}\n')
async def play(ctx, url: str):
    """Discord command: treat *url* as a YouTube search query, download the
    first result's audio to song.mp3, and play it on the caller's voice client.

    NOTE(review): relies on module-level names not visible here: `bot`,
    `speak`, `get`, `PATH`, `webdriver`, `youtube_dl`, `discord`, `Keys`,
    `ActionChains`, `time`, `os` — confirm against the rest of the file.
    """
    song_there = os.path.isfile("song.mp3")
    try:
        # Remove the previous download; removal fails while it is being played.
        if song_there:
            os.remove("song.mp3")
            print("Removed old song file\n")
    except PermissionError:
        print("Trying to delete song file, but it is playing")
        await ctx.send("Error: Song is currently playing")
        return
    speak("Getting everything ready now")
    await ctx.send("Getting everything ready now.")
    voice = get(bot.voice_clients, guild=ctx.guild)
    # youtube-dl options: best audio, converted to 192k mp3 via ffmpeg.
    ydl_opts = {
        'format': 'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    # Drive a browser to resolve the search text into a concrete video URL.
    driver = webdriver.Chrome(PATH)
    driver.get("https://www.youtube.com")
    search_bar = driver.find_element_by_xpath(
        '/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/form/div/div[1]/input'
    )
    search_bar.send_keys(url)
    search_bar.send_keys(Keys.RETURN)
    first_video = driver.find_element_by_xpath(
        '/html/body/ytd-app/div/ytd-page-manager/ytd-search/div[1]/ytd-two-column-search-results-renderer/div/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-video-renderer[1]/div[1]/ytd-thumbnail/a/yt-img-shadow/img'
    )
    action = ActionChains(driver)
    # The click is only queued here; it runs at perform() after the sleep.
    action.move_to_element(first_video).click()
    time.sleep(2)
    action.perform()
    uurl = driver.current_url
    url = uurl  # rebind: from here on `url` is the actual video URL
    driver.quit()
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        print("Downloding audio now\n")
        ydl.download([url])
    # Rename whatever mp3 the download produced to the fixed name song.mp3.
    for file in os.listdir("./"):
        if file.endswith(".mp3"):
            name = file
            os.rename(file, "song.mp3")
            print(f"Rename file {name}\n")
    # NOTE(review): `name` is unbound if the download produced no .mp3 —
    # the lambda and rsplit below would raise. TODO confirm intended.
    voice.play(discord.FFmpegPCMAudio("song.mp3"),
               after=lambda e: print(f"{name} has stoped playing\n"))
    voice.source = discord.PCMVolumeTransformer(voice.source)
    voice.source.volume = 0.2
    nname = name.rsplit("-", 2)
    await ctx.send(f"Playing: {nname[0]}")
    print("Playing")
    speak("Playing the song")
from selenium import webdriver
import time

# Demo: open Baidu, then hover an element and hover-and-click it with
# ActionChains. Most earlier experiments are kept commented out.
options = webdriver.ChromeOptions()
# options.set_headless()
driver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver', )
driver.get('http://www.baidu.com')
# driver.save_screenshot('baudu.png')
# driver.find_element_by_id('kw').send_keys('帅哥')
# driver.find_element_by_id('su').click()
# driver.find_element_by_name('bg s_btn').click()
time.sleep(2)
# driver.find_element_by_link_text('下一页>').click
# driver.find_element_by_class_name('n').click()
# print(driver.page_source)
# print(driver.get_cookie)
# cookies = driver.get_cookies()
# for cookie in cookies:
#     print(cookie['name']+':'+cookie['value'])
from selenium.webdriver import ActionChains

# hover only, then hover + click (local name `elelment` is a typo, kept as-is)
elelment = driver.find_element_by_xpath('//div[@id="ul"]/a[1]')
ActionChains(driver).move_to_element(elelment).perform()
ActionChains(driver).move_to_element(elelment).click(elelment).perform()
time.sleep(5)
driver.quit()
def test_Buyflow(self):
    """Open the storefront, hover product 1, then click its add-to-cart button."""
    self.driver.get("http://automationpractice.com/index.php")
    item_locator = Locator.product1
    add_locator = Locator.product1_add_btn
    item = self.driver.find_element(item_locator[0], item_locator[1])
    add_button = self.driver.find_element(add_locator[0], add_locator[1])
    chain = ActionChains(self.driver)
    chain.move_to_element(item).move_to_element(add_button).click().perform()
长按点击操作:action.click_and_hold(btn)
进行页面的移动,设置移动像素: action.move_by_offset(17, 0)
立即执行动作链:action.move_by_offset(17, 0).perform()
释放动作链:action.release()
"""
from selenium import webdriver
from selenium.webdriver import ActionChains  # action chains
from time import sleep

# Demo: click-and-hold the draggable box inside the runoob sandbox iframe,
# then drag it to the right in five 17-pixel steps and release.
bro = webdriver.Chrome('chromedriver.exe')
bro.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
bro.switch_to.frame("iframeResult")  # widget lives inside this iframe
btn = bro.find_element_by_id("draggable")
action = ActionChains(bro)
action.click_and_hold(btn)  # press and hold the target element
for i in range(5):
    action.move_by_offset(17, 0).perform()  # execute the queued move immediately
    sleep(0.3)
action.release()  # release the mouse button, ending the drag
bro.quit()
def ali_pay(email, alipay_url, noWin=False, st_input=False):
    """Drive an Alipay payment flow in a browser.

    Returns 1 on confirmed payment, -1 when the session expired and a new
    login is needed, 0 when login never succeeded, None when alipay_login
    signals abort (x == 11) or on unexpected failure (best-effort design).

    NOTE(review): structure reconstructed from whitespace-collapsed source;
    the `else` after the login loop is read as for-else (all retries failed)
    and the `else` after the inner try/except as try-else — confirm.
    """
    try:
        print('打开支付宝, 进行付款...')
        driver = Base(no_win=noWin)
        driver.get_driver()
        if noWin:
            driver.driver.set_window_size(2000, 1500)
        # Retry login up to 10 times; x == 1 means logged in, 11 means abort.
        for _ in range(10):
            x = alipay_login(driver, email, alipay_url, st_input=st_input)
            if x == 1:
                break
            elif x == 11:
                return
        else:
            return 0
        print('输入付款账号结束,进入确认付款')
        # Try each saved card slot in turn.
        for i in range(2, 13):
            try:
                driver.Wait("payPassword_container", sleep=1)
                time.sleep(1)
                print('输入支付密码!...')
                # Type the payment password keystrokes via an action chain.
                ActionChains(driver.driver).send_keys(*alipay_Keys).perform()
                st = timess()
                # Best-effort SMS-code handling: only when an 'ackcode'
                # input is present in the page source.
                try:
                    driver.Wait(xpath='//*[@id="J_authSubmit"]', text=NC)
                    ls = re.findall(r"id=['\"]ackcode['\"]",
                                    driver.driver.page_source)
                    if ls and ls[0]:
                        driver.driver.find_element_by_id('ackcode')
                        driver.Wait("ackcode",
                                    input("发现短信验证码,请输入短信验证码\n>>>"))
                except Exception:
                    pass
                print(timess() - st)
                driver.Wait(xpath='//*[@id="J_authSubmit"]', sleep=1)
                time.sleep(1)
                # Poll the page (~6s max) for the payment outcome.
                for _ in range(60):
                    page = driver.driver.page_source
                    if "确认付款" in page or '您已成功付款' in page:
                        with open("logs/email_pay.log", "a",
                                  encoding="utf8") as f:
                            f.write(
                                f"付款完成: {email: <25}{time.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                            )
                        driver.driver.save_screenshot(
                            'visa_photo/successful.png')
                        return 1
                    elif "无法完成付款" in page:
                        break
                    elif "抱歉,您操作超时,请重新登录" in page:
                        print("需要登录")
                        return -1
                    else:
                        time.sleep(0.1)
                # Payment failed on this card: go back and pick the next one.
                driver.Wait(xpath='//*[@id="J_GoBack_nobodyknows"]')
                driver.Wait(xpath='//div[@id="J-rcChannels"]/div/div/a[1]',
                            sleep=0.5)
                driver.Wait(xpath=f'//*[@id="J_SavecardList"]/li[{i}]',
                            sleep=0.5)
                time.sleep(1)
            except Exception as e:
                with open("logs/error.log", "a", encoding="utf-8") as f:
                    # BUG FIX: original wrote `... + e` (str + Exception),
                    # which raises TypeError and loses the log entry.
                    f.write(f"\n\n{time.strftime('%m%d-%H%M%S')}\n" + str(e))
            else:
                # No exception during this card's flow: treat as success.
                driver.driver.save_screenshot('visa_photo/successful.png')
                return 1
    except Exception:
        pass  # deliberate best-effort: swallow unexpected top-level failures
    finally:
        driver.driver.quit()
def hover(self, *locator):
    """Locate the element addressed by *locator and move the mouse onto it."""
    target = self.driver.find_element(*locator)
    ActionChains(self.driver).move_to_element(target).perform()
def balance_excel(self, menu, value):
    """Drive one report screen from Excel-defined steps.

    menu[1]/menu[2] pick the first/second level navigation entries;
    menu[3:] name the widgets to operate in order, with value[n] holding
    the input for step n ('置空' means "clear the field").
    Returns False on any unknown step or invalid value, True otherwise.
    """
    print(menu)
    print(value)
    self.base = TestExcel()
    basedata = [Excel_basedata_zs, sheet48]
    # click first-level menu
    self.findxpath_click(self.base.first_menu(basedata[0]).get(menu[1]))
    # click second-level menu
    self.findxpath_click(
        self.base.second_menu(basedata[0]).get(f'{menu[1]}-{menu[2]}'))
    targetsheet = self.base.sheet_xpath_dic(basedata[0], basedata[1])
    wait = (By.XPATH, targetsheet.get('加载等待'))  # loading-mask locator
    # execute the remaining steps in their user-defined order
    l = len(menu)
    n = 3
    while n < l:
        if menu[n] not in self.base.operable_list(basedata[0], basedata[1]):
            print(f'操作元素"{menu[n]}"输入错误,请检查')
            return False
        else:
            findelement = self.findxpath(targetsheet.get(menu[n]))
            # elements operated by "click"/"tick"
            if menu[n] in ['导出', '搜索']:
                findelement.click()
                if menu[n] in ['搜索']:
                    # wait up to 120s for the loading mask to disappear
                    self.wait_for_miss(120, wait)
            # elements operated by "type"
            elif menu[n] in ['查询日期']:
                if value[n] == '置空':
                    # select-all + backspace clears the field
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    findelement.send_keys(value[n])
            # elements operated by "type then pick from dropdown"
            elif menu[n] in ['投组单元', '所在账户']:
                if value[n] == '置空':
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    sleep(1)
                    # SPACE then BACKSPACE re-triggers the suggestion list
                    findelement.send_keys(value[n] + Keys.SPACE + Keys.BACK_SPACE)
                    sleep(1)
                # NOTE(review): indentation reconstructed from collapsed
                # source — this dropdown click may originally have lived
                # inside the non-'置空' branch only; confirm.
                if menu[n] in ['投组单元', '所在账户']:
                    self.findxpath_click(targetsheet.get('投组下拉选择'))
            # elements operated by "click then pick"
            elif menu[n] in ['是否穿透', '产品类型']:
                a = self.base.enumeration_list2(basedata[0], basedata[1],
                                                menu[n])
                if menu[n] == '产品类型':
                    a.append('置空')
                if value[n] not in a:
                    print(f'值"{value[n]}"输入错误,请检查')
                    return False
                else:
                    findelement.click()
                    if menu[n] in ['是否穿透']:
                        self.findxpath_click(f'//li[text()="{value[n]}"]')
                    elif menu[n] in ['产品类型']:
                        if value[n] == '置空':
                            # double-clicking "select all" toggles all off
                            selectall = self.findxpath(
                                '//div[text()=" 全选"]')
                            action = ActionChains(self.driver)
                            action.double_click(selectall).perform()
                            findelement.click()
                        elif value[n] == '全选':
                            self.findxpath_click('//div[text()=" 全选"]')
                            findelement.click()
                        else:
                            self.findxpath_click(
                                f'//li[text()=" {value[n]}"]')
        n = n + 1
    return True
def balance_compare(self, menu, value):
    """Run the same Excel-defined steps in two environments and compare results.

    menu[2]/menu[3] pick the navigation entries; menu[4:] name the widgets
    to operate, with value[n] the input for step n. After running in the
    current environment, the checkpoint texts are recorded, the browser is
    restarted against the environment named by value[1], the same steps
    re-run, and the two recordings compared cell by cell.
    Returns True when all checkpoint values match, False otherwise.
    """
    print(menu)
    print(value)
    self.base = TestExcel()
    basedata = [Excel_basedata_zs, sheet48]
    # click first-level menu
    self.findxpath_click(self.base.first_menu(basedata[0]).get(menu[2]))
    # click second-level menu
    self.findxpath_click(
        self.base.second_menu(basedata[0]).get(f'{menu[2]}-{menu[3]}'))
    targetsheet = self.base.sheet_xpath_dic(basedata[0], basedata[1])
    wait = (By.XPATH, targetsheet.get('加载等待'))  # loading-mask locator
    # execute the remaining steps in their user-defined order
    l = len(menu)
    n = 4
    while n < l:
        if menu[n] not in self.base.operable_list(basedata[0], basedata[1]):
            print(f'操作元素"{menu[n]}"输入错误,请检查')
            return False
        else:
            findelement = self.findxpath(targetsheet.get(menu[n]))
            # elements operated by "click"/"tick"
            if menu[n] in ['导出', '搜索']:
                findelement.click()
                if menu[n] in ['搜索']:
                    self.wait_for_miss(120, wait)
            # elements operated by "type"
            elif menu[n] in ['查询日期']:
                if value[n] == '置空':
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    findelement.send_keys(value[n])
            # elements operated by "type then pick from dropdown"
            elif menu[n] in ['投组单元', '所在账户']:
                if value[n] == '置空':
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    sleep(1)
                    findelement.send_keys(value[n] + Keys.SPACE + Keys.BACK_SPACE)
                    sleep(1)
                # NOTE(review): indentation reconstructed — see balance_excel
                if menu[n] in ['投组单元', '所在账户']:
                    self.findxpath_click(targetsheet.get('投组下拉选择'))
            # elements operated by "click then pick"
            elif menu[n] in ['是否穿透', '产品类型']:
                a = self.base.enumeration_list2(basedata[0], basedata[1],
                                                menu[n])
                if menu[n] == '产品类型':
                    a.append('置空')
                if value[n] not in a:
                    print(f'值"{value[n]}"输入错误,请检查')
                    return False
                else:
                    findelement.click()
                    if menu[n] in ['是否穿透']:
                        self.findxpath_click(f'//li[text()="{value[n]}"]')
                    elif menu[n] in ['产品类型']:
                        if value[n] == '置空':
                            selectall = self.findxpath(
                                '//div[text()=" 全选"]')
                            action = ActionChains(self.driver)
                            action.double_click(selectall).perform()
                            findelement.click()
                        elif value[n] == '全选':
                            self.findxpath_click('//div[text()=" 全选"]')
                            findelement.click()
                        else:
                            self.findxpath_click(
                                f'//li[text()=" {value[n]}"]')
        n = n + 1
    # locate the checkpoint anchors used for the comparison
    m = self.base.checkpoint_list(basedata[0], basedata[1])
    point = self.findxpath(
        self.base.checkpoint_dic(basedata[0], basedata[1]).get(m[0]))
    # recorded values from the old environment
    p = len(m)
    if point.is_displayed():
        i = 0
        x = {}
        while i < p:
            o = self.findxpath(
                self.base.checkpoint_dic(basedata[0], basedata[1]).get(
                    m[i])).get_attribute('textContent')
            x.setdefault(m[i], o)
            i = i + 1
    else:
        print('案例最终结果不存在查询数据,请合理调整步骤')
        return False
    # close the browser
    self.end()
    # repeat the operations in the new environment
    self.start(value[1])
    # click first-level menu
    self.findxpath_click(self.base.first_menu(basedata[0]).get(menu[2]))
    # click second-level menu
    self.findxpath_click(
        self.base.second_menu(basedata[0]).get(f'{menu[2]}-{menu[3]}'))
    # execute the same steps again, in the same order
    l = len(menu)
    n = 4
    while n < l:
        if menu[n] not in self.base.operable_list(basedata[0], basedata[1]):
            print(f'操作元素"{menu[n]}"输入错误,请检查')
            return False
        else:
            findelement = self.findxpath(targetsheet.get(menu[n]))
            # elements operated by "click"/"tick"
            if menu[n] in ['导出', '搜索']:
                findelement.click()
                if menu[n] in ['搜索']:
                    self.wait_for_miss(120, wait)
            # elements operated by "type"
            elif menu[n] in ['查询日期']:
                if value[n] == '置空':
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    findelement.send_keys(value[n])
            # elements operated by "type then pick from dropdown"
            elif menu[n] in ['投组单元', '所在账户']:
                if value[n] == '置空':
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                else:
                    findelement.send_keys(Keys.CONTROL, 'a' + Keys.BACK_SPACE)
                    sleep(1)
                    findelement.send_keys(value[n] + Keys.SPACE + Keys.BACK_SPACE)
                    sleep(1)
                if menu[n] in ['投组单元', '所在账户']:
                    self.findxpath_click(targetsheet.get('投组下拉选择'))
            # elements operated by "click then pick"
            elif menu[n] in ['是否穿透', '产品类型']:
                a = self.base.enumeration_list2(basedata[0], basedata[1],
                                                menu[n])
                if menu[n] == '产品类型':
                    a.append('置空')
                if value[n] not in a:
                    print(f'值"{value[n]}"输入错误,请检查')
                    return False
                else:
                    findelement.click()
                    if menu[n] in ['是否穿透']:
                        self.findxpath_click(f'//li[text()="{value[n]}"]')
                    elif menu[n] in ['产品类型']:
                        if value[n] == '置空':
                            selectall = self.findxpath(
                                '//div[text()=" 全选"]')
                            action = ActionChains(self.driver)
                            action.double_click(selectall).perform()
                            findelement.click()
                        elif value[n] == '全选':
                            self.findxpath_click('//div[text()=" 全选"]')
                            findelement.click()
                        else:
                            self.findxpath_click(
                                f'//li[text()=" {value[n]}"]')
        n = n + 1
    # recorded values from the new environment
    r = 0
    y = {}
    while r < p:
        s = self.findxpath(
            self.base.checkpoint_dic(basedata[0], basedata[1]).get(
                m[r])).get_attribute('textContent')
        y.setdefault(m[r], s)
        r = r + 1
    # quick check on the first checkpoint
    z = 0
    if x.get(m[z]) == y.get(m[z]):
        # full check on all checkpoints
        t = 0
        while t < p:
            if x.get(m[t]) == y.get(m[t]):
                t = t + 1
            else:
                print(f'对比结果:{m[t]}:{x.get(m[t])}数据核对不一致,请检查并联系开发')
                return False
    else:
        print(f'对比结果:{m[z]}:{x.get(m[z])}数据核对不一致,请检查并联系开发')
        return False
    print('对比结果:数据核对一致')
    return True
def start_crawl(journal, start_year, end_year, output_file):
    """Crawl CNKI advanced search for *journal* papers in [start_year, end_year]
    and save title/author/source/date/citation/download counts to *output_file*.

    Returns True when at least one paper was collected, False on any timeout
    or missing element (the browser is always quit before returning).
    """
    global type_browser, url, WAIT_SECONDS, MAX_NUM_PAPERS
    print('Start crawling papers from {} published during {} - {}'.format(
        journal, start_year, end_year))
    browser = get_browser(type_browser, headless=False, use_proxy=False)
    browser.get(url)
    # wait for the "advanced search" button to load, then click it
    try:
        a_high_search = WebDriverWait(browser, WAIT_SECONDS).until(
            EC.element_to_be_clickable((By.LINK_TEXT, '高级检索')))
    except TimeoutException as e:
        print('Timeout during waiting for loading of high search buttom.')
        browser.quit()
        return False
    a_high_search.click()
    time.sleep(1)
    # switch to the newest window
    windows = browser.window_handles
    browser.switch_to.window(windows[-1])
    # grab the "source" label now; its staleness later signals the refresh
    try:
        span_src = browser.find_element_by_xpath(
            '//*[@id="gradetxt"]/dd[3]/div[2]/div[1]/div[1]/span')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    # wait for the "academic journal" button and click it (via JS)
    try:
        span_journal = WebDriverWait(browser, WAIT_SECONDS).until(
            EC.element_to_be_clickable(
                (By.XPATH,
                 '//ul[@class="doctype-menus keji"]/li[@data-id="xsqk"]/a/span'
                 )))
    except TimeoutException as e:
        print('Timeout during waiting for loading of journal buttom.')
        browser.quit()
        return False
    browser.execute_script('arguments[0].click();', span_journal)
    # refresh is done once the old "source" label goes stale
    try:
        WebDriverWait(browser, WAIT_SECONDS).until(EC.staleness_of(span_src))
    except TimeoutException as e:
        print('Timeout during refresh after clicking journal buttom')
        browser.quit()
        return False
    # wait for the journal-name input box
    try:
        input_journal = WebDriverWait(browser, WAIT_SECONDS).until(
            EC.presence_of_element_located(
                (By.XPATH, '//*[@id="gradetxt"]/dd[3]/div[2]/input')))
    except TimeoutException as e:
        print('Timeout during waiting for input box for jounral name.')
        browser.quit()
        return False
    input_journal.send_keys(journal)  # type the journal name
    time.sleep(1)
    # find the start-year box and fill it
    try:
        input_start_year = browser.find_element_by_xpath(
            '//input[@placeholder="起始年"]')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    input_start_year.send_keys(start_year)
    # find the end-year box and fill it
    try:
        input_end_year = browser.find_element_by_xpath(
            '//input[@placeholder="结束年"]')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    input_end_year.send_keys(end_year)
    # find the search button and click it
    try:
        input_search = browser.find_element_by_xpath('//input[@value="检索"]')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    input_search.click()
    # the results-count cell appearing signals the results page is loaded
    try:
        em_total = WebDriverWait(browser, WAIT_SECONDS).until(
            EC.visibility_of_element_located(
                (By.XPATH, '//*[@id="countPageDiv"]/span[1]/em')))
    except TimeoutException as e:
        print('Timeout during waiting for paper number cell.')
        browser.quit()
        return False
    '''
    # 检查筛选后的文献总数是否超过1500条,超过1500条需要输入验证码,这里直接放弃
    total = str2int(em_total.text)
    if total > MAX_NUM_PAPERS:
        print('Number of papers: {} > {}. Journal: {}, year: {} - {}.'.format(total, MAX_NUM_PAPERS, journal, start_year, end_year))
        browser.quit()
        return False
    '''
    # find the per-page-count widget and expand it
    try:
        div_perpage = browser.find_element_by_id(
            'perPageDiv').find_element_by_tag_name('div')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    browser.execute_script("arguments[0].scrollIntoView();", div_perpage)
    div_perpage.click()
    # wait for the "50 per page" option and click it
    try:
        li_50 = WebDriverWait(browser, WAIT_SECONDS).until(
            EC.element_to_be_clickable(
                (By.XPATH, '//div[@id="perPageDiv"]/ul/li[@data-val="50"]')))
    except TimeoutException as e:
        print(
            'Timeout during waiting for loading of buttom perPageDiv. Journal: {}, year: {} - {}.'
            .format(journal, start_year, end_year))
        browser.quit()
        return False
    # grab the per-page label; its staleness signals the list refresh
    try:
        span = browser.find_element_by_id(
            'perPageDiv').find_element_by_tag_name('span')
    except NoSuchElementException as e:
        print(str(e))
        browser.quit()
        return False
    li_50.click()
    # wait for the results list to refresh
    try:
        WebDriverWait(browser, WAIT_SECONDS).until(EC.staleness_of(span))
    except TimeoutException as e:
        print(
            'Timeout during waiting for refresh of search results after clciking buttom perPageDiv. Journal: {}, year: {} - {}.'
            .format(journal, start_year, end_year))
        browser.quit()
        return False
    cols = ['篇名', '作者', '期刊名称', '发表时间', '被引次数', '被下载次数']  # columns to save
    col2index = {}
    for i, col in enumerate(cols):
        col2index[col] = i + 1
    result = []
    page_cnt = 0
    # collect every result page
    while True:
        page_cnt += 1
        # each table row (tr) holds one paper
        try:
            trs = browser.find_elements_by_xpath(
                '//*[@id="gridTable"]/table/tbody/tr')
        except NoSuchElementException as e:
            print(str(e))
            browser.quit()
            return False
        # collect every paper on the current page
        for tr in trs:
            # each cell (td) holds one field, e.g. title, authors
            try:
                tds = tr.find_elements_by_tag_name('td')
            except NoSuchElementException as e:
                print(str(e))
                browser.quit()
                return False
            '''
            # 检查期刊名是否符合预期(可能模糊匹配)
            if not(tds[3].text == journal or tds[3].text.startswith(journal + '(')):
                print('Actual source: {}, expected source: {}'.format(tds[3].text, journal))
                continue
            # 检查发表时间是否符合预期
            y = int(tds[4].text.split('-')[0])
            if not check_date(y, start_year, end_year):
                print('Actual year: {}, expected year: {} - {}. Journal: {}, year: {} - {}.'.format(y, start_year, end_year, journal, start_year, end_year))
                continue
            '''
            info = OrderedDict()
            info['时间段'] = '2012-2020'
            for col in cols:
                if col == '发表时间':
                    info[col] = tds[col2index[col]].text
                else:
                    # other fields live inside an <a>; missing link -> empty
                    try:
                        a = tds[col2index[col]].find_element_by_tag_name('a')
                    except NoSuchElementException as e:
                        info[col] = ''
                        continue
                    info[col] = a.text
            result.append(info)
        # look for the next-page button; absence means we are done
        try:
            next_page = WebDriverWait(browser, WAIT_SECONDS).until(
                EC.element_to_be_clickable((By.ID, 'PageNext')))
        except TimeoutException:
            break
        # scroll the next-page button into view and click it
        browser.execute_script("arguments[0].scrollIntoView();", next_page)
        span = browser.find_element_by_xpath('//span[@class="cur"]')
        ActionChains(browser).move_to_element(next_page).click().perform()
        # a captcha always appears every 30 pages — pause to let it be solved
        if (page_cnt % 30) == 0:
            time.sleep(15)
        # staleness of the current-page marker signals the page refreshed
        try:
            WebDriverWait(browser, WAIT_SECONDS).until(EC.staleness_of(span))
        except TimeoutException as e:
            print(
                'Timeout during waiting for refresh of current page after clicking next page. Journal: {}, year: {} - {}.'
                .format(journal, start_year, end_year))
            browser.quit()
            return False
    # save the crawled records to an excel file
    if len(result):
        df = pd.DataFrame(data=result)
        df.to_excel(output_file)
    browser.quit()
    print(
        'Finish crawling papers from {} published in year: {} - {}, number of papers: {}'
        .format(journal, start_year, end_year, len(result)))
    return len(result) > 0
def sel_click(driver, element):
    """Move the pointer onto *element* and click it in one gesture."""
    chain = ActionChains(driver)
    chain.move_to_element(element)
    chain.click(element)
    chain.perform()
def sel_input_text(driver, element, text):
    """Click *element*, type *text*, then press ENTER — all in one chain."""
    chain = ActionChains(driver)
    chain.move_to_element(element).click(element)
    chain.send_keys(text, Keys.ENTER)
    chain.perform()
# Scraping fragment: open each date link in a new tab, then visit every
# article link on that date's page. NOTE(review): this chunk appears
# truncated — the "get data block" body is not visible here.
driver.get(url)
date_l, headline_l, des_l, author_l, vertical_l = [], [], [], [], []
links = driver.find_elements_by_xpath("//td[@align='center']//a")
for link in links[9:]:
    if link.get_attribute('text') != '':
        ######## set date block -> start ########
        window_before = driver.window_handles[0]  # get root tab
        # open the link in new tab (CTRL+click)
        ActionChains(driver).key_down(Keys.CONTROL).click(link).key_up(
            Keys.CONTROL).perform()
        window_after = driver.window_handles[-1]
        driver.switch_to.window(window_after)
        #%%%%%%%%%% iterate through links of the selected date -> start %%%%%%%%%%%%%%
        # Elements are re-queried on every pass because each click navigates
        # and stales the previously found references.
        for i in range(len(driver.find_elements_by_xpath("//tr/td/span/a"))):
            ActionChains(driver).move_to_element(
                driver.find_elements_by_xpath("//tr/td/span/a")[i]
            ).click().perform()
            #driver.find_elements_by_xpath("//tr/td/span/a")[i].click()
            #&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& get data block -> start &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# Booking-form fragment. NOTE(review): assumes an earlier block already
# defined driver, from_station, arrive_station and tommorrow — not visible here.
driver.find_element_by_id('departCityName').send_keys(from_station)
sleep(1)
# arrival city
driver.find_element_by_id('arriveCityName').send_keys(arrive_station)
sleep(1)
# strip the 'readonly' attribute so the departure-date field accepts input
driver.execute_script(
    "document.getElementById('departDate').removeAttribute('readonly')")
# departure date
departDate = driver.find_element_by_id('departDate')
departDate.clear()
departDate.send_keys(tommorrow)
sleep(1)
# Click a blank spot so the date-picker popup closes after typing the date.
ActionChains(driver).move_by_offset(0, 0).click().perform()
# start the search
searchbtn = driver.find_element_by_class_name('searchbtn')
searchbtn.click()
sleep(1)
# pick the second-class seat of train G7674
driver.find_element_by_css_selector(
    'body > div:nth-child(32) > div > div.lisBox > div.List-Box > div > div:nth-child(14) > div.w6 > div:nth-child(1) > a'
).click()
sleep(3)
driver.find_element_by_class_name('input-name').send_keys('小丸子')
# Notes:
# 1. The date popup may block typing — remove its readonly attr via JavaScript.
from selenium import webdriver
from selenium.webdriver import ActionChains

# ActionChains practice: drag-and-drop inside the runoob demo iframe.
browser = webdriver.Chrome()
url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
browser.get(url)
# The widget lives inside the result iframe.
browser.switch_to.frame('iframeResult')
# element to drag
source = browser.find_element_by_css_selector('#draggable')
# drop target
target = browser.find_element_by_css_selector('#droppable')
# Build the drag-and-drop gesture and execute it.
actions = ActionChains(browser)
actions.drag_and_drop(source, target).perform()
def auth_slide(self):
    """Solve a geetest slider captcha: diff screenshots to find the gap,
    build a human-like track, drag the slider, and retry on failure.
    Returns True once the captcha dialog is gone.
    """
    def get_distance(img1, img2):
        """Return the x-offset of the first differing column (the gap edge).

        NOTE(review): returns None when the images never differ — the
        caller relies on get_tracks() handling a falsy distance.
        """
        threshold = 60
        # skip the movable slider part on the left
        start_x = 57
        for i in range(start_x, img1.size[0]):
            for j in range(img1.size[1]):
                rgb1 = img1.load()[i, j]
                rgb2 = img2.load()[i, j]
                res1 = abs(rgb1[0] - rgb2[0])
                res2 = abs(rgb1[1] - rgb2[1])
                res3 = abs(rgb1[2] - rgb2[2])
                if not (res1 < threshold and res2 < threshold
                        and res3 < threshold):
                    return i - 7  # empirically measured offset error is ~7px

    def get_tracks(distance):
        """Build a slide track: accelerate then decelerate, overshoot a
        little, then slide back with small oscillations (looks human).
        """
        v = 0
        current = 0
        t = 0.2
        tracks = []
        if not distance:
            return tracks
        # forward phase (overshoot by 10px)
        while current < distance + 10:
            if current < distance * 2 / 3:
                a = 2
            else:
                a = -3
            s = v * t + 0.5 * a * (t**2)
            current += s
            tracks.append(round(s))
            v = v + a * t
        # backward phase
        current = 0
        while current < 13:
            if current < distance * 2 / 3:
                a = 2
            else:
                a = -3
            s = v * t + 0.5 * a * (t**2)
            current += s
            tracks.append(-round(s))
            v = v + a * t
        # final correction wiggles
        tracks.extend([2, 2, -3, 2])
        return tracks

    # 1. capture the full (gap-free) image
    if self.get_full_img:
        time.sleep(2)  # wait for the image to finish loading
        img_before = self.get_img()
    else:
        img_before = self._img_before
    # 2. click to reveal the gap image
    slider_btn = self.driver.find_element_by_class_name(
        "geetest_slider_button")
    slider_btn.click()
    # 3. capture the gapped image
    time.sleep(2)  # wait for the image to finish loading
    img_after = self.get_img()
    # 4. compute the slide track from the image diff
    tracks = get_tracks(get_distance(img_before, img_after))
    # 5. simulate the drag, one offset at a time
    slider_btn = self.driver.find_element_by_class_name(
        "geetest_slider_button")
    ActionChains(self.driver).click_and_hold(slider_btn).perform()
    for track in tracks:
        ActionChains(self.driver).move_by_offset(xoffset=track,
                                                 yoffset=0).perform()
    # 6. release the mouse after a short pause
    time.sleep(0.5)
    ActionChains(self.driver).release().perform()
    # 7. check whether verification succeeded
    time.sleep(2)
    div_tag = self.driver.find_element_by_class_name(
        "geetest_fullpage_click")
    if "display: block" in div_tag.get_attribute("style"):
        '''判断模块对话框是否存在,如果存在就说明没有验证成功,"display: block",重新去验证'''
        # dialog still visible -> failed; reuse the captured base image and retry
        self.get_full_img = False
        setattr(self, "_img_before", img_before)
        return self.auth_slide()
    else:
        # dialog hidden ("display: none") -> success
        # NOTE(review): the 1000s sleep looks like debug leftover — confirm.
        time.sleep(1000)
        return True
def __init__(self, driver): self.driver = driver # self.action = webdriver.ActionChains(driver) self.action = ActionChains(driver) self.configManager = ConfigManager()
def test_web_regressions(browser, basic_html):
    """End-to-end check of the Regressions page: table contents, the
    ignore/unignore buttons, and the mouseover plot popup.
    """
    html_dir, dvcs = basic_html
    bad_commit_hash = dvcs.get_hash('master~9')
    browser.set_window_size(1200, 900)
    with tools.preview(html_dir) as base_url:
        get_with_retry(browser, base_url)
        regressions_btn = browser.find_element_by_link_text('Regressions')
        regressions_btn.click()
        # Check that the expected links appear in the table
        regression_1 = browser.find_element_by_link_text(
            'params_examples.track_find_test(1)')
        regression_2 = browser.find_element_by_link_text(
            'params_examples.track_find_test(2)')
        bad_hash_link = browser.find_element_by_link_text(bad_commit_hash[:8])
        href = regression_1.get_attribute('href')
        assert '/#params_examples.track_find_test?' in href
        assert 'commits=' in href
        # Sort the tables vs. benchmark name (PhantomJS doesn't allow doing
        # it via actionchains)
        browser.execute_script("$('thead th').eq(0).stupidsort('asc')")
        WebDriverWait(browser, WAIT_TIME).until(
            EC.text_to_be_present_in_element(
                ('xpath', '//table[1]/tbody/tr[1]/td[1]'),
                'params_examples.track_find_test(1)'))
        # Check the contents of the table
        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 2
        cols1 = [td.text for td in table_rows[0].find_elements_by_xpath('td')]
        cols2 = [td.text for td in table_rows[1].find_elements_by_xpath('td')]
        assert cols1[0] == 'params_examples.track_find_test(1)'
        assert cols2[0] == 'params_examples.track_find_test(2)'
        assert re.match(r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d$', cols1[1])
        assert re.match(r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d$', cols2[1])
        assert cols1[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00',
                             'Ignore']
        assert cols2[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00',
                             'Ignore']
        # Check that the ignore buttons work as expected
        buttons = [button for button in
                   browser.find_elements_by_xpath('//button')
                   if button.text == 'Ignore']
        buttons[0].click()
        # The button should disappear, together with the link
        WebDriverWait(browser, WAIT_TIME).until_not(
            EC.visibility_of(buttons[0]))
        WebDriverWait(browser, WAIT_TIME).until_not(
            EC.visibility_of(regression_1))
        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 1
        # There's a second button for showing the links, clicking
        # which makes the elements reappear
        show_button = [button for button in
                       browser.find_elements_by_xpath('//button')
                       if button.text == 'Show ignored regressions...'][0]
        show_button.click()
        regression_1 = browser.find_element_by_link_text(
            'params_examples.track_find_test(1)')
        WebDriverWait(browser, WAIT_TIME).until(EC.visibility_of(regression_1))
        table_rows = browser.find_elements_by_xpath('//table[2]/tbody/tr')
        assert len(table_rows) == 1
        # There's a config sample element
        pre_div = browser.find_element_by_xpath('//pre')
        assert "params_examples\\\\.track_find_test\\\\(1\\\\)" in pre_div.text
        # There's an unignore button that moves the element back to the
        # main table
        unignore_button = [button for button in
                           browser.find_elements_by_xpath('//button')
                           if button.text == 'Unignore'][0]
        unignore_button.click()
        # wait until the table has two rows
        browser.find_elements_by_xpath('//table[1]/tbody/tr[2]')
        table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
        assert len(table_rows) == 2
        # Check that a plot of some sort appears on mouseover. The
        # page needs to be scrolled first so that the mouseover popup
        # has enough space to appear.
        regression_1 = browser.find_element_by_link_text(
            'params_examples.track_find_test(1)')
        y = regression_1.location['y']
        browser.execute_script('window.scrollTo(0, {0})'.format(y - 200))
        chain = ActionChains(browser)
        chain.move_to_element(regression_1)
        chain.perform()
        popover = browser.find_element_by_css_selector('div.popover-content')
        flotplot = browser.find_element_by_css_selector('canvas.flot-base')
def bot():
    """LinkedIn outreach bot.

    Logs in with hard-coded placeholder credentials, navigates to the
    connections list, and walks connection entries by index, sending a
    canned sales message to connections whose title matches the target
    criteria.

    NOTE(review): relies entirely on absolute XPaths and fixed sleeps, so
    it is brittle against any LinkedIn layout change.
    """
    # set paths for webdriver + initialize
    options = Options()
    options.add_argument('--incognito')
    options.binary_location = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.maximize_window()
    driver.get(
        'https://www.linkedin.com/login?fromSignIn=true&trk=guest_homepage-basic_nav-header-signin'
    )
    time.sleep(1)
    # enter login info ('EMAIL'/'PASSWORD' are placeholders to be filled in)
    user = driver.find_element_by_xpath(
        '/html/body/div/main/div[2]/form/div[1]/input')
    user.send_keys('EMAIL')
    pswd = driver.find_element_by_xpath(
        '/html/body/div/main/div[2]/form/div[2]/input')
    pswd.send_keys('PASSWORD')
    # submit form, try catch because it was having issues finding the button by a single absolute path
    try:
        driver.find_element_by_xpath(
            '/html/body/div/main/div[2]/form/div[4]').click()
    except Exception:
        try:
            driver.find_element_by_xpath(
                '/html/body/div/main/div[2]/form/div[3]/button').click()
        except Exception:
            driver.find_element_by_xpath(
                '//*[@id="app__container"]/main/div[2]/form/div[4]/button'
            ).click()
    # If a secondary "Sign in" link exists, click it; otherwise abort the run.
    try:
        driver.find_element_by_link_text('Sign in').click()
    except Exception:
        quit()
    time.sleep(1)
    # try catch for mobile authentication, seems unnecessary as of now
    try:
        driver.find_element_by_xpath(
            '/html/body/div/div[1]/section/div[2]/div/article/footer/div/div/button'
        ).click()
    except Exception:
        print('No auth needed')
    # Navigate to the network tab, then open the connections list.
    driver.find_element_by_xpath(
        '/html/body/div[7]/header/div[2]/nav/ul/li[2]/a/span').click()
    time.sleep(5)
    driver.find_element_by_xpath(
        '/html/body/div[7]/div[3]/div/div/div/div/div/aside/div/div/div[1]/section/div/div[1]/a'
    ).click()
    time.sleep(3)
    # need a way to both A: scroll through every single connection B: search for specific line in header of title
    # NEED CRITERIA AND MESSAGE TO SEND
    li_count = 11  # index of the first connection <li> to inspect
    # NOTE(review): this loop has no break condition -- once the list is
    # exhausted it keeps incrementing li_count and printing failures forever.
    while True:
        try:
            # Hover the connection's title line so its text is rendered.
            title = driver.find_element_by_xpath(
                '/html/body/div[7]/div[3]/div/div/div/div/div/div/div/div/section/ul/li['
                + str(li_count) + ']/div[1]/a/span[4]')
            actions = ActionChains(driver)
            actions.move_to_element(title).perform()
            temp = title.text
            # if li_count < 123:
            #     li_count += 1
            #     continue
            # NOTE(review): the final clause uses `or`, so it is True unless
            # BOTH excluded titles appear -- `and` was probably intended.
            if "Booz Allen" in temp and (
                    "Lead Associate" in temp or "Senior Associate" in temp
                    or "Sr. Associate" in temp) and (
                        "Vice President" not in temp
                        or "Program Manager" not in temp
                    ):  # and criteria in temp: #need criteria from pamela
                # Extract the connection's first name for the greeting.
                name = driver.find_element_by_xpath(
                    '/html/body/div[7]/div[3]/div/div/div/div/div/div/div/div/section/ul/li['
                    + str(li_count) + ']/div[1]/a/span[2]')
                name = str(name.text)
                print(name)
                name = name.split(' ')
                name = name[0]
                # Open the "Message" dialog for this connection.
                driver.find_element_by_xpath(
                    '/html/body/div[7]/div[3]/div/div/div/div/div/div/div/div/section/ul/li['
                    + str(li_count) + ']/div[2]/div[1]/button/span[1]').click()
                msgb = driver.find_element_by_xpath(
                    '/html/body/div[7]/aside/div[2]/div[1]/form/div[3]/div/div[1]/div[1]'
                )
                time.sleep(2)
                # Type the canned outreach message into the message box.
                msgb.send_keys(
                    name
                    + '- Glad to connect here. Advantech proudly continues to receive accolades from BAH VPs and PMs as your Technical & Professional Services HUBZone teammate for over 14 years, having supported BAH on 40+ national contracts, which is the reason our organization mirrors Booz Allen.\nPlease take a look at us at: http://advantech-gs.com.\n\nWe help Booz win by adding the hard to meet HUBZone goals; as you know, HUBZones in the professional service sector are very rare, so we are proud to provide competence and excellence in services, a differentiation that adds value to your team!\n\nWe\'re very proud of our reputation providing successful staff and contract management to support BAH, and I\'ve linked a reference here for you, if you\'d like to see what others at BAH have to say.\nhttps://www.linkedin.com/posts/advantech-gs-enterprises-inc_activity-6719290412378537984-2u8g\n\nPlease let me know what other information we can provide, or if you\'d like to have a quick call re: how else we can help you.\n\nMy email address is [email protected], and my cell number is 858.705.3069.\n\nThank you for your continued support!\nVery respectfully,\nJack Fraser, President & CEO\n'
                )
                time.sleep(2)
                # Click "Send".
                driver.find_element_by_xpath(
                    '/html/body/div[7]/aside/div[2]/div[1]/form/footer/div[2]/div[1]/button'
                ).click()
                time.sleep(2)
                # Close the message overlay before moving to the next entry.
                driver.find_element_by_xpath(
                    '/html/body/div[7]/aside/div[2]/header/section[2]/button[2]'
                ).click()
                li_count += 1
            else:
                li_count += 1
        except Exception:
            print('failed to find title')
            li_count += 1
            continue
def shubiao_zhixiang_xpath(self,shuxing): try: article = geturl.llq.find_element_by_xpath(shuxing) ActionChains(geturl.llq).move_to_element(article).perform() except: geturl.error(1)
from selenium import webdriver from selenium.webdriver.common.by import By from init import Init from selenium.webdriver import ActionChains import time driver = webdriver.Chrome(executable_path=Init.driver) driver.get(Init.url9) driver.implicitly_wait(10) driver.maximize_window() username = '******' password = '******' login = '******' driver.find_element(By.XPATH, username).clear() driver.find_element(By.XPATH, password).clear() driver.find_element(By.XPATH, username).send_keys(Init.username) driver.find_element(By.XPATH, password).send_keys(Init.password) driver.find_element(By.XPATH, login).click() header = driver.find_element(By.XPATH, '//*[@id="main"]/div[1]/span/div/div') actions = ActionChains(driver) actions.context_click(header).perform()
def joongonara(keyword):
    """Scrape recent (last 7 days) Joongna (중고나라) listings for *keyword*.

    Loads the mobile search page, clicks "load more" until all results are
    shown, visits each listing to collect its details, inserts the rows
    into a date-named MongoDB collection, drops yesterday's collection,
    and returns the rows as a pandas DataFrame.
    """
    url = 'https://m.joongna.com/search-list/product?searchword={}&dateFilter=7'.format(
        keyword)
    options = webdriver.ChromeOptions()
    # options.add_argument("user-agent={}".format(UserAgent().chrome))
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"
    )
    options.add_argument("headless")
    driver = webdriver.Chrome(options=options)
    driver.get(url)
    # Keep clicking the "load more" button until it disappears, which means
    # every search result has been rendered.
    tag = driver.find_element_by_xpath(
        '//*[@id="root"]/div[1]/div[2]/div[1]/div/button')
    action = ActionChains(driver)
    while True:
        try:
            action.move_to_element(tag).perform()
            driver.find_element_by_xpath(
                '//*[@id="root"]/div[1]/div[2]/div[1]/div/button/span').click(
                )
            time.sleep(2)
        except:
            # Button gone -> all results loaded.
            break
    links = driver.find_elements_by_css_selector('.pd_h20 div > div > a')
    print("loaded {} items!!!".format(len(links)))
    df = []
    n = 1
    for link in links:
        link = link.get_attribute('href')
        # NOTE(review): a brand-new headless Chrome is started for every
        # single listing; reusing one driver would be much faster.
        options = webdriver.ChromeOptions()
        options.add_argument("headless")
        # options.add_argument("user-agent={}".format(UserAgent().chrome))
        options.add_argument(
            "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"
        )
        driver = webdriver.Chrome(options=options)
        driver.get(link)
        time.sleep(1)
        try:
            title = driver.find_element_by_css_selector('.pd20 > p').text
            view_count = driver.find_element_by_css_selector(
                'p .c_orange').text
            desc = driver.find_elements_by_css_selector(
                '.ProductDetailComponent_tag__1sc7f.mb8')[0].text
            price = driver.find_element_by_css_selector('.pd20 strong').text
        except:
            # Listing page did not render the expected fields; skip it.
            print("err", end=" ")
            driver.quit()
            continue
        try:
            region = driver.find_elements_by_css_selector(
                'button .f15')[0].text
        except:
            # Region is optional; some listings don't show one.
            region = None
        df.append({
            'title': title,
            'price': price,
            'region': region,
            'view_counts': view_count,
            'desc': desc,
            'link': link,
            'market': '중고나라',
            'keyword': keyword
        })
        driver.quit()
        print(n, end=" ")
        n += 1
    driver.quit()
    # Store today's scrape in MongoDB and drop yesterday's collection.
    today = datetime.now()
    delta = timedelta(days=1)
    today_1 = today - delta
    config = configparser.ConfigParser()
    config.read('/home/ubuntu/masterpiece/mongo.ini')
    mongodb_ip = config["mongo"]
    client = pymongo.MongoClient(mongodb_ip["ip_address"])
    db = client.joongo
    # Collection named by today's date, e.g. "D210115".
    collection = db["D{}".format(today.strftime('%y%m%d'))]
    # NOTE(review): Collection.insert is deprecated in pymongo 3+;
    # insert_many is the modern equivalent.
    collection.insert(df)
    client.joongo.drop_collection("D{}".format(today_1.strftime('%y%m%d')))
    print("Done Crawling and Insert into Mongodb")
    return pd.DataFrame(df)
def hover(self, element): ActionChains(self.driver).move_to_element(element).perform()
element = driver.find_element_by_css_selector( "#fcxH9b > div.WpDbMd > c-wiz:nth-child(5) > div > c-wiz > div > div > c-wiz > c-wiz:nth-child(1) > c-wiz > div > div.Z3lOXb > div.W9yFB > a" ) element.click() '''페이지에 들어가자마자 50위 수집''' #순위를 수집하는 코드 firstElementList = driver.find_elements_by_css_selector( "#fcxH9b > div.WpDbMd > c-wiz:nth-child(6) > div > c-wiz > div > c-wiz > c-wiz > c-wiz > div > div.ZmHEEd > div" ) #수집한 순위의 마지막 요소를 선택 LastElement = firstElementList[-1] #마지막 요소로 스크롤을 이동시켜라 ActionChains(driver).move_to_element(LastElement).perform() #스크롤을 내리고 3초 쉬게 한다 요소가 불러올시간을 주기위함 '''스크롤을 내림으로써 새롭게 생긴 태그를 다시한번 불러온다''' for i in range(1, 4): time.sleep(3) elementList = driver.find_elements_by_css_selector( "#fcxH9b > div.WpDbMd > c-wiz:nth-child(6) > div > c-wiz > div > c-wiz > c-wiz > c-wiz > div > div.ZmHEEd > c-wiz" ) LastElement = elementList[-1] ActionChains(driver).move_to_element(LastElement).perform() #firstElementList -1 ~50까지 앱 요소 #elementList - 51~200위 까지 앱 요소 elementList = firstElementList + elementList
def double_click(context, element): ActionChains(context.driver).double_click(element).perform()
from selenium import webdriver from selenium.webdriver import ActionChains import time driver = webdriver.Chrome() driver.get("https://www.qcc.com") driver.maximize_window() # 弹出框 driver.find_element_by_xpath( "//*[@id='addfavorModal']/div/div/div[1]/img[1]").click() # 登录 driver.find_element_by_link_text("登录 | 注册").click() time.sleep(5) # 获取模块 ele = driver.find_element_by_xpath("//*[@id='nc_1_n1z']") # actionchain ac = ActionChains(driver) ac.click_and_hold(ele).move_by_offset(348, 0).perform() time.sleep(3) driver.quit()
def extract_user_info(driver, review):
    """Open a reviewer's profile popover from *review* and scrape their info.

    Returns a dict with username, demographics, rating histogram, helpful
    votes, cities visited, tags and an access timestamp.  On failure only
    the 'username' key is set: '******' when the profile link is missing,
    None when the popover never loaded.
    """
    user_dict = {}
    #find the user profile and set up the click action;
    try:
        reviewer_prof = review.find_element_by_xpath(
            './/div[contains(@class,"memberOverlayLink")]')
        Click = ActionChains(driver).click(reviewer_prof)
    #if that fails, it's probably because the user has no account (probably deleted?)
    except:
        user_dict['username'] = '******'
        return user_dict
    #click on the profile and wait until it loads (3-second timeout)
    profile_wait = WebDriverWait(driver, 3)
    try:
        Click.perform()
        profile = profile_wait.until(
            EC.presence_of_element_located(
                (By.XPATH, '//div[@class="memberOverlayRedesign g10n"]')))
    except:
        user_dict['username'] = None
        return user_dict
    #extract user info
    #username -- parsed from the profile URL, everything after '.../members/'
    reviewer_link = driver.find_element_by_xpath(
        '//div[@class="memberOverlayRedesign g10n"]/a').get_attribute('href')
    reviewer_username = re.sub('.*members/', '', reviewer_link)
    #demos -- demographic description lines, joined with ';'
    reviewer_demos = driver.find_elements_by_xpath(
        '//ul[@class="memberdescriptionReviewEnhancements"]/*')
    reviewer_demos = [element.text for element in reviewer_demos]
    reviewer_demos = ";".join(reviewer_demos)
    #reviewer rating histogram -- per-bucket review counts, joined with ';'
    histogram_rows = driver.find_elements_by_xpath(
        '//div[@class="chartRowReviewEnhancements"]')
    review_hist = []
    for row in histogram_rows:
        n_reviews = row.find_element_by_xpath('./span[3]').text
        review_hist += [n_reviews]
    review_hist = ";".join(review_hist)
    #reviewer helpful votes (optional -- None when absent)
    try:
        helpvotes = driver.find_element_by_xpath(
            '//div[@class="memberOverlayRedesign g10n"]//*[contains(text(), "Helpful votes")]'
        ).text
    except:
        helpvotes = None
    #reviewer cities visited (optional -- None when absent)
    try:
        n_cities_visited = driver.find_element_by_xpath(
            '//div[@class="memberOverlayRedesign g10n"]//*[contains(text(), "Cities visited")]'
        ).text
    except:
        n_cities_visited = None
    #reviewer tags, joined with ';'
    tags = driver.find_elements_by_xpath(
        '//a[@class="memberTagReviewEnhancements"]')
    tags = [tag.text for tag in tags]
    tags = ";".join(tags)
    #build dict item
    user_dict['username'] = reviewer_username
    user_dict['demos'] = reviewer_demos
    user_dict['review_hist'] = review_hist
    user_dict['helpvotes'] = helpvotes
    user_dict['n_cities_visited'] = n_cities_visited
    user_dict['tags'] = tags
    user_dict['time_accessed'] = str(datetime.datetime.now())
    #close the user profile -- make sure it's closed before moving on
    close_profile_button = driver.find_element_by_xpath(
        '//span[@class="ui_overlay ui_popover arrow_left "]/div[@class="ui_close_x"]'
    )
    close_profile_button.click()
    profile_wait.until_not(
        EC.presence_of_element_located(
            (By.XPATH, '//span[@class="ui_overlay ui_popover arrow_left "]')))
    return user_dict
# 2.获取用户名和密码的输入框,和登录按钮 username = dirver.find_element_by_name('username') password = dirver.find_element_by_xpath(".//*[@id='loginForm']/input[2]") login_button = dirver.find_element_by_xpath("//input[@type='submit']") # 3.使用send_keys方法输入用户名和密码,使用click方法模拟点击登录 username.send_keys("qiye") password.send_keys("qiye_pass") login_button.click() # 轮流点击选项卡 #select = dirver.find_element_by_xpath("//form/select") #all_options = select.find_elements_by_tag_name("option") #for option in all_options: # print("Value is: %s"% option.get_attribute("value")) # option.click() # 根据索引/文字/value值来获取选项卡 from selenium.webdriver.support.ui import Select #select = Select(dirver.find_element_by_xpath('//form/select ')) #select.select_by_index(1) #select.select_by_visible_text("用户名") #select.select_by_value("name") # 元素拖拽 element = dirver.find_element_by_name("source") target = dirver.find_element_by_name("target") from selenium.webdriver import ActionChains action_chains = ActionChains(dirver) action_chains.drag_and_drop(element,target).perform()