class AcceptanceTest(StaticLiveServerTestCase):
    """End-to-end check that a two-field intl-tel-input form submits and
    validates against the live test server."""

    def setUp(self):
        opts = Options()
        opts.headless = True
        self.driver = Firefox(options=opts)

    def tearDown(self):
        self.driver.quit()

    def test_two_inputs(self):
        browser = self.driver
        browser.get('{live_server_url}/two-field-test/'.format(
            live_server_url=self.live_server_url
        ))
        # Wait for the intl-tel-input utils script before typing.
        WebDriverWait(browser, 5).until(wait_for_utils_script())
        phone_fields = browser.find_elements_by_css_selector('input.intl-tel-input')
        phone_fields[0].send_keys('555-5555')
        phone_fields[1].send_keys('555-4444')
        phone_fields[1].send_keys(Keys.RETURN)
        # Successful validation renders the #success-text element.
        WebDriverWait(browser, 5).until(
            EC.presence_of_element_located((By.ID, 'success-text'))
        )
        self.assertIn('Form is valid', browser.page_source)
def add_advert():
    """Open avito's mobile login page and capture the captcha image.

    FIX: the original opened TWO Firefox instances (`browser` and `driver`)
    and looked up the e-mail field on the instance that had never navigated
    to the login page. A single browser now performs the navigation, the
    element lookup and the screenshot.
    """
    print("Add new advertisement.")
    g = Grab(log_file="2.html")
    g.load_cookies('cookies.txt')
    g.go("http://m.avito.ru/add")
    #login_test()
    from selenium.webdriver import Firefox
    from selenium.webdriver.common.keys import Keys
    from PIL import Image

    browser = Firefox()
    browser.get('http://m.avito.ru/profile/login')
    browser.implicitly_wait(10)
    elem = browser.find_element_by_css_selector(".control-self.control-self-email")
    elem.send_keys("*****@*****.**")
    """
    driver.find_element_by_name("password")
    element.send_keys("ivveqaem")
    driver.find_element_by_class_name("control-self control-self-submit button button-solid button-blue button-large")
    driver.find_element_by_partial_link_text("Войти")
    element.send_keys(Keys.ENTER)
    """
    #browser.get('http://m.avito.ru/add')
    browser.save_screenshot('current_page')
    current_page_img = Image.open('current_page')
    w, h = current_page_img.size
    captcha_img = current_page_img  # .crop((575, 505, w-155, h-1820))
    captcha_img.save('captcha.jpg', 'jpeg')
class Scraper(object):
    '''
    classdocs
    '''

    def __init__(self):
        '''
        Constructor
        '''
        # Job-search listing pre-filtered to Saudi Arabia (locID=121).
        self.url = 'http://www.jerryvarghese.com/Job-Search/ReqSearch.aspx?p=0&locID=121&loc=Saudi%20Arabia'
        #self.base_job_url = 'https://sjobs.brassring.com/TGWebHost/jobdetails.aspx?'
        self.browser = Firefox()  # requires a local Firefox/driver install
        # ASP.NET control ids used to drive the search pages.
        self.first_page_search_opening_id = 'srchOpenLink'
        self.second_page_search_btn_id = 'ctl00_MainContent_submit2'
        self.next_link_id = 'yui-pg0-0-next-link'

    def page_links(self):
        # Job links are repeater rows; ids 100-114 are assumed present --
        # NOTE(review): confirm this range matches the actual page size.
        job_link_id = 'Openings1_Rptr_FieldName_ct{}_lknReqTitle'
        for i in range(100, 115):
            link_id = job_link_id.format(str(i))
            link = self.browser.find_element_by_id(link_id)
            link.click()

    def main(self):
        # Python 2 code: print statements and sys.exc_traceback below.
        try:
            self.browser.get(self.url)
            self.page_links()
            counter = 1
        except Exception as e:
            print 'exception= ', str(e)
            #print 'stacktrace= ', traceback.print_exc()
            print 'Line Number= ' + str(sys.exc_traceback.tb_lineno)
def driver(capabilities):
    """Fixture body: yield a Firefox driver built from *capabilities*,
    quitting it on teardown."""
    firefox = Firefox(capabilities=capabilities, firefox_options=Options())
    yield firefox
    firefox.quit()
def __init__(self, url, headless=False):
    # Optionally flip the Firefox options to headless BEFORE the base
    # webdriver class is initialised (makeHeadLess is defined elsewhere
    # on this class).
    if headless:
        self.makeHeadLess()
    ffx.__init__(self)
    self.url = url
    self.minimum_timeout = 60  # seconds
    self.headless = headless
    self.timed_out = False
    # Timing fields; -99 is the sentinel for "not measured yet".
    self.load_time = -99
    self.login_time = -99
    self.logout_time = -99

    # Logging: console handler plus a size-rotated file handler
    # (1 MiB per file, 3 backups).
    logfmt = '%(levelname)s [%(asctime)s]:\t %(message)s'
    datefmt= '%m/%d/%Y %I:%M:%S %p'
    formatter = logging.Formatter(fmt=logfmt,datefmt=datefmt)
    self.logger = logging.getLogger('__main__')
    logging.root.setLevel(logging.DEBUG)
    # NOTE(review): hard-coded, user-specific log path -- consider making
    # this configurable.
    rfh = RotatingFileHandler(filename="/diska/home/jonny/sw/python/stress/stress/gp_timings.log",maxBytes=1048576,backupCount=3,mode='a')
    rfh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    # Drop handlers from any previous instantiation so log lines are not
    # duplicated.
    self.logger.handlers = []
    self.logger.addHandler(ch)
    self.logger.addHandler(rfh)
def test_test2(self):
    """test_test2

    Smoke-test that a Firefox session can load google.com.
    FIX: the driver is now always quit, so a failing `get` no longer
    leaks a browser process.
    """
    driver = Firefox()
    try:
        driver.get('https://www.google.com/')
        print('TestCaseCustomSearch test_test2')
    finally:
        driver.quit()
class CaptchaPage():
    # Python 2 code (print statements, ConfigParser, os.getcwdu).
    # Drives a local reCAPTCHA harness page through the user's own
    # Firefox profile (needed so the captcha renders with real cookies).

    def __init__(self):
        print "Captcha Page Initializing"
        parser = ConfigParser.ConfigParser()
        # Resolve the first Firefox profile directory from profiles.ini.
        base_path = os.path.join(os.environ['HOME'], '.mozilla/firefox/')
        parser.read(os.path.join(base_path, "profiles.ini"))
        profile_path = os.path.join(base_path, filter(lambda x: x[0].lower() == 'path', parser.items('Profile0'))[0][1])
        try:
            profile = FirefoxProfile(profile_path)
        except OSError:
            # The profile directory is typically not world-readable.
            raise Exception("You must execute the following command:\nsudo chmod +r -R %s" % profile_path)
        self.driver = Firefox(profile)
        # The harness page is served from the current working directory.
        self.driver.get("file://%s/index.html" % os.getcwdu())

    def get_url_sound(self):
        # Switch to the audio challenge and return its download link.
        self.driver.find_element_by_xpath('//*[@id="recaptcha_switch_audio"]').click()
        return self.driver.find_element_by_xpath('//*[@id="recaptcha_audio_download"]').get_attribute('href')

    def get_recaptcha_challenge_field(self):
        # Hidden challenge token posted along with the answer.
        return self.driver.find_element_by_xpath('//*[@id="recaptcha_challenge_field"]').get_attribute('value')

    def get_captcha_textbox(self):
        print "Getting Captcha Textbox"
        return Textbox(self.driver.find_element_by_xpath('//*[@id="recaptcha_response_field"]'))

    def get_submit_button(self):
        print "Getting Submit Form Button"
        return Button(self.driver.find_element_by_xpath("/html/body/form/input"))

    def close(self):
        print "Closing Captcha Page"
        self.driver.close()
def __init__(self):
    # Python 2 spider bootstrap: pair Selenium RC with a WebDriver-backed
    # Firefox session and print the first five self-service library items.
    CrawlSpider.__init__(self)
    print "szlibspider start"
    self.verificationErrors = []
    self.selenium = selenium("localhost", 4444, "*firefox /usr/lib/firefox/firefox", "http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    ffdriver = Firefox()
    self.selenium.start(driver=ffdriver)
    # self.selenium.start()
    sel = self.selenium
    # sel.open("http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    ffdriver.get("http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    # ajax_complete is a module-level wait predicate.
    WebDriverWait(ffdriver,30).until(ajax_complete, "Timeout waiting page to load")
    #Wait for javscript to load in Selenium
    # time.sleep(20)
    # sel.wait_for_condition("condition by js", 20000);
    # print "ul/li visible? %s" % sel.is_element_present("//ul[@class='servicepointlist']")
    elements = ffdriver.find_elements_by_xpath("//ul[@class='servicepointlist']/li[@class='item']")
    # Only the first five service points are dumped.
    for element in elements[:5]:
        print "%s" % element.find_element_by_class_name("num").text
        print "%s" % element.find_element_by_class_name("title").text
        print "%s" % element.find_element_by_class_name("text").text
        print "---------------"
def ff():
    """Open the Charter channel-lineup page and click the state selector.

    FIX: WebDriverWait was constructed with the literal string 'browser'
    instead of the driver object, so any use of the wait would have
    failed; it now wraps the real driver instance.
    """
    browser = Firefox()
    browser.get('https://www.charter.com/browse/content/new-channel-lineup')
    wait = WebDriverWait(browser, 20)
    # find and click Choose State button
    button = browser.find_element_by_xpath('//*[@id="select-state"]/div[2]')
    button.click()
def temp_main():
    """Visit every contact-info URL using one shared Firefox session.

    FIX: the driver is quit in a finally block so browser processes are
    not leaked when get_parser raises.
    """
    urls = map(lambda x: x[1], gen_url_contactinfo())
    # Append the tracking query string expected by the target site.
    urls = map(lambda x: x + "?spm=a2615.7691481.0.0.OCyk7j", urls)
    driver = Firefox()
    try:
        driver.maximize_window()
        for url in urls:
            print(url)
            get_parser(url, driver)
    finally:
        driver.quit()
def control(request):
    """Fixture body: open the bundled test.html in Firefox and attach a
    MainPage object to the running test instance."""
    browser = Firefox()
    page_path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'test.html')
    browser.get(page_path)
    request.instance.page = MainPage(browser)

    def fin():
        browser.quit()

    request.addfinalizer(fin)
def driver(request):
    """Parametrised browser fixture: request.param selects 'firefox' or
    'chrome'; anything else is rejected."""
    name = request.param
    if name == 'firefox':
        instance = Firefox()
    elif name == 'chrome':
        instance = Chrome()
    else:
        raise ValueError("Unknown browser '{}'".format(name))
    request.addfinalizer(lambda: instance.quit())
    return instance
def test_rendering_utf8_iframe():
    """A UTF-8 paragraph placed inside an IFrame survives a headless
    Firefox render round-trip."""
    frame = elem.IFrame(html=u'<p>Cerrahpaşa Tıp Fakültesi</p>')
    opts = Options()
    opts.add_argument('-headless')
    browser = Firefox(options=opts)
    browser.get('data:text/html,' + frame.render())
    browser.switch_to.frame(0)
    assert u'Cerrahpaşa Tıp Fakültesi' in browser.page_source
def test_we_can_launch_multiple_firefox_instances(capabilities):
    """Three Firefox sessions can coexist.

    FIX: drivers that have already started are quit even when a later
    launch fails, so a mid-test failure no longer leaks browser processes.
    """
    drivers = []
    try:
        for _ in range(3):
            drivers.append(Firefox(capabilities=capabilities))
    finally:
        for d in drivers:
            d.quit()
def print_prices():
    # Python 2 script: scrape the 20 cheapest offers (listing sorted
    # ascending by price) from cars.mail.ru and print each price's text.
    driver = Firefox()
    driver.get("https://cars.mail.ru/sale/msk/all/?order=price&dir=asc")
    elements = [None] * 20
    for i in range(0, 20):
        print 'finding by xpath', i
        # XPath positional predicates are 1-based, hence i + 1.
        elements[i] = WebDriverWait(driver, 10).until(
            lambda d: d.find_element_by_xpath('(//span[@class="offer-card__price__value"])[%d]' % (i + 1,)))
    for i in range(0, 20):
        print 'text', elements[i].text
    driver.quit()
def test_that_we_can_accept_a_profile(capabilities, webserver):
    """A FirefoxProfile cloned from another profile's on-disk path keeps
    the preferences set on the original."""
    base_profile = FirefoxProfile()
    base_profile.set_preference("browser.startup.homepage_override.mstone", "")
    base_profile.set_preference("startup.homepage_welcome_url",
                                webserver.where_is('simpleTest.html'))
    base_profile.update_preferences()

    cloned = FirefoxProfile(base_profile.path)
    driver = Firefox(capabilities=capabilities, firefox_profile=cloned)
    title = driver.title
    driver.quit()
    assert "Hello WebDriver" == title
def test_add_extension_web_extension_without_id(capabilities, webserver):
    """Installing a web extension without an embedded id still lands it in
    the profile's extensions directory under its manifest id."""
    here = os.path.dirname(os.path.realpath(__file__))
    repo_root = os.path.join(here, '..', '..', '..', '..', '..')
    xpi = os.path.join(repo_root, 'third_party', 'firebug',
                       'mooltipass-1.1.87.xpi')

    profile = FirefoxProfile()
    profile.add_extension(xpi)
    driver = Firefox(capabilities=capabilities, firefox_profile=profile)
    installed = os.path.join(driver.firefox_profile.path, 'extensions',
                             '[email protected]')
    assert os.path.exists(installed)
    driver.quit()
def test_add_extension_legacy_extension(capabilities, webserver):
    """A legacy (pre-WebExtension) XPI is unpacked into the profile's
    extensions directory under its install-rdf id."""
    here = os.path.dirname(os.path.realpath(__file__))
    repo_root = os.path.join(here, '..', '..', '..', '..', '..')
    xpi = os.path.join(repo_root, 'third_party', 'firebug',
                       'firebug-1.5.0-fx.xpi')

    profile = FirefoxProfile()
    profile.add_extension(xpi)
    driver = Firefox(capabilities=capabilities, firefox_profile=profile)
    installed = os.path.join(driver.firefox_profile.path, 'extensions',
                             '*****@*****.**')
    assert os.path.exists(installed)
    driver.quit()
def __init__(self, base_url, query_params):
    # Archive any previous results before starting a new scrape.
    self.__take_results_backup()
    options = Options()
    options.add_argument("--headless")
    # Prefer headless Chrome; fall back to a (non-headless) Firefox when
    # the Chrome driver cannot start.
    # NOTE(review): the broad `except Exception` also hides configuration
    # errors; narrowing to WebDriverException would be safer.
    try:
        self.driver=Chrome(options=options)
    except Exception as e:
        print(f'Error occured during Chrome driver : {e}')
        self.driver=Firefox()
    self.driver.get(base_url + query_params)
    # set up the next page element
    self.nextpage_element=self.driver.find_element_by_css_selector(
        ".pager-next a")
def get_air_data(positionsets): # Dictionary hourdata is for holding data, DataStructure like: # {'baiyunshan': [44, 5], 'haizhubaogang': [55, 6]} hourdata = {} # Calling selenium, need linux X browser = Firefox() browser.get(URL) # Added 10 seconds for waiting page for loading. time.sleep(10) # Click button one-by-one for position in positionsets: # After clicking, should re-get the page_source. browser.find_element_by_id(position).click() page_source = browser.page_source # Cooking Soup soup = BeautifulSoup(page_source, 'html.parser') # pm2.5 value would be something like xx 微克/立方米, so we need an regex for # matching, example: print int(pattern.match(input).group()) try: PM25 = int(pattern.match(soup.find('td',{'id': 'pmtow'}).contents[0]).group()) PM25_iaqi = int(pattern.match(soup.find('td',{'id': 'pmtow_iaqi'}).contents[0]).group()) PM10 = int(pattern.match(soup.find('td',{'id': 'pmten'}).contents[0]).group()) PM10_iaqi = int(pattern.match(soup.find('td',{'id': 'pmten_iaqi'}).contents[0]).group()) SO2 = int(pattern.match(soup.find('td',{'id': 'sotwo'}).contents[0]).group()) SO2_iaqi = int(pattern.match(soup.find('td',{'id': 'sotwo_iaqi'}).contents[0]).group()) NO2 = int(pattern.match(soup.find('td',{'id': 'notwo'}).contents[0]).group()) NO2_iaqi = int(pattern.match(soup.find('td',{'id': 'notwo_iaqi'}).contents[0]).group()) # Special notice the CO would be float value CO = float(floatpattern.match(soup.find('td',{'id': 'co'}).contents[0]).group()) CO_iaqi = int(pattern.match(soup.find('td',{'id': 'co_iaqi'}).contents[0]).group()) O3 = int(pattern.match(soup.find('td',{'id': 'othree'}).contents[0]).group()) O3_iaqi = int(pattern.match(soup.find('td',{'id': 'othree_iaqi'}).contents[0]).group()) hourdata_key = pinyin.get(position) hourdata[hourdata_key] = [] hourdata[hourdata_key].append(PM25) hourdata[hourdata_key].append(PM25_iaqi) hourdata[hourdata_key].append(PM10) hourdata[hourdata_key].append(PM10_iaqi) hourdata[hourdata_key].append(SO2) 
hourdata[hourdata_key].append(SO2_iaqi) hourdata[hourdata_key].append(NO2) hourdata[hourdata_key].append(NO2_iaqi) hourdata[hourdata_key].append(CO) hourdata[hourdata_key].append(CO_iaqi) hourdata[hourdata_key].append(O3) hourdata[hourdata_key].append(O3_iaqi) except ValueError, Argument: # won't add the data, simply ignore this position print "The argument does not contain numbers\n", Argument
def load_search_results(context_id=None):
    """Run the search scrape for every distinct flashcard term, optionally
    restricted to one context, and return the rows as a DataFrame."""
    flashcards = load_flashcards_data()
    if context_id is not None:
        flashcards = flashcards[flashcards['context_id'] == context_id]
    terms = (flashcards[['term_id', 'term_name']]
             .drop_duplicates()
             .dropna(subset=['term_name']))
    to_process = list(terms.values)
    random.shuffle(to_process)

    global BROWSER
    BROWSER = Firefox()
    try:
        rows = [_load_search_results_apply(term_id, term_name)
                for term_id, term_name in progress.bar(to_process)]
        return pandas.DataFrame(rows)
    finally:
        BROWSER.quit()
def __init__(self):
    # Python 2 Scrapy spider bootstrap: drives a Selenium RC session
    # backed by a WebDriver Firefox instance, walks the Shenzhen Library
    # self-service-lib listing district by district, then fetches the
    # per-lib book-category pages.
    CrawlSpider.__init__(self)
    log.start(logfile="./log/szlib-%s.log" % strftime("%m%d-%H-%M",localtime(time())), loglevel=log.INFO,logstdout=False)
    log.msg("szlibspider start")
    print "szlibspider start"
    self.verificationErrors = []
    self.selenium = selenium("localhost", 4444, "*firefox /usr/lib/firefox/firefox", "http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    ffdriver = Firefox()
    self.selenium.start(driver=ffdriver)
    # self.selenium.start()
    sel = self.selenium
    # sel.open("http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    ffdriver.get("http://www.szlib.gov.cn/libraryNetwork/selfLib/id-5.html")
    # ajax_complete is a module-level wait predicate.
    WebDriverWait(ffdriver,30).until(ajax_complete, "Timeout waiting page to load")
    #Wait for javscript to load in Selenium
    # time.sleep(20)
    # sel.wait_for_condition("condition by js", 20000);
    # print "ul/li visible? %s" % sel.is_element_present("//ul[@class='servicepointlist']")
    # District filter anchors; the first anchor is the "all" tab, so the
    # loop below skips it.
    elements = ffdriver.find_elements_by_xpath("//div[@class='boxtext']/div[@class='filterbox_1']/div[@class='text tab_4_tit district_list']/a")
    num = "wijefowaeofjwejf SSL0011"  # placeholder; overwritten in the loop
    selflibs_num = []
    for district in elements[1:]:
        log.msg("%s selflibs:" % district.text)
        log.msg("==================")
        district.click()
        WebDriverWait(ffdriver,30).until(ajax_complete, "Timeout waiting to load selflibs")
        selflibs_elements = ffdriver.find_elements_by_xpath("//ul[@class='servicepointlist']/li[@class='item']")
        for selflib_ele in selflibs_elements:
            # num = selflib_ele.find_element_by_class_name("num").text
            # textContent is used so hidden elements still yield text.
            num = selflib_ele.find_element_by_class_name("num").get_attribute("textContent")
            log.msg("num %s" % num)
            # The last 7 characters are the self-lib id (e.g. SSL0011).
            selflibs_num.append(num[-7:])
            log.msg("numid %s" % num[-7:] )
            log.msg("%s" % selflib_ele.find_element_by_class_name("title").get_attribute("textContent"))
            log.msg("%s" % selflib_ele.find_element_by_class_name("text").get_attribute("textContent"))
            log.msg("---------------")
        log.msg("------1---------")
    # ffdriver.quit()
    # numstr = unicode("编号","utf-8")
    # numstr = unicode(num,"utf-8")
    # log.msg("numstr is in num? %s" % (numstr in num))
    # log.msg("%s,%s, %s" % (num,num[1], num[-7:]))
    # Fetch the book listing page for every collected self-lib id.
    for selflibnum in selflibs_num:
        selflib_url ="http://www.szlib.gov.cn/libraryNetwork/dispSelfLibBook/id-5/%s.html" % selflibnum
        log.msg("selflib url %s" % selflib_url)
        ffdriver.get(selflib_url)
        WebDriverWait(ffdriver,30).until(ajax_complete, "Timeout waiting to load booklist")
        # Category anchors; again skip the leading "all" tab.
        categorys_elements = ffdriver.find_elements_by_xpath("//div[@class='boxtext']/div[@class='filterbox_1']/div[@class='text tab_4_tit category']/a")
        for category_ele in categorys_elements[1:]:
            log.msg("%s" % category_ele.text)
class ContentRetrieverUsingSelenium:
    """Fetch fully-rendered pages through a Firefox session.

    FIX: close() now calls quit() instead of close() -- close() only
    closes the current window and leaves the browser/driver process
    running (a process leak).
    """

    def __init__(self, timeout):
        self.browser = Firefox()
        self.timeout = timeout  # seconds to let page scripts run after load

    def getContentOfPage(self, url):
        """Return (final_url, BeautifulSoup) for *url* after rendering."""
        self.browser.get(url)
        time.sleep(self.timeout)
        page_source = self.browser.page_source
        # Round-trip through GBK, dropping characters it cannot encode.
        # NOTE(review): assumes downstream expects GBK bytes -- confirm.
        page_source = page_source.encode('gbk', 'ignore')
        return (self.browser.current_url, BeautifulSoup(page_source))

    def close(self):
        # quit() tears down the whole session, not just the active window.
        self.browser.quit()
def setUp(self):
    """Pick a webdriver for the test run: Sauce-hosted Chrome on CI,
    otherwise a headless local Firefox when SELENIUM is enabled."""
    if _CI:
        self.driver = self.sauce_chrome_webdriver()
    elif settings.SELENIUM is True:
        firefox_opts = FirefoxOptions()
        firefox_opts.add_argument('-headless')
        self.driver = Firefox(firefox_options=firefox_opts)
    self.driver.implicitly_wait(10)
def open_browser(self):
    """Open the Charter lineup page and click the state drop-down.

    FIX: WebDriverWait was built around the literal string 'browser'
    rather than the driver object; it now wraps the real browser.
    """
    # Open Firefox to charter using Selenium
    browser = Firefox()
    browser.get('https://www.charter.com/browse/content/new-channel-lineup')
    # Click the "choose state" menu using Selenium
    wait = WebDriverWait(browser, 20)
    button = browser.find_element_by_css_selector('.drop-down-current')
    button.click()
    # # Identify all states in the list, read as text using Selenium
    list_item = browser.find_element_by_class_name('drop-down-list')
    states = list_item.text
    # print states
    # sel_st = raw_input('Type in 2 letter st abbreviation: ')

def prompt(self):
    # FIX: the original `def prompt(self)` had no colon or body
    # (SyntaxError). Stubbed out pending the real implementation.
    pass
def __init__(self):
    """Set up the Baidu Waimai comment crawler: endpoint/output paths,
    a browser session, and the set of already-crawled shop ids."""
    today = datetime.now().strftime("%Y-%m-%d")
    # The shop id is appended to this base URL per request.
    self.comment_root_url = "http://waimai.baidu.com/shopui/?qt=shopcomment&shop_id="
    self.comment_root_path = "files/baiduwaimai_comments-%s.json" % today
    self.crawled_id_filepath = "files/crawled_ids.txt"
    self.crawled_ids = []
    self.ids = defaultdict(list)
    self.browser = Firefox()
    # Populates self.crawled_ids from the file on disk.
    self.get_crawled_ids()
def setup_browser():
    """Fixture body: yield a Firefox window sized and positioned for
    screen recording, quitting it afterwards."""
    firefox = Firefox()
    # WORKAROUND for recordmydesktop bug
    firefox.set_window_size(1280, 800)
    firefox.set_window_position(10, 10)
    yield firefox
    firefox.quit()
def __init__(self):
    # Page range and entry URL come from module-level configuration.
    self.start_page = START_PAGE
    self.end_page = END_PAGE
    self.weixin_url = REFER_FIRST
    self.driver = Firefox()
    # MongoDB sink for scraped items.
    self.client = MongoClient(HOST, PORT)
    self.collection = self.client[DB][COLLECTION]
    # NOTE(review): self.uids looks like a property defined elsewhere on
    # the class -- confirm it is available at construction time.
    self.all_uids = self.uids
def __init__(self):
    '''
    Constructor
    '''
    self.browser = Firefox()
    # Search listing pre-filtered to Saudi Arabia (locID=121).
    self.url = ('http://www.jerryvarghese.com/Job-Search/'
                'ReqSearch.aspx?p=0&locID=121&loc=Saudi%20Arabia')
    #self.base_job_url = 'https://sjobs.brassring.com/TGWebHost/jobdetails.aspx?'
    # ASP.NET control ids used while driving the search pages.
    self.first_page_search_opening_id = 'srchOpenLink'
    self.second_page_search_btn_id = 'ctl00_MainContent_submit2'
    self.next_link_id = 'yui-pg0-0-next-link'
def read_url(url):
    # Open *url* in Firefox (module-level `options`) and scroll to the
    # bottom in randomised steps to mimic a human reader.
    driver = Firefox(options=options)
    driver.maximize_window()
    driver.get(url)
    time.sleep(4)  # let the initial render settle
    height = driver.execute_script("return document.body.scrollHeight")
    print(height)
    position = 0
    while position < height:
        driver.execute_script(f"window.scrollTo(0, {position});")
        # Random 50-500px step; pause roughly proportional to the step
        # (up to ~25 s), presumably to look like reading -- TODO confirm
        # such long pauses are intended.
        delta = random.randint(50, 500)
        position += delta
        duration = delta // 20
        # print(height, position, delta, duration)
        time.sleep(duration)
    driver.close()
#! python3.8.2
# Exercise script: keep clicking the page's button until the last
# paragraph announces a win ("Você ganhou").
#
# FIX: the `try:` block had no except/finally clause (SyntaxError) and the
# browser was not guaranteed to be closed; quitting now happens in a
# finally block.
from selenium.webdriver import Firefox
from time import sleep

url = 'https://curso-python-selenium.netlify.app/exercicio_02.html'
navegador = Firefox()
try:
    navegador.get(url)
    sleep(1)
    p = navegador.find_elements_by_tag_name('p')
    # Expected label/number from the second paragraph, e.g. "Você ganhou:7".
    [esperado_texto, numero_esperado] = p[1].text.split(':')
    botao = navegador.find_element_by_id('ancora')
    valor_atual_p = ''
    # while f'Você ganhou:{numero_esperado}' != valor_atual_p:
    #     botao.click()
    #     p = navegador.find_elements_by_tag_name('p')
    #     valor_atual_p = p[-1].text
    #     print(valor_atual_p)
    while 'Você ganhou' not in valor_atual_p:
        botao.click()
        p = navegador.find_elements_by_tag_name('p')
        valor_atual_p = p[-1].text
        print(valor_atual_p)
    sleep(3)
finally:
    navegador.quit()
""" 1 - Check if the change occurs in span (focus, blus) 2 - Check if the change occurs in p (change) """ from selenium.webdriver import Firefox browser = Firefox() browser.get('https://selenium.dunossauro.live/aula_07_d') text_input = browser.find_element_by_tag_name('input') span = browser.find_element_by_tag_name('span') p = browser.find_element_by_tag_name('p') """ When input is clicked Then the text `está com foco` should be the content of `span` When span is clicked Then the text `está sem foco` should be the content of `span` """ text_input.click() assert 'está com foco' == span.text, 'Failed - span out of focus' span.click() assert 'está sem foco' == span.text, 'Failed - span in focus' """ Given that `p` content is `1` When `text` is sent in element `input` Then the text `está com foco` should be the content of `span` When span is clicked Then the text `está sem foco` should be the content of `span`
import base64, time

# Target directory for browser downloads.
# NOTE(review): Firefox's browser.download.dir preference needs an
# absolute path -- './' may be silently ignored; confirm.
downloadDir = './'

options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu')

# Profile preferences: download CSV attachments into downloadDir
# without showing a save dialog.
fp = FirefoxProfile()
fp.set_preference("browser.download.folderList", 2)
fp.set_preference("browser.download.dir", downloadDir)
fp.set_preference("browser.download.manager.showWhenStarting", False)
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "attachment/csv")

driverpath = './geckodriver'
driver = Firefox(executable_path=driverpath, options=options, firefox_profile=fp)
driver.get("https://www.boce.com")  # go to this site

# Submit the domain to test in boce's input field.
url_input = driver.find_element_by_class_name('el-input__inner')
url_input.send_keys('www.tonnyshanghai.com')
url_enter = driver.find_elements_by_class_name('el-button--primary')[1]
url_enter.click()

# Poll until the result detail button appears, then click it.
while(True):
    try:
        result_button = driver.find_element_by_css_selector('.result-wrap .detail-wrap .el-button')
        result_button.click()
        break
    except:
        pass
# Test data
td = {"url": "https://www.amazon.com", "browser": "chrome"}

from selenium.webdriver import Chrome, Firefox

pathChrome = "../driver/chromedriver"
pathFF = "../driver/geckodriver"

# FIX: Firefox's first positional parameter is firefox_profile, NOT the
# driver executable path, so Firefox(pathFF) never used the geckodriver
# binary; it must be passed as executable_path. (Chrome's first
# positional parameter *is* executable_path, so that call was fine.)
if td["browser"] == "chrome":
    driver = Chrome(pathChrome)
else:
    driver = Firefox(executable_path=pathFF)

driver.get(td["url"])
driver.fullscreen_window()
driver.quit()
def _reverse_whois():
    """Option 5: scrape viewdns.info reverse-whois with headless Firefox."""
    from grab import Grab
    from selenium.webdriver import Firefox
    from selenium.webdriver.firefox.options import Options
    opts = Options()
    opts.headless = True
    br = Firefox(options=opts)
    show_banner(clear=True)
    print(
        f' {COLORS.WHSL}Примеры ввода запроса: {COLORS.GNSL}google.com, pornohub.com, ruvds.com \n'
    )
    inp = input(
        f'{COLORS.REDL} ──> {COLORS.GNSL}Введите domain: {COLORS.WHSL}'
    )
    print(
        f"\n {COLORS.GNSL}Ожидайте пока закончится сбор информации.{COLORS.WHSL} \n"
    )
    br.get(f'https://viewdns.info/reversewhois/?q={inp}')
    try:
        all_list = br.find_elements_by_xpath(
            '/html/body/font/table[2]/tbody/tr[3]/td/font/table/tbody/tr'
        )
        if not all_list:
            br.quit()
            print(f' Ничего не найдено по вашему запросу')
        else:
            for elem in all_list:
                domain, date, registrar = elem.find_elements_by_xpath('td')
                print(
                    f' {domain.text}\t\t{date.text}\t\t{registrar.text}'
                )
                print(f" {COLORS.REDL}={COLORS.WHSL}" * 70)
            print()
            br.quit()
    except:
        br.quit()


def _virustotal_subdomains():
    """Option 15: list the target's subdomains via the VirusTotal API."""
    show_banner(clear=True)
    host = input(
        f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Введите address: "
    )
    print(
        f'\n{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Получение субдоменов цели...\n'
    )
    if not virustotal_api:
        print(
            f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Проблемы с ключом api, напишите разработчикам !"
        )
        return
    url = 'https://www.virustotal.com/vtapi/v2/domain/report'
    params = {'apikey': virustotal_api, 'domain': host}
    subdomains = requests.get(url, params=params).json()
    for x in subdomains['subdomains']:
        print(x)
    run()


def _whatcms_detect():
    """Option 16: identify the target's CMS via whatcms.org."""
    requests.packages.urllib3.disable_warnings()
    show_banner(clear=True)
    domain = input(
        f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Введите address: "
    )
    if not cms_detect_api:
        print(
            f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Проблемы с ключом api, напишите разработчикам !"
        )
        return
    payload = {'key': cms_detect_api, 'url': domain}
    cms_url = "https://whatcms.org/APIEndpoint/Detect"
    cms_info = requests.get(cms_url, params=payload).json()['result']
    if cms_info['code'] == 200:
        print(f' Detected CMS : {cms_info["name"]}')
        print(f' Detected Version : {cms_info["version"]}')
        print(f' Confidence : {cms_info["confidence"]}')
    else:
        print(cms_info['msg'])
        print(f' Detected CMS : {cms_info["name"]}')
        print(f' Detected Version : {cms_info["version"]}')
    run()


def run():
    """Interactive OSINT menu: dispatch the chosen recon action.

    FIX/refactor: the eight branches that only proxy a hackertarget.com
    endpoint are collapsed into one table-driven path; option '3' no
    longer prints its result twice; the big selenium/API branches are
    extracted into helpers above.
    """
    # choice -> (input example shown to the user, hackertarget endpoint)
    hackertarget_menu = {
        '6': ('google.com', 'findshareddns'),
        '7': ('google.com, vsechastikino.ru', 'zonetransfer'),
        '9': ('google.com', 'geoip'),
        '10': ('google.com', 'reverseiplookup'),
        '11': ('http://google.com', 'nmap'),
        '12': ('http://google.com', 'subnetcalc'),
        '13': ('http://google.com', 'httpheaders'),
        '14': ('http://google.com', 'pagelinks'),
    }
    address_prompt = f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Введите address: "
    try:
        print(menu)
        choice = input(
            f"{COLORS.REDL} └──>{COLORS.GNSL} Выберите опцию {COLORS.GNSL}[ {COLORS.REDL}main_menu{COLORS.GNSL} ]{COLORS.GNSL}: "
        )
        if choice == '1':
            show_banner(clear=True)
            from module.info_ip import dnsdumpster
            domain = input(
                f'{COLORS.REDL} └──>{COLORS.WHSL} Введите домен для поиска информации:{COLORS.GNSL} '
            )
            dnsdumpster(domain)
        elif choice == '2':
            from urllib.parse import urlparse
            show_banner(clear=True)
            print(f'\n Пример ввода ссылки: http://google.com \n')
            target = input(address_prompt)
            print('\n [ + ] Headers :\n')
            response = requests.get(target, verify=True, timeout=10)
            for k, v in response.headers.items():
                print(f' [ + ] {k} : {v}')
            run()
        elif choice == '3':
            show_banner(clear=True)
            dnslookup = input(address_prompt)
            print('\n')
            info = requests.get('https://api.hackertarget.com/dnslookup/?q=' + dnslookup)
            # FIX: the result used to be printed twice.
            print(info.text)
            run()
        elif choice == '4':
            show_banner(clear=True)
            revers = input(address_prompt)
            info = requests.get('https://api.hackertarget.com/reversedns/?q=' + revers)
            print(info.text)
            run()
        elif choice == '5':
            _reverse_whois()
        elif choice in hackertarget_menu:
            example, endpoint = hackertarget_menu[choice]
            show_banner(clear=True)
            print(f'\n Пример ввода ссылки: {example} \n')
            target = input(address_prompt)
            print('\n')
            request = requests.get(f'https://api.hackertarget.com/{endpoint}/?q={target}')
            print(request.text)
            run()
        elif choice == '8':
            import whois
            print(' Пример ввода: google.com \n')
            data = input(
                f"{COLORS.GNSL} [ {COLORS.REDL}+{COLORS.GNSL} ] {COLORS.WHSL} Введите доменное имя: "
            )
            test = whois.whois(data)
            show_banner(clear=True)
            print(test)
        elif choice == '15':
            _virustotal_subdomains()
        elif choice == '16':
            _whatcms_detect()
        elif choice == '0':
            import os
            print(
                f' {COLORS.GNSL}Благодарим вас за использование !!! Вы прекрасны.\n'
            )
            os.system('pkill -9 -f osintsan.py')
            exit()
        elif choice == '99':
            show_banner(clear=True)
            return
    except KeyboardInterrupt:
        print("\nAborted!")
        exit()
import time

from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from gazpacho import Soup
from tqdm import tqdm
import numpy as np
import pandas as pd

# Headless Firefox session used to scrape BoardGameGeek browse pages.
options = Options()
options.headless = True
browser = Firefox(executable_path="/usr/local/bin/geckodriver", options=options)

links = []
# Sampled page numbers: the top of the ranking plus slices around
# pages 100, 200 and 300.
pages = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100, 101, 102, 200, 201, 202, 300, 301, 302
]
for page in tqdm(pages):
    url = f"https://boardgamegeek.com/browse/boardgame/page/{page}"
    browser.get(url)
    soup = Soup(browser.page_source)
    # Collect the hrefs of the primary boardgame links on the page.
    li = [
        a.attrs["href"] for a in soup.find("a", {
            "class": "primary",
            "href": "boardgame"
        })
    ]
    links.extend(li)
    time.sleep(1)  # be polite to the server
class TreeScraper(object):
    """Scrape Wikipedia's Special Category Tree page:
    https://en.wikipedia.org/wiki/Special:CategoryTree

    EXAMPLE USE:
    import wikiscraper
    scraper = wikiscraper.WikiTreeScraper()
    scraper.scrape(category='mathematics', search_depth=3, save='csv')

    FIXES: duplicate detection now uses a set (was an O(n) list scan per
    link), `== True` is replaced by truthiness, and the two nearly
    identical scrape() branches are merged (only the `mode` differs).
    """

    def __init__(self, all_pages=True):
        # When True, scrape pages as well as categories (mode=all).
        self.all_pages = all_pages

    def _get_expand_buttons(self):
        """Return a list of expand buttons to click on."""
        return self.browser.find_elements_by_xpath("//span[@title='expand']")

    def _harvest_links(self, url_list, depth_list, seen):
        """Append CategoryTree links not seen before at the current depth.

        `seen` is a set mirror of url_list for O(1) membership tests.
        """
        soup = bs(self.browser.page_source, 'html.parser')
        for a in soup.find_all('a', class_='CategoryTreeLabel'):
            link = a['href']
            if link in seen:
                self.duplicated += 1
            else:
                seen.add(link)
                url_list.append(link)
                depth_list.append(self.depth)

    def _expand_all_categories(self):
        """Expand all categories on page, level by level up to
        search_depth, collecting (url, depth) rows into self.df."""
        self.depth = 0
        self.df = pd.DataFrame(columns=['url', 'depth'])
        self.duplicated = 0  # count of links seen more than once
        url_list = []
        depth_list = []
        seen = set()
        self._harvest_links(url_list, depth_list, seen)
        self.depth += 1
        while self.depth < self.search_depth:
            start = default_timer()
            # Generous pauses so the tree widget finishes rendering
            # before and after the buttons are collected.
            time.sleep(30)
            expand_buttons = self._get_expand_buttons()
            time.sleep(30)
            for button in expand_buttons:
                time.sleep(.05)
                if button.is_displayed():
                    button.click()
            end = default_timer()
            print(
                f'depth of { self.depth } took {str(round((end-start)/60, 2))} minutes to open'
            )
            self._harvest_links(url_list, depth_list, seen)
            self.depth += 1
        self.df = pd.DataFrame(list(zip(url_list, depth_list)),
                               columns=['url', 'depth'])
        self._convert_utf8()
        if self.save == 'csv':
            start = default_timer()
            self._save_csv()
            end = default_timer()
            print(
                str(round((end - start) / 60, 2)) + ' minutes to save to csv')

    def _save_csv(self):
        """Save to a csv file"""
        # Save pages and categories to a 'seed_pages' dir
        self.df.to_csv(f'seed/{ self.category }_d{ self.depth }.csv',
                       sep='\t',
                       encoding='utf-8',
                       index=False)

    def _convert_utf8(self):
        """Convert the url column to utf-8 encoding (un-percent-escape)."""
        self.df['url'] = self.df['url'].map(unquote)

    def scrape(self, category, search_depth=3, save='csv'):
        """Scrape for either categories or all categories and pages"""
        self.category = category.replace(' ', '_')
        self.search_depth = search_depth
        self.save = save
        self.browser = Firefox()
        # mode=all includes pages; mode=categories is categories only.
        mode = 'all' if self.all_pages else 'categories'
        time.sleep(1)
        self.browser.get(
            f'https://en.wikipedia.org/wiki/Special:CategoryTree?target={ self.category }&mode={ mode }&namespaces=&title=Special%3ACategoryTree'
        )
        time.sleep(1)
        self._expand_all_categories()
def _setup_firefox(): opts = Options() opts.set_headless() assert opts.headless browser = Firefox(executable_path=GECKODRIVER_PATH, options=opts) return browser
from selenium.webdriver import Firefox from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait import time webDriver = Firefox() webDriver.get("https://www.tradingview.com/#signin") #Log in to tradingview def login(exchange): #need two seperate tradingview accounts, premium if available, in order to prevent crashes, these are the accounts I created # you may use them if they work if exchange == "binance": tradingViewuserName = "******" else: tradingViewuserName = "******" tradingViewPassword = "******" inputusername = webDriver.find_element_by_name("username") inputusername.send_keys(tradingViewuserName) inputpassword = webDriver.find_element_by_name("password") inputpassword.send_keys(tradingViewPassword) inputpassword.submit() time.sleep(3) def get_trading_view_graph(interval, currency, exchange): webDriver.get("https://www.tradingview.com/chart/?symbol=" + exchange + ":" + currency) intervalSelector = webDriver.find_element_by_class_name("value-DWZXOdoK-") intervalSelector.click() intervals = webDriver.find_elements_by_class_name("item-2xPVYue0-")
class webDriverFirefox():
    """Wraps one headless Firefox session and a Google Translate scraper."""
    # Class-level defaults; driverRun() populates them per instance.
    driver = None
    wait = None
    url = None
    browserRunning = False

    def __init__(self, url):
        # Launch browser (deferred: only the start URL is stored here;
        # the actual browser starts in driverRun()).
        self.url = url

    def driverRun(self):
        """Start headless Firefox (once) and open self.url."""
        try:
            # Best-effort cleanup of the previous run's driver log.
            os.remove('./geckodriver.log')
        except:
            # Log file may simply not exist yet.
            pass
        if (not (self.browserRunning)):
            options = Options()
            options.add_argument('-headless')
            self.driver = Firefox(executable_path='./geckodriver',
                                  options=options)
            self.wait = WebDriverWait(self.driver, timeout=10)
            self.driver.get(self.url)
            self.browserRunning = True
        return 'online'

    def driverQuit(self):
        """Quit the browser if it is running."""
        if (self.browserRunning):
            self.driver.quit()
            self.browserRunning = False
        return 'offline'

    def driverControl(self, url):
        """Navigate the running browser to `url`."""
        driver = self.driver
        wait = self.wait
        # wait = WebDriverWait(driver, timeout=10)
        driver.get(url)
        # wait.until(expected.visibility_of_element_located((By.NAME, 'q'))).send_keys('headless firefox' + Keys.ENTER)
        # wait.until(expected.visibility_of_element_located((By.CSS_SELECTOR, '#ires a'))).click()
        # print(driver.page_source)

    def googleTranslate(self, translateString):
        # Google Translate: type the text into the source textarea, wait for
        # the translation span, then parse the result out of the page HTML.
        # Returns {"data1": translation[, "data2": dictionary entries]}.
        driver = self.driver
        wait = self.wait
        # Clear the source box and wait for any stale translation to vanish.
        driver.find_element_by_css_selector(
            'textarea#source.orig.tlid-source-text-input.goog-textarea').clear(
            )
        wait.until(
            expected.invisibility_of_element_located(
                (By.CSS_SELECTOR, 'span.tlid-translation.translation')))
        # driver.get('https://translate.google.com/#auto|ko|{}'.format(translateString))
        driver.find_element_by_css_selector(
            'textarea#source.orig.tlid-source-text-input.goog-textarea'
        ).send_keys(translateString)
        try:
            wait.until(
                expected.visibility_of_element_located(
                    (By.CSS_SELECTOR, 'span.tlid-translation.translation')))
        except:
            # Timed out: Translate may have shown an error panel instead.
            html = driver.page_source
            driver.find_element_by_css_selector(
                'textarea#source.orig.tlid-source-text-input.goog-textarea'
            ).clear()
            if (sv.select_one('div.result-error',
                              BeautifulSoup(html, 'html.parser'))):
                return {
                    "data1":
                    sv.select_one('span.tlid-result-error',
                                  BeautifulSoup(html, 'html.parser')).text
                }
        html = driver.page_source
        driver.find_element_by_css_selector(
            'textarea#source.orig.tlid-source-text-input.goog-textarea').clear(
            )
        # driver.implicitly_wait(5)
        select1 = str(
            sv.select_one('div.homepage-content-wrap',
                          BeautifulSoup(html, 'html.parser')))
        # Split the translation pane into its two result sections.
        # 1: the main translated text (convert <br/> to newlines first).
        resultString1 = ''
        select4 = sv.select_one('span.tlid-translation.translation',
                                BeautifulSoup(select1, 'html.parser'))
        resultString1 = str(select4).replace('<br/>', '<span>\n</span>')
        resultString1 = BeautifulSoup(resultString1, 'html.parser').text
        # print(resultString1)
        # 2: the optional dictionary table (word classes + alternatives).
        resultString2 = ''
        if ('<div class="gt-cd gt-cd-mbd gt-cd-baf" style="display: none;"'
                in select1):
            # Dictionary panel hidden: no alternatives for this input.
            pass
        elif ('<div class="gt-cd gt-cd-mbd gt-cd-baf" style=""' in select1):
            select2 = str(
                sv.select_one('table.gt-baf-table',
                              BeautifulSoup(select1, 'html.parser')))
            select3 = sv.select(
                'span.gt-cd-pos, span.gt-baf-cell.gt-baf-word-clickable',
                BeautifulSoup(select2, 'html.parser'))
            for data in select3:
                strData = str(data)
                if ('gt-baf-cell' in strData):
                    # Alternative word: accumulate as a comma list.
                    resultString2 += "{}, ".format(data.text)
                elif ('gt-cd-pos' in strData):
                    # Part-of-speech header: start a new "* pos *" section.
                    resultString2 = resultString2.rstrip(", ")
                    resultString2 += "\n* {} *\n: ".format(data.text)
            # Trim the leading newline / trailing comma of the built text.
            resultString2 = resultString2.lstrip("\n")
            resultString2 = resultString2.rstrip(", ")
            # print(resultString2)
        return {"data1": resultString1, "data2": resultString2}
# headers = {'User-Agent': generate_user_agent()} # # response = requests.get(url, headers=headers) # print(response.text) from selenium.webdriver import Firefox, FirefoxProfile from selenium.webdriver.firefox.options import Options import os dir_path = os.path.dirname(os.path.realpath(__file__)) opts = Options() # opts.set_headless() profile = FirefoxProfile() profile.set_preference("javascript.enabled", False) browser = Firefox(firefox_profile=profile, options=opts, executable_path=os.path.join(dir_path, 'geckodriver')) print("Getting page...") browser.get( 'https://www.usnews.com/best-colleges/university-of-chicago-1774/paying') widget_class_name = 'hero-stats-widget-stats' print("Finding stats section...") stats_wrapper = browser.find_element_by_xpath( '//section[@class="hero-stats-widget-stats"]') print("Finding stats list region") stats_ul = stats_wrapper.find_element_by_tag_name('ul') stats_li_list = stats_ul.find_elements_by_tag_name('li') for li in stats_li_list: span_title = li.find_element_by_tag_name("span").text strong_content = li.find_element_by_tag_name("strong")
from selenium.webdriver import Firefox from selenium.webdriver.common.action_chains import ActionChains from time import sleep from pprint import pprint browser = Firefox(executable_path='./geckodriver') browser.get("https://www.diariooficial.ma.gov.br") #Code sleep(5) # Fechar modal Alert modal_close = browser.find_element_by_css_selector('a[role=button]') if modal_close: modal_close.click() # Pegar Div e do Select sleep(5) div_select = browser.find_element_by_css_selector('div .span10') # Pegar Combo Select select = div_select.find_element_by_xpath( '//select[@id="formPesq:comboCaderno"]') select.click() option_combo = select.find_element_by_xpath('//option[@value="JU"]') option_combo.click() # Set data in input input_data_ini = browser.find_element_by_xpath( '//input[@name="formPesq:calendarInic_input"]')
from selenium.webdriver import Firefox from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import Select from openpyxl import load_workbook browser = Firefox(executable_path='/opt/WebDriver/bin/geckodriver') def spreadsheet(column): wb = load_workbook('/home/johntg/Documents/columbia_WIP_11-7.xlsx') ws = wb["main"] data = [] mfnum = 'a' + str(column) upc = 'b' + str(column) department = 'c' + str(column) item = 'd' + str(column) manufacturer = 'e' + str(column) name = 'f' + str(column) cost = 'g' + str(column) price = 'h' + str(column) data.extend([ ws[mfnum].value, ws[upc].value, ws[department].value, ws[item].value, ws[manufacturer].value, ws[name].value, ws[cost].value, ws[price].value ])
    def write_tf(self, filesize, threadnum):
        '''This function writes tfrecords.

        Input parameters are: filesize (number of images in one tfrecord),
        threadnum (thread id, used only in log messages).

        Renders `filesize` synthetic tables in a headless Firefox, converts
        each to a SequenceExample, and writes them all into one
        GZIP-compressed .tfrecord file with a random 20-character name.
        '''
        options = tf.compat.v1.io.TFRecordOptions(
            tf.compat.v1.io.TFRecordCompressionType.GZIP)
        opts = Options()
        # NOTE(review): Options.set_headless() is deprecated in newer
        # Selenium releases (opts.headless = True is the replacement).
        opts.set_headless()
        assert opts.headless
        #driver=PhantomJS()
        driver = Firefox(options=opts)
        # NOTE(review): 'if True:' is a leftover from a removed retry loop —
        # see the commented-out flag/while block below.
        if True:
            starttime = time.time()
            # randomly select a name of length=20 for tfrecords file.
            output_file_name = ''.join(
                random.choices(string.ascii_uppercase + string.digits,
                               k=20)) + '.tfrecord'
            print('\nThread: ', threadnum, ' Started:', output_file_name)
            # data_arr contains the images of generated tables and
            # all_table_categories contains the table category of each table.
            # flag = 1
            # while flag:
            #     try:
            #         data_arr,all_table_categories = self.generate_tables(driver, filesize, output_file_name)
            #         flag = 0
            #     except:
            #         flag = 1
            data_arr, all_table_categories = self.generate_tables(
                driver, filesize, output_file_name)
            if (data_arr is not None):
                # Only a complete batch is written; partial results are dropped.
                if (len(data_arr) == filesize):
                    with tf.io.TFRecordWriter(os.path.join(
                            self.outtfpath, output_file_name),
                                              options=options) as writer:
                        for imgindex, subarr in enumerate(data_arr):
                            arr = subarr[0]
                            # subarr[1][0] is the rendered table image; keep a
                            # single channel.  NOTE(review): assumes an
                            # (H, W, C) layout — confirm with generate_tables.
                            img = np.asarray(subarr[1][0], np.int64)[:, :, 0]
                            colmatrix = np.array(arr[1], dtype=np.int64)
                            cellmatrix = np.array(arr[2], dtype=np.int64)
                            rowmatrix = np.array(arr[0], dtype=np.int64)
                            bboxes = np.array(arr[3])
                            tablecategory = arr[4][0]
                            seq_ex = self.generate_tf_record(
                                img, cellmatrix, rowmatrix, colmatrix, bboxes,
                                tablecategory, imgindex, output_file_name)
                            writer.write(seq_ex.SerializeToString())
                    print('\nThread :', threadnum, ' Completed in ',
                          time.time() - starttime, ' ', output_file_name,
                          'with len:', (len(data_arr)))
                    print('category 1: ', all_table_categories[0],
                          ', category 2: ', all_table_categories[1],
                          ', category 3: ', all_table_categories[2],
                          ', category 4: ', all_table_categories[3])
        # except Exception as e:
        #     print('Exception occurred in write_tf function for file: ',output_file_name)
        #     traceback.print_exc()
        #     self.logger.write(traceback.format_exc())
        #     print('Thread :',threadnum,' Removing',output_file_name)
        #     os.remove(os.path.join(self.outtfpath,output_file_name))
        driver.stop_client()
        driver.quit()
        # NOTE(review): exit() terminates the whole interpreter, not just
        # this worker thread — confirm this is intentional.
        exit()
from selenium.webdriver import Firefox from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.common.keys import Keys url = "https://selenium.dunossauro.live/caixinha" browser = Firefox() browser.get(url) caixa = browser.find_element_by_id('caixa') h1 = browser.find_element_by_tag_name('h1') ac = ActionChains(browser) def caixinha_colorida(key1, key2=None): ac.key_down(key1) if key2: ac.key_down(key2) ac.move_to_element(caixa) ac.pause(1) ac.click() ac.pause(1) ac.double_click() ac.pause(1) ac.move_to_element(h1) ac.key_up(key1) if key2: ac.key_up(key2)
#!python3 from selenium.webdriver import Firefox url = 'https://selenium.dunossauro.live/aula_09_a.html' browser = Firefox() browser.get(url) browser.implicitly_wait(30) btn = browser.find_element_by_css_selector('button') btn.click() sucesso = browser.find_element_by_id('finished') assert sucesso.text == 'Carregamento concluído'
for bs in f.find_all('a'): runnerurl.append(bs['href']) runner.append(bs['href']) else: runner.append(f.text.strip()) if len(runner) != 0: # this is to get a number to compare with results found against all users with the name given expectedfinish = runner[3] numofracesfromrace = runner[2] agerange = runner[4] if len(runnerurl) != 0: # paste ultraparticipant here URL = (BASEURL + runnerurl[0]) opts = Options() opts.add_argument('--headless') browser = Firefox(options=opts) browser.get(URL) time.sleep(1) acchead = browser.find_elements_by_class_name('accordion-heading') accordion = browser.find_elements_by_class_name('accordion-content') for num, runner in enumerate(acchead, start=0): runarray = runner.text.split('\n') run = Runner(runarray, URL, expectedfinish) races = accordion[num].find_elements_by_class_name('rowlines') print(run.url) for r in races: racelist = r.text.split('\n') #print(racelist) obj = Race(racelist) run.events.append(vars(obj)) complete = []
def get_browser(headless: bool = True,
                browser_class: int = 1,
                init_args: dict = None) -> Firefox:
    """Create and return a browser instance.

    :param headless: run the browser headless when True
    :param browser_class: browser kind — 0 is Chrome, 1 is Firefox;
        servers cannot use Chrome
    :param init_args: optional dict of extra Firefox profile preferences
    :return: a started Firefox or Chrome WebDriver
    :raises Exception: startup failures are e-mailed, logged, and re-raised
    """
    # The (Chinese) block below documents driver installation: install
    # selenium via pip; install the regular browser before its headless
    # build; put geckodriver in /usr/local/bin with exec permission; put
    # chromedriver in Chrome's directory (/opt/google/chrome/ on Ubuntu).
    # Headless browsers inherit the OS language, so the profile pins
    # intl.accept_languages to zh-cn to keep scraped data consistent.
    # Reportedly Chrome headless misbehaves under root; Firefox does not.
    """
    selenium安装方法: pip3 install selenium
    firefox的headless浏览器
    因为headless的浏览器的语言跟随操作系统,为了保证爬回来的数据是正确的语言, 这里必须设置浏览器的初始化参数,
    注意,使用headless必须先安装对应浏览器正常的版本,然后再安装headless版本
    比如火狐的headless
    下载火狐的geckodriver驱动。(当前文件夹下已经有一个了)地址是: https://github.com/mozilla/geckodriver/releases
    下载后解压是一个geckodriver 文件。拷贝到/usr/local/bin目录下,然后加上可执行的权限
    sudo chmod +x /usr/local/bin/geckodriver
    chrome的headless浏览器
    https://chromedriver.storage.googleapis.com/index.html?path=2.35/
    你也可以自行搜索chromedriver的下载地址,解压是个可执行文件,放到chrome的目录即可.
    一般ubuntu下面,chrome的目录是/opt/google/chrome/
    据说使用root权限运行的话,chrome的headless浏览器会报异常.而firefox的headless浏览器不会!
    """
    if browser_class == 1:
        # Firefox branch: force zh-cn plus any caller-supplied preferences.
        profile = FirefoxProfile()
        profile.set_preference("intl.accept_languages", "zh-cn")
        if isinstance(init_args, dict):
            for k, v in init_args.items():
                profile.set_preference(k, v)
        else:
            pass
        options = FirefoxOptions()
        options.add_argument("--headless")
        if headless:
            try:
                browser = Firefox(firefox_profile=profile,
                                  executable_path=firefox_driver,
                                  firefox_options=options)
            except Exception as e:
                title = "{} Firefox headless浏览器打开失败".format(
                    datetime.datetime.now())
                content = "错误原因是:{}".format(e)
                send_mail(title=title, content=content)
                logger.exception(e)
                raise e
        else:
            # Non-headless: same driver without the headless options object.
            try:
                browser = Firefox(
                    firefox_profile=profile,
                    executable_path=firefox_driver,
                )
            except Exception as e:
                title = "{} Firefox headless浏览器打开失败".format(
                    datetime.datetime.now())
                content = "错误原因是:{}".format(e)
                send_mail(title=title, content=content)
                logger.exception(e)
                raise e
    else:
        # Chrome branch.  NOTE(review): both arms of the following
        # if/else build an identical Chrome — only the --headless argument
        # added above differs; the duplication could be collapsed.
        options = ChromeOptions()
        options.add_experimental_option("excludeSwitches",
                                        ["ignore-certificate-errors"])
        if headless:
            options.add_argument("--headless")
            try:
                browser = Chrome(executable_path=chrome_driver,
                                 chrome_options=options)
            except Exception as e:
                title = "{} Chrome headless浏览器打开失败".format(
                    datetime.datetime.now())
                content = "错误原因是:{}".format(e)
                send_mail(title=title, content=content)
                logger.exception(e)
                raise e
        else:
            try:
                browser = Chrome(executable_path=chrome_driver,
                                 chrome_options=options)
            except Exception as e:
                title = "{} Chrome headless浏览器打开失败".format(
                    datetime.datetime.now())
                content = "错误原因是:{}".format(e)
                # send_mail is a custom helper defined elsewhere in this project.
                send_mail(title=title, content=content)
                logger.exception(e)
                raise e
    return browser
# Selenium com Python #05 - Procurando e interagindo com elementos p.I # Atributos (Globais, Css, Dom) # Find (id, class, name) # Elementos Web (input, form) from selenium.webdriver import Firefox driver = Firefox() url = "https://selenium.dunossauro.live/aula_05_a.html" driver.get(url) python = driver.find_element_by_id("python") haskell = driver.find_element_by_id("haskell") print(python.text) print(haskell.text)
def setUp(self): path = 'C:\\Automation\\geckodriver' self.driver = Firefox(executable_path=path) self.driver.implicitly_wait(10) self.driver.get('https://teatrocolon.org.ar/es') self.indexPage = PageIndex(self.driver)
from selenium.webdriver import Firefox import datetime driver = Firefox() driver.get("http://orteil.dashnet.org/experiments/cookie/") cookie = driver.find_element_by_id('cookie') store_elements = driver.find_elements_by_css_selector("#store div") check_time = datetime.datetime.now() + datetime.timedelta(seconds=10) to_reach = datetime.datetime.now() + datetime.timedelta(minutes=5) while datetime.datetime.now() < to_reach: cookie.click() if datetime.datetime.now() >= check_time: store_prices = driver.find_elements_by_css_selector("#store b") prices = [ (int("0" + price.text.strip().split('-')[-1].replace(",", "").strip()), idx) for idx, price in enumerate(store_prices) ] prices.sort(reverse=True) my_money = int("0" + driver.find_element_by_id( "money").text.strip().split('-')[-1].replace(",", "").strip()) for i in range(len(prices)): if my_money >= prices[i][0] and prices[i][0]: store_prices[prices[i][1]].click() break
def answer_stones_riddle(browser: webdriver.Firefox) -> None: input1 = browser.find_element_by_id("r1Input") input1.send_keys("rock") button1 = browser.find_element_by_id("r1Btn") button1.click()
def test_file_upload(self, application: str, ff_browser: webdriver.Firefox): # Run logout to clean session ff_browser.get(application + "/logout") # Open the login page ff_browser.get(application + "/login") # Get username and password elements on page username = ff_browser.find_element_by_name("username") password = ff_browser.find_element_by_name("password") # Get submit button element btnSubmit = ff_browser.find_element_by_xpath( "/html/body/div[2]/form/input") # Inject username and password of test user username.send_keys(username_test) password.send_keys(password_test) # Click on submit button btnSubmit.click() # Open the create faculty page ff_browser.get(application + "/create_faculty") # Get faculty name input facName = ff_browser.find_element_by_xpath( "/html/body/div[2]/form/p/input") # Send value for new faculty facName.send_keys(newFacTest) # Get save faculty button btnSaveFac = ff_browser.find_element_by_xpath( "/html/body/div[2]/form/input") # Click on submit button btnSaveFac.click() # Get title element titleSaved = ff_browser.find_element_by_xpath("/html/body/div[2]/h1") assert (titleSaved.text == "Manage Faculties:")
class TeatroColon(unittest.TestCase): def setUp(self): path = 'C:\\Automation\\geckodriver' self.driver = Firefox(executable_path=path) self.driver.implicitly_wait(10) self.driver.get('https://teatrocolon.org.ar/es') self.indexPage = PageIndex(self.driver) def test_donaciones(self): self.driver.find_element_by_css_selector('.nav-entradas').click() self.assertEqual( self.driver.find_element_by_css_selector( 'h1.page-header > span:nth-child(1)').text, 'DONACIONES') def test_busqueda_orquesta(self): self.indexPage.search('orquesta') self.driver.find_element_by_css_selector( 'div.item_search:nth-child(4) > span:nth-child(1) > h4:nth-child(1) > strong:nth-child(1) > a:nth-child(1)' ).click() self.assertEqual( self.driver.find_element_by_css_selector( 'h1.page-header > span:nth-child(1)').text, 'ORQUESTA FILARMÓNICA DE BUENOS AIRES') def test_noticias(self): self.driver.find_element_by_xpath( '/html/body/div[1]/header/div[2]/div/nav[2]/ul/li[9]/a').click() self.driver.find_element_by_css_selector('.link-vermas').click() self.driver.find_element_by_name('categories[23]').click() self.assertTrue( 'ISATC en Mar del Plata' in self.driver. find_element_by_partial_link_text('ISATC en Mar del Plata').text) def tearDown(self): self.driver.close() self.driver.quit()
from selenium.webdriver import Firefox from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.webdriver.support import expected_conditions as expected from selenium.webdriver.support.wait import WebDriverWait from selenium.common.exceptions import TimeoutException import time text = raw_input() if __name__ == "__main__": options = Options() options.add_argument('-headless') driver = Firefox(executable_path='geckodriver', firefox_options=options) try: driver.get('https://www.ncts.ie/1263') driver.find_element_by_xpath("//button[@id='CloseCookie']").click() driver.refresh() nctInput=driver.find_element_by_xpath("//input[@name='RegistrationID']") nctInput.clear() nctInput.send_keys(text+ Keys.ENTER ) WebDriverWait(driver, 10).until(expected.presence_of_element_located((By.ID, "confirmVehicleYes"))) driver.find_element_by_xpath("//input[@id='confirmVehicleYes']").send_keys(Keys.ENTER) WebDriverWait(driver, 10).until(expected.presence_of_element_located((By.ID, "tab3"))) print (driver.find_element_by_xpath('//*[@id="tab3"]/form[1]/div[2]/div[1]/div[1]/table/tbody/tr[1]/td').text) except TimeoutException as ex: print 'Invalid reg plate/Script crash' isrunning = 0 driver.quit() driver.quit()
def test_case_firefox(): path = ".\Driver\geckodriver.exe" driver = Firefox(executable_path=path) driver.get("http://www.theTestingWorld.com/testings") # Maximize browser driver.maximize_window() # Enter Data to the textbox driver.find_element_by_name("fld_username").send_keys("helloworld") driver.find_element_by_xpath("//input[@name='fld_email']").send_keys( "*****@*****.**") driver.find_element_by_name("fld_password").send_keys("abcd123") driver.find_element_by_name("fld_cpassword").send_keys("abcd123") driver.find_element_by_name("fld_username").clear() driver.find_element_by_name("fld_username").send_keys("abcd123")
from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from time import sleep class SearchElement: def __init__(self, locator): self.locator = locator def __call__(self, webdriver): print(f'Searching for {self.locator}') if webdriver.find_elements(*self.locator): return True return False driver = Firefox() driver_wait = WebDriverWait(driver, 10, poll_frequency=0.5) driver.get('https://selenium.dunossauro.live/exercicio_10.html') driver.maximize_window() # preencher formulario driver.find_element_by_css_selector('input#todo-name').send_keys('Lavar o carro') driver.find_element_by_css_selector('textarea#todo-desc').send_keys('Usando agua e sabão e fechando a tonerira para n gastar agua') driver.find_element_by_css_selector('button#todo-submit').click() # moving Todo to doing locator = (By.CSS_SELECTOR, 'div.body_a fieldset div div') driver_wait.until( SearchElement(locator), 'Não achou a div' )
from selenium.webdriver import Firefox from time import sleep from urllib.parse import urlparse from json import loads url = 'http://selenium.dunossauro.live/aula_05.html' firefox = Firefox() firefox.get(url) def preenche_form(browser, nome, email, senha, telefone): browser.find_element_by_name("nome").send_keys(nome) browser.find_element_by_name("email").send_keys(email) browser.find_element_by_name("senha").send_keys(senha) browser.find_element_by_name("telefone").send_keys(telefone) browser.find_element_by_name("btn").click() sleep(2) estrutura = { 'nome': 'Miquéias', 'email': '*****@*****.**', 'senha': '124', 'telefone': '987654321' } preenche_form(firefox, **estrutura)
def setUp(self): self.driver = Firefox( executable_path='/Users/reginameshkova/Desktop/mine/geckodriver')
from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options opts = Options() browser = Firefox(options=opts) browser.get('https://bandcamp.com') #browser.find_element_by_class_name('play-btn').click() tracks = browser.find_elements_by_class_name('discover-item') #print(len(tracks)) #tracks[2].click() discover_results = browser.find_element_by_class_name('discover-result') print(tracks[2].text) browser.close() quit()