    if browser.is_text_present(result):
        print('pass')
    else:
        print('[X] not pass')


empty_username = ''
empty_password = ''
valid_username = '******'
valid_password = '******'
invalid_username = '******'
invalid_password = '******'
unregistered_username = '******'

__testUrl = 'http://localhost/student_entrepreneur_funding_system/login.php'
browser = Browser()  # firefox is already supported as the default driver
browser.visit(__testUrl)
print("test page: " + browser.title)

# test login
test_login('test login by empty', '', '', 'Username is empty')
test_login('test login by empty username', '', valid_password, 'Username is empty')
test_login('test login by empty password', valid_username, '', 'Password is empty')
test_login('test login by unregistered username', unregistered_username, valid_password, 'Username or password wrong')
test_login('test login by invalid username', invalid_username, valid_password, 'Username or password wrong')
test_login('test login by invalid password', valid_username, invalid_password, 'Username or password wrong')
test_login('test login by valid username and password', valid_username, valid_password, 'Login successfully!')
def before_all(context):
    context.browser = Browser('phantomjs')
def setup_browser():
    logger.info("Setting up zope...")
    try:
        world.zope = Browser("zope.testbrowser")
    except:
        logger.warn("Error starting up zope")
def __init__(self, passengers, from_time, from_station, to_station,
             number, seat_type, receiver_mobile, receiver_email):
    """Initialise the instance attributes."""
    # Passenger names
    self.passengers = passengers
    # Departure and arrival stations
    self.from_station = from_station
    self.to_station = to_station
    # Travel date
    self.from_time = from_time
    # Train number
    self.number = number.capitalize()
    # Map the seat type to its <td> column index and its submit value
    seat_types = {
        '商务座特等座': (1, 9),   # business / premium class
        '一等座': (2, 'M'),       # first class
        '二等座': (3, 0),         # second class
        '高级软卧': (4, 6),       # deluxe soft sleeper
        '软卧': (5, 4),           # soft sleeper
        '动卧': (6, 'F'),         # EMU sleeper
        '硬卧': (7, 3),           # hard sleeper
        '软座': (8, 2),           # soft seat
        '硬座': (9, 1),           # hard seat
        '无座': (10, 1),          # standing ticket
        '其他': (11, 1),          # other
    }
    # Fall back to hard sleeper if the seat type is unknown
    self.seat_type_index, self.seat_type_value = seat_types.get(seat_type, (7, 3))
    # Notification targets
    self.receiver_mobile = receiver_mobile
    self.receiver_email = receiver_email
    # Main pages of the new 12306 website
    self.login_url = 'https://kyfw.12306.cn/otn/resources/login.html'
    self.init_my_url = 'https://kyfw.12306.cn/otn/view/index.html'
    self.ticket_url = 'https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc'
    # Browser driver; download chromedriver from
    # https://sites.google.com/a/chromium.org/chromedriver/downloads
    self.driver_name = 'chrome'
    self.driver = Browser(driver_name=self.driver_name)
import time

from bs4 import BeautifulSoup
from splinter.browser import Browser


class singleprice:
    fileName = 'single.csv'
    chromeDriverpath = r'C:\Program Files\Java\webdriver\chromedriver.exe'  # Web driver path
    executable_path = {'executable_path': chromeDriverpath}  # DO NOT change it
    browser = Browser('chrome', **executable_path)  # DO NOT change it

    def __init__(self):
        # self.browser.visit(self.url)
        a = 1

    def getinfo(self, url):
        time.sleep(2)
        try:
            self.browser.visit(url)
            innerhtml = self.browser.html
            soup = BeautifulSoup(innerhtml, "lxml")
            strTotal = ''
            for elem in soup.find_all('td'):
                str1 = elem.text
                str2 = str1.replace('\n', '')  # remove newlines
                str3 = str2.strip()            # remove surrounding spaces
                strTotal = strTotal + str3 + ','
            strTotal = strTotal + '\n'
            self.writeLog(strTotal)
        except Exception:
            pass

    def writeLog(self, myNote):
        # time1 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Open for appending: 'w' rewrites the file from the beginning, 'a' appends to the end
        f1 = open(self.fileName, 'ab+')
        # f1.write(str(myNote))
        # f1.write(str(myNote).encode('utf-8'))
        # string.replace(u'\xa0', u' ')
        f1.write(str(myNote).encode('gbk', 'ignore'))
        f1.close()

    def getByCode(self, urlStart, urlEnd):
        # Number + Letter
        for i in range(0, 10):
            for j in range(65, 91):
                print("getting id " + str(i) + chr(j))
                self.getinfo(urlStart + str(i) + chr(j) + urlEnd)
        # Letter + Number
        for i in range(65, 91):
            for j in range(0, 10):
                print("getting id " + chr(i) + str(j))
                self.getinfo(urlStart + chr(i) + str(j) + urlEnd)
        # Letter + Letter
        for i in range(65, 91):
            for j in range(65, 91):
                print("getting id " + chr(i) + chr(j))
                self.getinfo(urlStart + chr(i) + chr(j) + urlEnd)
        # Number + Number
        for i in range(0, 10):
            for j in range(0, 10):
                print("getting id " + str(i) + str(j))
                self.getinfo(urlStart + str(i) + str(j) + urlEnd)
from splinter.browser import Browser
import os.path
import time
import sys
import pandas as pd

########## run the browser in headless mode ##########
browser = Browser('firefox', headless=True)

########## open the individual html file ##########
browser.visit('file://' + os.path.realpath(sys.argv[1]))

if sys.argv[1].find('szvav') != -1:
    testcase = 2
else:
    testcase = 1

sim_tim = 0
while sim_tim < 3600 * 48:
    ########## record the data for the simulation output ##########
    elements = browser.find_by_id("output")
    ########## extract the simulation time ##########
    div = elements[0]
    if div.value != '':
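    # A possible continuation of this loop (a sketch, not from the original
    # script): read the simulation time out of the "output" div, append the
    # reading to a CSV, and otherwise just wait. The layout of div.value
    # (whitespace-separated fields with the time first) is an assumption.
        fields = div.value.split()
        sim_tim = float(fields[0])
        pd.DataFrame([fields]).to_csv('results_{}.csv'.format(testcase),
                                      mode='a', header=False, index=False)
    time.sleep(1)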
def setUpModule():
    ns.browser = Browser('chrome')
from bs4 import BeautifulSoup
from splinter.browser import Browser
import pandas as pd
import requests
import html5lib

executable_path = {'executable_path': 'chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)


def mars_news(browser):
    url = "https://mars.nasa.gov/news/"
    browser.visit(url)
    browser.is_element_present_by_css("ul.item_list li.slide", wait_time=0.5)
    html = browser.html
    news_soup = BeautifulSoup(html, "html.parser")
    try:
        slide_element = news_soup.select_one("ul.item_list li.slide")
        news_title = slide_element.find("div", class_="content_title").get_text()
        news_paragraph = slide_element.find(
            "div", class_="article_teaser_body").get_text()
    except AttributeError:
        return None, None
    # return the scraped title and teaser paragraph
    return news_title, news_paragraph
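# A minimal usage sketch (not part of the original module): call mars_news()
# with the browser created above and print what it found. It assumes the
# function returns the (news_title, news_paragraph) tuple shown above.
if __name__ == "__main__":
    news_title, news_paragraph = mars_news(browser)
    print(news_title)
    print(news_paragraph)
    browser.quit()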
def __init__(self):
    self.browser = Browser()
    self.browser.driver.set_page_load_timeout(5)
def scrape():
    # Dependencies
    import pandas as pd
    from bs4 import BeautifulSoup as bs
    from splinter.browser import Browser
    import time
    # import requests
    # from selenium import webdriver

    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser("chrome", **executable_path)

    # NASA Mars News
    # visit website
    news_url = "https://mars.nasa.gov/news/"
    browser.visit(news_url)
    # create html object and parse with BeautifulSoup
    news_html = browser.html
    news_soup = bs(news_html, 'html.parser')
    # scrape the latest title and paragraph
    news_title = news_soup.find('div', class_='content_title').text
    news_paragraph = news_soup.find('div', class_='article_teaser_body').get_text()

    # JPL Mars Space Images
    # visit website
    base_image_url = "https://www.jpl.nasa.gov"
    image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(image_url)
    # create html object and parse with BeautifulSoup
    image_html = browser.html
    image_soup = bs(image_html, 'html.parser')
    # scrape the featured image
    featured_image_path = image_soup.find(id='full_image').get('data-fancybox-href')
    full_image = base_image_url + featured_image_path

    # Mars Weather
    # visit website
    base_weather_url = 'https://twitter.com/marswxreport?lang=en'
    weather_url = 'https://twitter.com/MarsWxReport/status/1233751572125028354'
    browser.visit(weather_url)
    # create html object and parse with BeautifulSoup
    time.sleep(1)
    weather_html = browser.html
    weather_soup = bs(weather_html, 'lxml')
    # scrape the weather information
    weather = weather_soup.find('title').text

    # Mars Facts
    # visit the website and extract both facts tables as HTML
    facts1_url = 'https://space-facts.com/mars/'
    facts1 = pd.read_html(facts1_url)
    facts1_df = facts1[0]
    facts1_df_html = facts1_df.to_html()

    facts2_url = 'https://space-facts.com/mars/'
    facts2 = pd.read_html(facts2_url)
    facts2_df = facts2[1]
    facts2_df_html = facts2_df.to_html()

    # Mars Hemispheres
    # visit the website
    base_hemi_url = 'https://astrogeology.usgs.gov'
    hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemi_url)
    # create html object and parse with BeautifulSoup
    hemi_html = browser.html
    hemi_soup = bs(hemi_html, 'html.parser')
    # scrape the hemisphere titles and images
    hemisphere_image_urls = []
    hemi_container = hemi_soup.find('div', id='product-section')
    hemi_images = hemi_container.find_all('div', class_='item')
    for image in hemi_images:
        hemi_title = image.find('h3').text
        link = image.find('a')['href']
        browser.visit(base_hemi_url + link)
        soup = bs(browser.html, 'html.parser')
        downloads = soup.find('div', class_='downloads')
        img_url = downloads.find('a')['href']
        hemisphere_image_urls.append({'title': hemi_title, 'img_url': img_url})

    # individual hemisphere titles and image urls
    hemi1_title = hemisphere_image_urls[0]["title"]
    hemi1_img = hemisphere_image_urls[0]["img_url"]
    hemi2_title = hemisphere_image_urls[1]["title"]
    hemi2_img = hemisphere_image_urls[1]["img_url"]
    hemi3_title = hemisphere_image_urls[2]["title"]
    hemi3_img = hemisphere_image_urls[2]["img_url"]
    hemi4_title = hemisphere_image_urls[3]["title"]
    hemi4_img = hemisphere_image_urls[3]["img_url"]

    # Dictionary of all scraped data
    mars_dict = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image_url": full_image,
        "weather": weather,
        "facts1": facts1_df_html,
        "facts2": facts2_df_html,
        "hemisphere_images_urls": hemisphere_image_urls,
        "hemi1_title": hemi1_title,
        "hemi1_img": hemi1_img,
        "hemi2_title": hemi2_title,
        "hemi2_img": hemi2_img,
        "hemi3_title": hemi3_title,
        "hemi3_img": hemi3_img,
        "hemi4_title": hemi4_title,
        "hemi4_img": hemi4_img,
    }

    return mars_dict
def test_should_raise_an_exception_when_browser_driver_is_not_found(self):
    with self.assertRaises(DriverNotFoundError):
        from splinter.browser import Browser
        Browser('unknown-driver')
from splinter.browser import Browser

target_app_url = "http://127.0.0.1:8000/crawler"
target_browser = Browser('firefox')
def __init__(self):
    # We presume that you've installed chromedriver as per
    # https://splinter.readthedocs.io/en/latest/drivers/chrome.html
    self.browser = Browser('chrome', headless=True)
    self.browser.visit('file://' + os.path.join(
        os.path.dirname(__file__), "data",
        'DV-TTVedicNormal ==_ यूनिकोड परिवर्तित्र.html'))
def setUpClass(cls):
    cls.browser = Browser('zope.testbrowser')
def huoche():
    global b
    b = Browser()
    b.visit(ticket_url)
    b.execute_script('alert("Starting to grab tickets...")')
    sleep(2)
    b.get_alert().dismiss()
    while b.is_text_present(u"登录"):
        sleep(1)
        login()
        if b.url == initmy_url:
            break
    try:
        # jump back to the booking page
        b.visit(ticket_url)
        # load the query information via cookies
        b.cookies.add({"_jc_save_fromStation": starts})
        b.cookies.add({"_jc_save_toStation": ends})
        b.cookies.add({"_jc_save_fromDate": dtime})
        b.reload()
        i = 1
        # keep clicking the booking button in a loop
        if order != 0:
            while b.url == ticket_url:
                b.find_by_text(u"查询").click()
                # b.find_by_text(ttype).click()
                if b.find_by_text(u"预订"):
                    sleep(0.3)
                    b.find_by_text(u"预订")[order - 1].click()
                    print b.url
                    if b.is_text_present(u"证件号码", wait_time=0.2):
                        # print [i.text for i in b.find_by_text(pa)]
                        b.find_by_text(pa)[1].click()
                else:
                    b.execute_script('alert("No bookable option seems to be available")')
                    b.get_alert().dismiss()
        else:
            while b.url == ticket_url:
                b.find_by_text(u"查询").click()
                if b.find_by_text(u"预订"):
                    sleep(0.3)
                    for i in b.find_by_text(u"预订"):
                        i.click()
                        sleep(0.1)
                    if b.is_text_present(u"证件号码"):
                        b.find_by_text(pa)[1].click()
                else:
                    b.execute_script('alert("No bookable option seems to be available")')
                    b.get_alert().dismiss()
        b.execute_script('alert("Did everything that could be done")')
        b.get_alert().dismiss()
        print u"Did everything that could be done... no further browser actions"
    except Exception:
        print u"Something went wrong..."
def set_browser():
    enable_selenium_specs_to_run_offline()
    world.browser = Browser()
def setUpClass(cls): cls.browser = Browser("chrome")
def setUpClass(cls):
    cls.browser = Browser()
import cssutils, time, queue, json
import atexit
from bs4 import BeautifulSoup
from splinter.browser import Browser
from splinter import exceptions
import selenium.common
import sys
from selenium.webdriver.common.keys import Keys

browser = Browser('firefox', headless=False)

try:
    # visit the site
    browser.visit("https://open.rocket.chat/")

    # log in manually for now because nobody gave me login info
    input("press return to continue once logged in")

    channels = ["general", "sandbox"]
    for channel in channels:
        # create the output files
        rawfile = open("data/{}_raw.txt".format(channel), "w+")
        parsedfile = open("data/{}_parsed".format(channel), "w+")

        browser.visit("https://open.rocket.chat/channel/{}".format(channel))
        time.sleep(10)

        # Scroll up to load previous messages.
        # I have not figured out a way to detect when the channel is completely
        # loaded (mostly because I am lazy and it doesn't really matter), so I
        # just use 300 scrolls as a safe number: if it's too low, increase it;
        # if it's too high (takes too long), decrease it.
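        # A sketch of the scroll-up loop described above (an assumption, not
        # part of the original snippet): repeatedly scroll the message list
        # back to the top so older messages get loaded. The ".messages-box
        # .wrapper" selector and the 300 iterations are assumptions.
        for _ in range(300):
            browser.execute_script(
                "document.querySelector('.messages-box .wrapper').scrollTop = 0;")
            time.sleep(0.2)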
def initial_setup(server):
    """
    Launch the browser once before executing the tests.
    """
    world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')

    if world.LETTUCE_SELENIUM_CLIENT == 'local':
        browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')

        if browser_driver == 'chrome':
            desired_capabilities = DesiredCapabilities.CHROME
            desired_capabilities['loggingPrefs'] = {
                'browser': 'ALL',
            }
        elif browser_driver == 'firefox':
            desired_capabilities = DesiredCapabilities.FIREFOX
        else:
            desired_capabilities = {}

        # There is an issue with ChromeDriver2 r195627 on Ubuntu
        # in which we sometimes get an invalid browser session.
        # This is a work-around to ensure that we get a valid session.
        success = False
        num_attempts = 0
        while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:

            # Load the browser and try to visit the main page
            # If the browser couldn't be reached or
            # the browser session is invalid, this will
            # raise a WebDriverException
            try:
                world.browser = Browser(
                    browser_driver,
                    desired_capabilities=desired_capabilities)
                world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
                world.visit('/')

            except WebDriverException:
                LOGGER.warn("Error acquiring %s browser, retrying",
                            browser_driver, exc_info=True)
                if hasattr(world, 'browser'):
                    world.browser.quit()
                num_attempts += 1

            else:
                success = True

        # If we were unable to get a valid session within the limit of attempts,
        # then we cannot run the tests.
        if not success:
            raise IOError(
                "Could not acquire valid {driver} browser session.".format(
                    driver=browser_driver))

        world.absorb(0, 'IMPLICIT_WAIT')
        world.browser.driver.set_window_size(1280, 1024)

    elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
        config = get_saucelabs_username_and_key()
        world.browser = Browser(
            'remote',
            url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(
                config['username'], config['access-key']),
            **make_saucelabs_desired_capabilities())
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
        world.browser = Browser(
            'remote',
            url=settings.SELENIUM_GRID.get('URL'),
            browser=settings.SELENIUM_GRID.get('BROWSER'),
        )
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    else:
        raise Exception("Unknown selenium client '{}'".format(
            world.LETTUCE_SELENIUM_CLIENT))

    world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
    world.absorb(world.browser.driver.session_id, 'jobid')
def __init__(self):
    self.driver_name = 'firefox'
    self.executable_path = 'E:/driver/geckodriver'
    self.driver = Browser(driver_name=self.driver_name,
                          executable_path=self.executable_path)
    self.driver.visit(self.mainUrl)
# -*- coding: utf-8 -*-
from splinter.browser import Browser
import time

# executable_path = {'executable_path': r'D:\webDriver\chromedriver.exe'}
# b = Browser('chrome', **executable_path)
b = Browser('firefox')
b.visit('https://www.manhuaren.com/m76571/')
time.sleep(30)
print(b.html)
# cs = b.cookies.all()
# splinter's Browser has no find_elements_by_class_name; go through the
# underlying selenium driver for that, or use a CSS selector on the Browser
cs = b.driver.find_elements_by_class_name("TopstoryItem--experimentExpand")
contentDivs = b.find_by_css(".TopstoryItem--experimentExpand")
print(len(contentDivs))
print(len(cs))
def setUpClass(cls):
    cls.browser = Browser()
    super(SplinterTestCase, cls).setUpClass()
#!/usr/bin/env python
from splinter.browser import Browser
import time

url = "http://www.tvbts.com"

with Browser('phantomjs') as browser:
    browser.visit(url)

    # login
    browser.fill('username', 'zhuliting')
    browser.fill('password', '123456')
    browser.find_by_xpath('//button[@type="submit"]').first.click()

    # browser.click_link_by_href('/milist.html')
    browser.find_by_xpath('//*[@id="mn_N4233"]/a').first.click()
    print browser.url

    l = len(browser.find_by_xpath('//div[@class="pg"]/a'))
    for i in range(l):
        browser.find_by_xpath('//div[@class="pg"]/a')[i].click()
        print browser.url

        # save screenshot
        file_name = "/home/zhuliting/png/" + str(i) + ".png"
        browser.screenshot(file_name)

        contents = browser.find_by_xpath('//*[@class="s xst"]')
        for content in contents:
            print content.value

    # if (len(browser.windows) > 1):
    #     browser.windows[1].close()
- Access the link to add a new Poll
- Add a new Poll with your choices
- Save the Poll
- Find the link of the Poll inserted before
- Choose the "delete" option and confirm the deletion on the next page
- Get the info from the operation

This example will work perfectly if you have completed the second part of the
Django tutorial:
https://docs.djangoproject.com/en/dev/intro/tutorial02/

For more information, just check:
https://docs.djangoproject.com/en/dev/intro/tutorial01/

NOTE: This was tested when the version of the tutorial was for Django 1.3
"""

from splinter.browser import Browser

browser = Browser()

# You may change this url to the current url from your Poll App
browser.visit('http://127.0.0.1:8000/admin/')

# You may change the username and password too
browser.fill('username', 'douglas')
browser.fill('password', '123456')

# do login
browser.find_by_css('.submit-row input').first.click()

# add a new poll
browser.find_link_by_href('polls/poll/add/').first.click()
browser.fill('question', 'Does this test complete successfully?')

# if you followed the instructions from the tutorial, this will show
# the datetime options for the poll
browser.find_by_css('.collapse-toggle').first.click()
datetime_fields = browser.find_by_css('.datetimeshortcuts')
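# A possible continuation of the steps listed above (a sketch, not part of the
# original example): fill in the date, save the poll, reopen it from the change
# list, delete it, and read the confirmation message. Field and selector names
# such as 'pub_date_0' and '.deletelink' follow the Django 1.3 admin and are
# assumptions here.
browser.fill('pub_date_0', '2011-01-01')
browser.fill('pub_date_1', '12:00:00')
browser.find_by_name('_save').first.click()

# find the link of the poll inserted before and open it
browser.find_link_by_partial_text('Does this test complete successfully?').first.click()

# choose the delete option and confirm the deletion on the next page
browser.find_by_css('.deletelink').first.click()
browser.find_by_value("Yes, I'm sure").first.click()

# get the info from the operation
if browser.is_text_present('deleted successfully'):
    print('Poll deleted successfully')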
def start(self):
    self.driver = Browser(driver_name=self.driver_name,
                          executable_path=self.executable_path)
    self.driver.driver.set_window_size(1400, 1000)
    self.login()
    # sleep(1)
    self.driver.visit(self.ticket_url)
    try:
        print(u"Booking page opened...")
        # sleep(1)
        # load the query information via cookies
        self.driver.cookies.add({"_jc_save_fromStation": self.starts})
        self.driver.cookies.add({"_jc_save_toStation": self.ends})
        self.driver.cookies.add({"_jc_save_fromDate": self.dtime})
        self.driver.reload()
        count = 0
        if self.order != 0:
            while self.driver.url == self.ticket_url:
                self.driver.find_by_text(u"查询").click()
                sleep(2)
                count += 1
                print(u"Clicking the query button in a loop... attempt %s" % count)
                # sleep(1)
                try:
                    self.driver.find_by_text(u"预订")[self.order - 1].click()
                except Exception as e:
                    print(e)
                    print(u"Booking has not opened yet")
                    continue
        else:
            while self.driver.url == self.ticket_url:
                self.driver.find_by_text(u"查询").click()
                count += 1
                print(u"Clicking the query button in a loop... attempt %s" % count)
                # sleep(0.8)
                try:
                    # for i in self.driver.find_by_text(u"预订"):
                    #     print(i)
                    #     i.click()
                    #     sleep(1)
                    # look for bookable trains that match the criteria
                    for t in self.driver.find_by_xpath(
                            "//*[@id='queryLeftTable']/tr/td/div/div[3]/strong[1]"):
                        tl = t.text.split(":")
                        car = t.find_by_xpath("../..//div[1]/a")[0]
                        # matching departure-time window and train type
                        if (self.date_tic <= int(tl[0]) <= self.data_tic_end
                                and car.text[0] in self.car_types):
                            ti_id = t.find_by_xpath("../../../..")[0]["id"]
                            elem = self.driver.find_by_xpath(
                                "//*[@id='" + ti_id + "']/td[13]/a")
                            if len(elem) > 0:
                                elem[0].click()
                                sleep(1)
                    if count > 1000:
                        print("Exceeded the maximum number of attempts")
                        break
                except Exception as e:
                    print(e)
                    print(u"Booking has not opened yet %s" % count)
                    continue
        print(u"Starting to book...")
        # sleep(3)
        # self.driver.reload()
        sleep(1)
        print(u'Selecting passengers...')
        for user in self.users:
            self.driver.find_by_text(user).last.click()
        print(u"Submitting the order...")
        sleep(1)
        # self.driver.find_by_text(self.pz).click()
        # self.driver.find_by_id('').select(self.pz)
        # sleep(1)
        # self.driver.find_by_text(self.xb).click()
        # sleep(1)
        # submit the order
        self.driver.find_by_id('submitOrder_id').click()
        # print(u"Selecting seats...")
        # self.driver.find_by_id('1D').last.click()
        # self.driver.find_by_id('1F').last.click()
        sleep(1.5)
        print(u"Confirming the seat selection...")
        self.driver.find_by_id('qr_submit_id').click()
    except Exception as e:
        print(e)
def start(self):
    self.driver = Browser(driver_name=self.driver_name,
                          executable_path=self.executable_path)
    self.driver.driver.set_window_size(1400, 1000)
    self.login()
    # sleep(1)
    self.driver.visit(self.ticket_url)
    try:
        print u"Booking page opened..."
        # sleep(1)
        # load the query information via cookies
        self.driver.cookies.add({"_jc_save_fromStation": self.starts})
        self.driver.cookies.add({"_jc_save_toStation": self.ends})
        self.driver.cookies.add({"_jc_save_fromDate": self.dtime})
        self.driver.reload()
        count = 0
        if self.order != 0:
            while self.driver.url == self.ticket_url:
                self.driver.find_by_text(u"查询").click()
                count += 1
                print u"Clicking the query button in a loop... attempt %s" % count
                # sleep(1)
                try:
                    self.driver.find_by_text(u"预订")[self.order - 1].click()
                except Exception as e:
                    print e
                    print u"Booking has not opened yet"
                    continue
        else:
            while self.driver.url == self.ticket_url:
                self.driver.find_by_text(u"查询").click()
                count += 1
                print u"Clicking the query button in a loop... attempt %s" % count
                # sleep(0.8)
                try:
                    for i in self.driver.find_by_text(u"预订"):
                        i.click()
                        sleep(1)
                except Exception as e:
                    print e
                    print u"Booking has not opened yet %s" % count
                    continue
        print u"Starting to book..."
        # sleep(3)
        # self.driver.reload()
        sleep(1)
        print u'Selecting passengers...'
        for user in self.users:
            self.driver.find_by_text(user).last.click()
        print u"Submitting the order..."
        sleep(1)
        # self.driver.find_by_text(self.pz).click()
        # self.driver.find_by_id('').select(self.pz)
        # sleep(1)
        # self.driver.find_by_text(self.xb).click()
        # sleep(1)
        self.driver.find_by_id('submitOrder_id').click()
        # print u"Selecting seats..."
        # self.driver.find_by_id('1D').last.click()
        # self.driver.find_by_id('1F').last.click()
        sleep(1.5)
        print u"Confirming the seat selection..."
        self.driver.find_by_id('qr_submit_id').click()
    except Exception as e:
        print e
def before_all(context):
    django.setup()
    context.test_runner = DiscoverRunner()
    context.test_runner.setup_test_environment()
    context.browser = Browser('chrome', headless=True)
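# A matching teardown hook (a sketch, not part of the original file): quit the
# browser and tear down the Django test environment once the run finishes.
def after_all(context):
    context.browser.quit()
    context.test_runner.teardown_test_environment()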
from time import sleep
import sched
import time
import datetime

from splinter.browser import Browser


def buy_ticket():
    while 1:
        b.find_by_text('查询').click()
        sleep(.01)
        b.find_by_text('预订')[5].click()
        sleep(.01)
        b.find_by_xpath('//*[@id="normalPassenger_0"]')[1].click()
        b.find_by_xpath('//*[@id="dialog_xsertcj_ok"]').click()


url = 'https://kyfw.12306.cn/otn/login/init'
b = Browser(driver_name="chrome")
b.visit(url)
b.fill("loginUserDTO.user_name", "hejie009")
input('Ready? ')

run_at = datetime.datetime(2018, 1, 29, 12, 59, 59).timestamp()
print('Now:', time.time(), time.strftime("%H:%M:%S"), '\nRun:', run_at)

# use wall-clock time so enterabs() works with the Unix timestamp above
# (the default scheduler clock is time.monotonic)
s = sched.scheduler(time.time, time.sleep)
s.enterabs(run_at, 1, buy_ticket)
s.run()

# //*[@id="normalPassenger_0"]
# //*[@id="dialog_xsertcj_ok"]
# student ticket ok
try:
    browser.quit()
except Exception:
    pass
browser = Browser(driver)


if __name__ == '__main__':
    # ABSPATH = os.path.dirname(os.path.abspath(sys.argv[0]))
    # os.environ['PATH'] += ';%s\\Tesseract-OCR' % ABSPATH
    # system('path')
    # system('pause')
    initLogger()
    driver = 'firefox'
    try:
        browser = Browser('firefox')
    except Exception:
        try:
            browser = Browser('chrome')
        except Exception:
            logging.getLogger().fatal(
                "No browser driver!!! Please make sure that 'chromedriver.exe' does exist."
            )
            exit(0)
        driver = 'chrome'
    main()
    logging.getLogger().info(
        'All lessons have been chosen successfully, congratulations!!!')
    browser.quit()
    system('PAUSE')