Beispiel #1
0
    def setUpClass(cls):
        """Start one shared headless Firefox WebDriver for the whole test class."""
        super(LiveTestCase, cls).setUpClass()

        firefox_options = Options()
        firefox_options.headless = True

        cls.selenium = WebDriver(options=firefox_options)
        # Poll up to 10 s when locating elements before giving up.
        cls.selenium.implicitly_wait(10)
Beispiel #2
0
def get_firefox_headless():
    """Return a headless Firefox driver that auto-saves downloads to ``datadir``.

    Profile setup follows
    https://selenium-python.readthedocs.io/faq.html#how-to-auto-save-files-using-custom-firefox-profile
    """
    content_types = "text/plain,text/csv,application/csv,application/pdf"
    profile = webdriver.FirefoxProfile()
    # Download prefs: custom dir (folderList=2), no download manager popup,
    # and no save/open prompts for the listed MIME types.
    preferences = {
        "browser.download.folderList": 2,
        "browser.download.manager.showWhenStarting": False,
        "browser.download.dir": datadir,
        "browser.helperApps.neverAsk.saveToDisk": content_types,
        "browser.helperApps.neverAsk.openFile": content_types,
        "plugin.scan.plid.all": False,
        "plugin.scan.Acrobat": "99.0",
        "pdfjs.disabled": True,
    }
    for name, value in preferences.items():
        profile.set_preference(name, value)

    options = Options()
    options.headless = True
    # options.log.level = "trace"
    return webdriver.Firefox(
        options=options,
        firefox_profile=profile,
        executable_path="/usr/local/bin/geckodriver",
        service_log_path=os.path.devnull,
        # service_log_path="/code/firefox.log",
    )
Beispiel #3
0
def _local_browser_class(browser_name):
    """
    Returns class, kwargs, and args needed to instantiate the local browser.
    """

    # Log name of local browser
    LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name)

    # Resolve the browser class and the headless flag up front.
    browser_class = BROWSERS.get(browser_name)
    headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true'

    # Guard clause: unknown browser names fail fast.
    if browser_class is None:
        raise BrowserConfigError(
            u"Invalid browser name {name}.  Options are: {options}".format(
                name=browser_name, options=", ".join(list(BROWSERS.keys()))))

    if browser_name == 'firefox':
        # Remove geckodriver log data from previous test cases
        log_path = os.path.join(os.getcwd(), 'geckodriver.log')
        if os.path.exists(log_path):
            os.remove(log_path)

        firefox_options = FirefoxOptions()
        firefox_options.log.level = 'trace'
        if headless:
            firefox_options.headless = True
        browser_args = []
        browser_kwargs = {
            'firefox_profile': _firefox_profile(),
            'options': firefox_options,
        }

        # Optional environment overrides for the Firefox binary and its
        # log file; pass only the keyword arguments that are actually set.
        firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH')
        firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG')
        binary_kwargs = {}
        if firefox_path:
            binary_kwargs['firefox_path'] = firefox_path
        if firefox_log:
            binary_kwargs['log_file'] = firefox_log
        if binary_kwargs:
            browser_kwargs['firefox_binary'] = FirefoxBinary(**binary_kwargs)

    elif browser_name == 'chrome':
        chrome_options = ChromeOptions()
        if headless:
            chrome_options.headless = True

        # Emulate webcam and microphone for testing purposes
        chrome_options.add_argument('--use-fake-device-for-media-stream')

        # Bypasses the security prompt displayed by the browser when it
        # attempts to access a media device (e.g., a webcam)
        chrome_options.add_argument('--use-fake-ui-for-media-stream')

        browser_args = []
        browser_kwargs = {
            'options': chrome_options,
        }
    else:
        browser_args, browser_kwargs = [], {}

    return browser_class, browser_args, browser_kwargs
Beispiel #4
0
def Get_HTML(URL, mode=1, IP_proxy='', flag_return_driver=0, driver=False):
    '''Fetch the page at *URL* using one of several retrieval back-ends.

    Modes:
        1 - Selenium (full browser; slow, but executes JavaScript)
        2 - requests library (control of headers / user-agent / cookies)
        3 - Splinter        (placeholder, not implemented)
        4 - MechanicalSoup  (placeholder, not implemented)
        5 - RoboBrowser     (placeholder, not implemented)
        6 - Mechanize       (placeholder; does not execute JavaScript)
        7 - Scrapy          (placeholder, not implemented)

    Returns:
        mode 1: the page source (str); when *flag_return_driver* is truthy
                a list ``[html, driver]`` is returned and the caller becomes
                responsible for quitting the driver.  ``False`` on error or
                invalid input.
        mode 2: the page text (str).
        other modes: ``None`` (placeholders only print a message).

    NOTE(review): the original docstring labelled mode 1 as "requests" and
    mode 2 as "Selenium"; the code does the opposite, so the docstring was
    corrected to match the code.
    '''

    # Validate input (idiom fix: isinstance instead of type() comparison).
    if not isinstance(URL, str):
        print('You must input only str')
        return False

    if mode == 1:
        # print('You choso Selenium:')

        # Create a fresh driver unless the caller handed one in.
        if flag_return_driver == 0 or driver is False:

            r = tkinter.Tk()  # used only to query the screen geometry

            # Pick the geckodriver build matching the Windows bitness.
            name_file_geckodriver = general_functions.choose_geckodriver_file()

            pathDriver = os.path.dirname(
                os.path.abspath(__file__)) + "\\" + name_file_geckodriver
            opts = Options()
            # Hide the fact that the browser is driven by automation.
            opts.set_preference("dom.webdriver.enabled", False)
            opts.headless = False
            # Size the window: half the screen width, ~77% of its height.
            opts.add_argument('-width=' + str(r.winfo_screenwidth() / 2))
            opts.add_argument('-height=' + str(r.winfo_screenheight() / 1.3))

            driver = webdriver.Firefox(executable_path=pathDriver,
                                       options=opts)
            driver.set_window_position(r.winfo_screenwidth() / 2, 0)

            if IP_proxy:
                # NOTE(review): mutating DesiredCapabilities *after* the
                # driver was created does not affect this session — the
                # proxy would have to be configured before
                # webdriver.Firefox() is called.  Behaviour kept as-is.
                webdriver.DesiredCapabilities.FIREFOX['proxy'] = {
                    "httpProxy": IP_proxy,
                    "ftpProxy": IP_proxy,
                    "sslProxy": IP_proxy,
                    "proxyType": "MANUAL",
                }
        try:
            driver.get(URL)

            try:
                # Best-effort wait (5 s) for a marker element; a timeout
                # is silently ignored.
                WebDriverWait(
                    driver,
                    5).until(lambda driver: driver.find_elements_by_xpath(
                        "//*[.='IP адрес']"))
            except Exception:
                pass
                # element not found

            html = driver.page_source

        except Exception:
            # Runtime string kept as-is ("Current URL is unavailable").
            print('Текущий URL недоступен')
            html = False

        # Hand the live driver back to the caller when requested; the
        # caller must quit it.
        if flag_return_driver and html:
            return [html, driver]

        driver.quit()  # close the browser
        return html

    elif mode == 2:
        print('You choso Requests lib')
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
        }
        # response = requests.get(url,headers=headers,proxies=proxies,timeout=_timeout,verify=False)
        response = requests.get(URL, headers=headers)
        response.encoding = 'utf-8'
        html = response.text
        # BUG FIX: the original computed html here but fell through and
        # implicitly returned None.
        return html

    elif mode == 3:

        print('You choso Splinter')

    elif mode == 4:

        print('You choso MechanicalSoup')

    elif mode == 5:

        print('You choso RoboBrowser')

    elif mode == 6:

        print('You choso Mechanize')

    elif mode == 7:

        print('You choso Scrapy')
def main():
    """Scrape the WSJ market diary and AAII sentiment survey, derive daily
    breadth metrics, and append them to local CSV files on NYSE trading days."""
    print("Main Executed")
    #%%
    site = 'https://www.wsj.com/market-data/stocks/marketsdiary'

    # options = webdriver.ChromeOptions()
    # options.add_argument('headless')
    # options.add_argument('--disable-gpu')
    # options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # driver=webdriver.Chrome(executable_path='E:\Trading\Quant\chromedriver.exe', options=options)
    display = Display(visible=0, size=(1366, 768))  # virtual display for headless hosts

    options = Options()

    # Disable every Firefox cache layer so the page is always fetched fresh.
    fire_profile = webdriver.FirefoxProfile()
    fire_profile.set_preference("browser.cache.disk.enable", False)
    fire_profile.set_preference("browser.cache.memory.enable", False)
    fire_profile.set_preference("browser.cache.offline.enable", False)
    fire_profile.set_preference("network.http.use-cache", False)

    options.headless = True

    display.start()
    driver = webdriver.Firefox(options=options, firefox_profile=fire_profile)

    driver.get(site)
    delay = 30  # NOTE(review): unused — presumably intended for a wait that was removed
    html = driver.page_source
    soup = BeautifulSoup(html, "lxml")
    #table=soup.find_all('tbody')
    #table=table[0]
    div = soup.find('div', id='root')
    df = pd.read_html(str(soup))[0]

    driver.close()
    driver.quit()
    display.stop()

    # Split the combined diary table: rows 0-13 are NYSE, rows 14-24 NASDAQ.
    # NOTE(review): these offsets assume a fixed WSJ table layout — verify.
    df_NYSE = df.iloc[:14, :2]
    df_NYSE.columns = ['Data', 'Value']
    df_NASDAQ = df.iloc[14:25, :2]
    df_NASDAQ.columns = ['Data', 'Value']
    #%%
    #------------get data from AAII sentiment site
    url = 'https://www.aaii.com/sentimentsurvey/sent_results?'
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')

    # Latest survey row: bullish / neutral / bearish percentage strings.
    for tr in soup.find_all('tr')[1:2]:
        tds = tr.find_all('td')
        bulls = tds[1].text
        neutral = tds[2].text
        bears = tds[3].text

    bulls = float(bulls.replace('%', ''))
    neutral = float(neutral.replace('%', ''))
    bears = float(bears.replace('%', ''))

    # Net bullish sentiment: bulls minus bears minus neutral.
    AAII_NET_BULLS = bulls - bears - neutral

    NYSE_UPV = df_NYSE['Value'][6] / 1000000  # up volume, in millions

    NYSE_DNV = df_NYSE['Value'][7] / 1000000  # down volume, in millions

    NYSE_ADV = df_NYSE['Value'][1]  # advancing issues

    NYSE_DEC = df_NYSE['Value'][2]  # declining issues

    ticker = wb.DataReader('^GSPC', data_source='yahoo')
    SPX = ticker[-1:]['Close'][0]  # latest S&P 500 close

    NYSE_TOTAL = df_NYSE['Value'][0]  # total issues traded

    NYSE_LOWS = 100 * df_NYSE['Value'][5] / NYSE_TOTAL  # new lows, % of total

    NYSE_HIGHS = 100 * df_NYSE['Value'][4] / NYSE_TOTAL  # new highs, % of total

    ticker = wb.DataReader('^VIX', data_source='yahoo')
    VIX = ticker[-1:]['Close'][0]  # latest VIX close

    today_data = {
        'UPV': NYSE_UPV,
        'DNV': NYSE_DNV,
        'ADV': NYSE_ADV,
        'DEC': NYSE_DEC,
        'SPX': SPX,
        'Lows': NYSE_LOWS,
        'Highs': NYSE_HIGHS,
        'VIX': VIX,
        'AAII': AAII_NET_BULLS
    }
    # Normalise "now" to midnight US/Eastern (script runs from Singapore time).
    today = pd.to_datetime('today').tz_localize('Asia/Singapore').tz_convert(
        'US/Eastern')
    today = today.replace(tzinfo=None,
                          hour=0,
                          minute=0,
                          second=0,
                          microsecond=0)

    nyse = mcal.get_calendar('NYSE')
    # NOTE(review): valid_days returns *trading* days, so a non-empty result
    # means today IS a trading day — the variable name is misleading.
    is_holiday = nyse.valid_days(start_date=today, end_date=today)

    if len(is_holiday) > 0:
        #take yesterday's CSV and add to it
        df_raw_data = pd.read_csv('raw data.csv', index_col=0)
        #append today's data to yesterday's
        df_raw_data = df_raw_data.append(
            pd.DataFrame(today_data, index=[today]))

        df_raw_data = df_raw_data.reset_index()
        df_raw_data = df_raw_data.rename(columns={'index': 'Date'})
        #save a copy of today's and update the main csv to be added to tomorrow

        df_raw_data.to_csv(str(today.date()) + ' raw data.csv', index=False)
        df_raw_data.to_csv('raw data.csv', index=False)

        #update SPX close
        spx_price = {'Last Price': SPX}
        df_spx = pd.read_csv('SPX 1930 Daily.csv', index_col=0)
        df_spx = df_spx.append(pd.DataFrame(spx_price, index=[today]))
        df_spx = df_spx.reset_index()
        df_spx = df_spx.rename(columns={'index': 'Date'})
        df_spx.to_csv('SPX 1930 Daily.csv', index=False)
    print("Main Complete")
Beispiel #6
0
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
import time, csv
from selenium.webdriver.common.by import By

# Launch headless Firefox, log in to www.ydxxt.com, and navigate into the
# system menu.  NOTE(review): the credentials below are hard-coded in the
# source — they should be moved to environment variables or a config file.
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
# driver = webdriver.Firefox ()
driver.implicitly_wait(10)

driver.get("https://www.ydxxt.com/")

# Fill in the login form with the hard-coded user id and password.
elem_user = driver.find_element_by_name("userid")
elem_user.clear()
elem_user.send_keys("150881732061")
elem_pwd = driver.find_element_by_name("psw")
elem_pwd.send_keys("1y20050917@1")
time.sleep(5)
elem_pwd.send_keys(Keys.RETURN)
time.sleep(3)
# Click the button labelled "进入系统" ("enter system").
driver.find_element_by_css_selector('input[value="进入系统"]').click()
time.sleep(3)
driver.find_element_by_css_selector("#A2").click()
# Open the 9th entry of the first list in the content pane.
driver.find_element_by_css_selector(
    ".WEBItc_list_con1 > ul:nth-child(1) > li:nth-child(9) > a:nth-child(1)"
).click()
time.sleep(2)
Beispiel #7
0
    def __init__(self,
                 client="firefox",
                 username="******",
                 proxy=None,
                 command_executor=None,
                 loadstyles=False,
                 profile=None,
                 headless=False,
                 autoconnect=True,
                 logger=None,
                 extra_params=None,
                 chrome_options=None,
                 executable_path=None,
                 script_timeout=60,
                 element_timeout=30,
                 license_key=None):
        """Initialises the webdriver.

        :param client: backend to drive — "firefox", "chrome" or "remote"
        :param username: account name stored on the instance (the default
            value appears redacted in this source)
        :param proxy: proxy address; applied via ``set_proxy`` for Firefox
            or ``--proxy-server`` for Chrome
        :param command_executor: remote WebDriver URL (client == 'remote')
        :param loadstyles: when False, Firefox disables stylesheets,
            images and Flash
        :param profile: path to an existing browser profile directory;
            a missing path raises WhatsAPIException
        :param headless: run the browser without a visible window
        :param autoconnect: call ``self.connect()`` at the end of init
        :param logger: logger to use; falls back to the class logger
        :param extra_params: extra kwargs forwarded to the webdriver ctor
        :param chrome_options: iterable of additional Chrome arguments
        :param executable_path: path to the driver executable
        :param script_timeout: async-script timeout in seconds
        :param element_timeout: stored for element waits used elsewhere
        :param license_key: stored on the instance; not used in this method
        """

        self.logger = logger or self.logger
        self.license_key = license_key
        extra_params = extra_params or {}

        # Validate the profile path up front so a typo fails loudly.
        if profile is not None:
            self._profile_path = profile
            self.logger.info("Checking for profile at %s" % self._profile_path)
            if not os.path.exists(self._profile_path):
                self.logger.critical("Could not find profile at %s" % profile)
                raise WhatsAPIException("Could not find profile at %s" %
                                        profile)
        else:
            self._profile_path = None

        self.client = client.lower()
        if self.client == "firefox":
            if self._profile_path is not None:
                self._profile = webdriver.FirefoxProfile(self._profile_path)
            else:
                self._profile = webdriver.FirefoxProfile()
            if not loadstyles:
                # Disable CSS
                self._profile.set_preference('permissions.default.stylesheet',
                                             2)
                # Disable images
                self._profile.set_preference('permissions.default.image', 2)
                # Disable Flash
                self._profile.set_preference(
                    'dom.ipc.plugins.enabled.libflashplayer.so', 'false')
            if proxy is not None:
                self.set_proxy(proxy)

            options = Options()

            if headless:
                options.headless = True

            options.profile = self._profile

            capabilities = DesiredCapabilities.FIREFOX.copy()
            capabilities['webStorageEnabled'] = True

            self.logger.info("Starting webdriver")
            if executable_path is not None:
                executable_path = os.path.abspath(executable_path)

                self.logger.info("Starting webdriver")
                self.driver = webdriver.Firefox(
                    capabilities=capabilities,
                    options=options,
                    executable_path=executable_path,
                    **extra_params)
            else:
                self.logger.info("Starting webdriver")
                self.driver = webdriver.Firefox(capabilities=capabilities,
                                                options=options,
                                                **extra_params)

        elif self.client == "chrome":
            # NOTE(review): for Chrome, self._profile actually holds a
            # ChromeOptions object, not a profile — confusing but kept as-is.
            self._profile = webdriver.ChromeOptions()
            if self._profile_path is not None:
                self._profile.add_argument("--user-data-dir=%s" %
                                           self._profile_path)
            if proxy is not None:
                self._profile.add_argument('--proxy-server=%s' % proxy)
            if headless:
                self._profile.add_argument('--headless')
                self._profile.add_argument('--disable-gpu')
                self._profile.add_argument('--remote-debugging-port=9222')
                self._profile.add_argument(
                    '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
                )
            if chrome_options is not None:
                for option in chrome_options:
                    self._profile.add_argument(option)

            if executable_path is not None:
                self.logger.info("Starting webdriver")
                self.driver = webdriver.Chrome(executable_path=executable_path,
                                               chrome_options=self._profile,
                                               **extra_params)
            else:
                self.logger.info("Starting webdriver")
                self.driver = webdriver.Chrome(chrome_options=self._profile,
                                               **extra_params)

        elif client == 'remote':
            if self._profile_path is not None:
                self._profile = webdriver.FirefoxProfile(self._profile_path)
            else:
                self._profile = webdriver.FirefoxProfile()
            capabilities = DesiredCapabilities.FIREFOX.copy()
            self.driver = webdriver.Remote(command_executor=command_executor,
                                           desired_capabilities=capabilities,
                                           **extra_params)

        else:
            # NOTE(review): on an invalid client only an error is logged;
            # self.driver is then never set and set_script_timeout below
            # raises AttributeError — consider raising here instead.
            self.logger.error("Invalid client: %s" % client)
        self.username = username
        self.wapi_functions = WapiJsWrapper(self.driver, self)

        self.driver.set_script_timeout(script_timeout)
        self.element_timeout = element_timeout

        if autoconnect:
            self.connect()
Beispiel #8
0
    def search_by_hashtag(self, hashtag, count=10):
        """Collect up to ``count`` raw TikTok posts for ``hashtag``.

        Launches browsermob-proxy plus a proxied Firefox to capture the
        signed "share/item/list" API request made by the hashtag page,
        then replays and pages that request directly with ``requests``.

        :param hashtag: hashtag name without the leading '#'
        :param count: maximum number of posts to return (default 10)
        :returns: list of raw TikTok item dicts
        :raises Exception: when the hashtag ID cannot be captured
        """
        import requests
        from browsermobproxy import Server
        import psutil
        import json  # fix: was imported twice in the original
        import time
        from selenium import webdriver
        from selenium.webdriver.firefox.options import Options

        import string
        import random

        # Kill any stale browsermob-proxy left over from a previous run.
        for proc in psutil.process_iter():
            # check whether the process name matches
            if proc.name() == "browsermob-proxy":
                proc.kill()

        # Renamed from `dict`, which shadowed the builtin.
        proxy_options = {'port': 8090}
        server = Server(path=self.browsermobDirectory, options=proxy_options)
        # "browsermob-proxy/bin/browsermob-proxy"
        server.start()
        time.sleep(1)
        proxy = server.create_proxy()
        time.sleep(1)

        # Firefox selenium stuff
        profile = webdriver.FirefoxProfile()
        selenium_proxy = proxy.selenium_proxy()
        profile.set_proxy(selenium_proxy)
        options = Options()
        if self.headless:  # idiom fix: was `== True`
            options.headless = True
        driver = webdriver.Firefox(firefox_profile=profile, options=options)

        # Browsermob-capture
        proxy.new_har("list")
        driver.get("https://www.tiktok.com/tag/" + hashtag + "?langCountry=en")
        data = proxy.har

        hashtagId = None

        # Assigns signature and hashtagID from the captured API request.
        for element in data['log']['entries']:
            if "https://m.tiktok.com/share/item/list?" in element['request'][
                    'url'] or "https://www.tiktok.com/share/item/list?" in element[
                        'request']['url']:
                hashtagId = element['request']['queryString'][1]['value']
                self.signature = element['request']['queryString'][6]['value']

        driver.quit()

        response = []

        if hashtagId is not None:  # idiom fix: was `!= None`
            while True:
                try:
                    # Probe: only API replies carry data['body']['hasMore'];
                    # the initial HAR capture does not.
                    data['body']['hasMore']
                    hasMore = True
                except Exception:  # narrowed from a bare `except:`
                    hasMore = False
                    cookie = ''.join(
                        random.choice(string.ascii_uppercase + string.digits)
                        for _ in range(40))

                    url = "https://www.tiktok.com/share/item/list?secUid=&id=" + hashtagId + "&type=3&count=" + \
                            str(count - len(response)) + "&minCursor=-1&maxCursor=0&_signature=" + \
                            self.signature + "&shareUid="

                    headers = {
                        "authority":
                        "www.tiktok.com",
                        "method":
                        "GET",
                        "path":
                        url.split("https://www.tiktok.com")[1],
                        "scheme":
                        "https",
                        "accept":
                        "application/json, text/plain, */*",
                        "accept-encoding":
                        "gzip, deflate, br",
                        "accept-language":
                        "en-US,en;q=0.9",
                        "cache-control":
                        "no-cache",
                        "cookie":
                        cookie,
                        "referer":
                        "https://www.tiktok.com/tag/" + hashtag +
                        "?langCountry=en",
                        "sec-fetch-mode":
                        "cors",
                        "sec-fetch-site":
                        "same-origin",
                        "user-agent":
                        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
                    }

                    r = requests.get(url, headers=headers)

                    data = r.json()
                    if data["statusCode"] == 0:
                        for tiktok in data["body"]["itemListData"]:
                            response.append(tiktok)

                if hasMore:  # idiom fix: was `== True`
                    if count > len(response) and str(
                            data['body']['hasMore']) == "True":
                        cookie = ''.join(
                            random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(40))

                        url = "https://www.tiktok.com/share/item/list?secUid=&id=" + hashtagId + "&type=3&count=" + \
                            str(count - len(response)) + "&minCursor=-1&maxCursor=" + data['body']['maxCursor'] + "&_signature=" + \
                            self.signature + "&shareUid="

                        headers = {
                            "authority":
                            "www.tiktok.com",
                            "method":
                            "GET",
                            "path":
                            url.split("https://www.tiktok.com")[1],
                            "scheme":
                            "https",
                            "accept":
                            "application/json, text/plain, */*",
                            "accept-encoding":
                            "gzip, deflate, br",
                            "accept-language":
                            "en-US,en;q=0.9",
                            "cache-control":
                            "no-cache",
                            "cookie":
                            cookie,
                            "referer":
                            "https://www.tiktok.com/tag/" + hashtag +
                            "?langCountry=en",
                            "sec-fetch-mode":
                            "cors",
                            "sec-fetch-site":
                            "same-origin",
                            "user-agent":
                            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
                        }
                        r = requests.get(url, headers=headers)

                        data = r.json()

                        if data["statusCode"] == 0:
                            for tiktok in data["body"]["itemListData"]:
                                response.append(tiktok)

                    else:
                        return response

        else:
            raise Exception('Unable to locate the hashtag ID')
import requests
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options

# Ask the user for the ICAO code of the aerodrome to look up.
ioc = input("informe o código ICAO que deseja buscar informações: ")
# BUG FIX: the original read `url = url = f"..."` (duplicated assignment).
url = f"https://www.aisweb.aer.mil.br/?i=aerodromos&codigo={ioc}"
print("Aguarde, informações sendo coletadas!")

# Scrape the AISWEB aerodrome page with headless Firefox.
option = Options()
option.headless = True

driver = webdriver.Firefox(options=option)
driver.get(url)
carts_elements = list()

# NOTE(review): this fragment is truncated — the `try:` below has no
# visible `except`/`finally` clause in this view of the file.
try:
    # Gather all elements with CSS class "list".
    element = driver.find_elements_by_class_name('list')
    size_element = len(element)
    start = 0

    # Walk each list and, when it contains more than one <li>, read the
    # text of its items.  NOTE(review): `start` and `start_elementos` are
    # never incremented in the visible portion — presumably handled in
    # the truncated part; verify against the full file.
    for c in range(start, size_element):
        element = driver.find_elements_by_class_name(
            'list')[start].find_elements_by_tag_name('li')
        amount_element = len(element)
        start_elementos = 0
        if (amount_element > 1):
            for d in range(start_elementos, amount_element):
                elementary = driver.find_elements_by_class_name(
                    'list')[start].find_elements_by_tag_name(
                        'li')[start_elementos].text
    def __init__(self, path_of_task, php_proxy = 'CORP', s3_proxy = 'CORP', slack_proxy = 'CORP', slack_channel = 'log-test'):
        '''
        Launch a headless Firefox driver configured for PhpMyAdmin download
        tasks and report the launch outcome to Slack.

        param path_of_task  : path of task file for execution
        param php_proxy     : proxy setting for logging in PhpMyAdmin chosen from 'AWS' / 'GCP' / 'CORP' / 'LOCAL' (use local setting) or None (No Proxy) with default = 'CORP'
        param s3_proxy      : S3 proxy chosen from AWS /GCP / CORP or None (not setting proxy) with default = 'CORP'
        param slack_proxy   : Slack proxy chosen from AWS /GCP / CORP or None (not setting proxy) with default = 'CORP'
        param slack_channel : slack channel for recording log with default = 'log-test'
        '''

        starting_time = datetime.now()
        self.is_driver_launch_succeed = False

        try:
            # change initial slack proxy setting of SlackHandler & s3 proxy setting of S3Handler
            super().__init__(s3_proxy, slack_proxy, slack_channel)

            # read task file — JSON; must provide at least
            # 'download_directory' and 'geckodriver_path'
            with open(path_of_task,'r') as file:
                self.task_information_dictionary = json.load(file)

            # modify option
            options = Options()

            # set headless mode
            options.headless = True

            # modify profile
            profile = webdriver.FirefoxProfile()

            # not use proxy
            if not php_proxy:
                profile.set_preference("network.proxy.type", 0)

            elif php_proxy != 'LOCAL':
                # manual proxy taken from the credential/key dictionary
                proxy = self.key_dict['proxy'][php_proxy]
                profile.set_preference('network.proxy.type', 1)
                profile.set_preference('network.proxy.http', proxy['host'])
                profile.set_preference('network.proxy.http_port', proxy['port'])
                profile.set_preference('network.proxy.ssl',proxy['host'])
                profile.set_preference('network.proxy.ssl_port', proxy['port'])

            # modify download setting
            profile.set_preference("browser.download.folderList", 2)
            profile.set_preference("browser.download.manager.showWhenStarting", False)
            # set download directory
            profile.set_preference("browser.download.dir", self.task_information_dictionary['download_directory'])

            # types of file for directly executing downloading task without asking
            profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                                '''
                                text/plain,text/x-csv,text/csv,
                                application/vnd.ms-excel,
                                application/csv,application/x-csv,
                                text/csv,text/comma-separated-values,
                                text/x-comma-separated-values,
                                text/tab-separated-values,
                                application/pdf
                                ''')

            # evoke driver with geckodriver path and options
            self.driver = webdriver.Firefox(options = options,firefox_profile = profile, \
                                            executable_path = self.task_information_dictionary['geckodriver_path'])
            self.driver.maximize_window()
            # setting static waiting time
            self.driver.implicitly_wait(20)

            self.is_driver_launch_succeed = True

        except Exception as E:
            # error_log is only bound here; the conditional expression below
            # reads it only on the failure branch, so this is safe.
            error_log = str(E)

        ending_time = datetime.now()
        message = '{} to launch driver using time: {}{}'.format('Succeeded' if self.is_driver_launch_succeed else 'Failed',
                                                                     ending_time - starting_time, 
                                                                     '' if self.is_driver_launch_succeed else ' due to {}'.format(error_log))
        print(message + '\n\n')

        self.PostMessage(self.slack_channel, message, '{}_Log_PhpMyAdmin'.format('Correct' if self.is_driver_launch_succeed else 'Error'))
Beispiel #11
0
import shutil
from time import sleep
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.proxy import Proxy, ProxyType
from stem import Signal
from stem.control import Controller
import random
import requests
import json
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary

elevate()

proxy = 0
options = Options()
options.headless = True  # run the browser in headless mode
hidemy = True  # collect proxies from hidemy.name
freeproxy = False  # collect proxies from free-proxy.cz
using_proxy = False

# NOTE(review): truncated fragment — the `try:` below has no visible
# `except` clause in this view of the file.
if (os.stat("proxy_0.txt").st_size > 0 and using_proxy == True):
    try:
        # Refresh the local proxy list from the proxy/ directory, then
        # pick a random proxy from it.
        if (os.stat("proxy/proxy_0.txt").st_size > 0):
            src_dir = os.path.join(os.curdir, "proxy")
            dst_dir = os.curdir
            src_file = os.path.join(src_dir, "proxy_0.txt")
            shutil.copy(src_file, dst_dir)
        with open('proxy_0.txt') as f:
            content = f.readlines()
        content = [x.strip() for x in content]
        myproxy = random.choice(content)
Beispiel #12
0
async def on_message(message):
    """Discord event handler for the SNKRS bot.

    '$s' answers with a liveness message; '$getcalendario' scrapes the
    Nike SNKRS release calendar with a headless Firefox driver and posts
    each product's link, model, colorway, price and date to the channel.

    NOTE(review): modelolist/cwlist/precolist/datalist look like
    module-level accumulators defined elsewhere — confirm they exist.
    """
    # Never react to the bot's own messages, or it would loop forever.
    if message.author == client.user:
        return

    if message.content.startswith('$s'):
        await message.channel.send('Bot Nike pronto pra missao')

    if message.content.startswith('$getcalendario'):
        # Database handle (credentials redacted in this snippet).
        db_conn = mysql.connector.connect(host="129.213.131.233",
                                          port="600",
                                          user="******",
                                          passwd="root",
                                          database="snkrs")
        cursor = db_conn.cursor()

        binary = FirefoxBinary(
            'C:\\Program Files\\Firefox Developer Edition\\firefox.exe')
        url = 'https://www.nike.com.br/Snkrs#calendario'

        opts = Options()
        opts.headless = True
        driver = webdriver.Firefox(firefox_binary=binary,
                                   executable_path='C:\\geckodriver.exe',
                                   options=opts)

        driver.get(url)

        # Scroll twice so the lazily loaded calendar entries are rendered.
        time.sleep(9)
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        time.sleep(3)
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        time.sleep(9)

        #element = driver.find_element_by_xpath("/html/body/main/div/div[3]/section/div/div/div/div[@class='box-resultados vitrine-content--feed grid-produtos grid-produtos--3-col snkr-container']")
        element = driver.find_element_by_xpath(
            "//*[@id='DadosPaginacaoCalendario']")
        html_content = element.get_attribute("outerHTML")
        soup = BeautifulSoup(html_content, 'lxml')
        a_href = soup.find_all('a', href=True)

        hreflist = []

        for a in a_href:
            href = str(a['href'])
            hreflist.append(href)

        # NOTE(review): comparing hreflist[i - 1] to hreflist[i] only skips
        # *adjacent* duplicates, and i == 0 compares the last item against
        # the first — confirm this is the intended de-duplication.
        for i in range(len(hreflist)):
            if hreflist[i - 1] != hreflist[i]:
                url = hreflist[i - 1]
                datetimedb = datetime.datetime.now()
                print(url)
                await message.channel.send(url)
                driver.get(url)
                time.sleep(1.2)
                element = driver.find_element_by_xpath(
                    "/html/body/main/div/div[1]/div[3]/div/div[2]/div[@class='nome-preco-produto']"
                )
                html_content = element.get_attribute("outerHTML")
                soup = BeautifulSoup(html_content, 'lxml')
                modelo = soup.find_all('span')
                cw = soup.find_all('a')

                # Model names: strip the "['...']" artifacts produced by
                # str(node.findAll(text=True)).
                for node in modelo:
                    print(''.join(node.findAll(text=True)))
                    modelodb = str(node.findAll(text=True))
                    #await message.channel.send(modelodb.replace("['", "").replace("']", "").replace("'", ""))
                    modelolist.append(
                        modelodb.replace("['",
                                         "").replace("']",
                                                     "").replace("'", ""))

                # Colorway link texts, cleaned the same way.
                for node in cw:
                    print(' '.join(node.findAll(text=True)))
                    cwdb = str(node.findAll(text=True))
                    #await message.channel.send(cwdb.replace("['", "").replace("']", "").replace("'", ""))
                    cwlist.append(
                        cwdb.replace("['", "").replace("']",
                                                       "").replace("'", ""))

                element = driver.find_element_by_xpath(
                    "/html/body/main/div/div[1]/div[3]/div/div[2]/div[2]/span/span/span[@class='js-valor-por']"
                )
                html_content = element.get_attribute("outerHTML")
                soup = BeautifulSoup(html_content, 'lxml')
                preco = soup.find_all('span')

                # Prices: turn Brazilian "R$ 1,23"-style text into a float.
                for node in preco:
                    print(''.join(node.findAll(text=True)))
                    precodb = str(node.findAll(text=True))
                    #await message.channel.send(precodb.replace("['", "").replace("']", "").replace("'", ""))
                    precolist.append(
                        float(
                            precodb.replace("['", "").replace(
                                "']",
                                "").replace("'",
                                            "").replace("R$",
                                                        "").replace(",", ".")))

                # The release date <h3> is optional on the product page.
                try:
                    element = driver.find_element_by_xpath(
                        "/html/body/main/div/div[1]/div[3]/div/div[2]/h3")
                    html_content = element.get_attribute("outerHTML")
                    soup = BeautifulSoup(html_content, 'lxml')
                    data = soup.find_all('h3')
                    for node in data:
                        print(''.join(node.findAll(text=True)))
                        datadb = str(node.findAll(text=True))
                        #await message.channel.send(datadb.replace("['", "").replace("']", "").replace("'", ""))
                        datalist.append(
                            datadb.replace("['",
                                           "").replace("']",
                                                       "").replace("'", ""))
                except NoSuchElementException:
                    print('data não encontrado.')

                df = pd.DataFrame({
                    'Link': [url],
                    'Modelo': [modelolist],
                    'CW': [cwlist],
                    'Preço': [precolist],
                    'Data': [datalist]
                })

                await message.channel.send(df.sort_values(by='Preço'))

                #sql = """insert into site_refer (refer_href, refer_data, refer_preco, refer_cw, refer_modelo) values(%s, %s, %s, %s, %s)"""
                #val = (str(url), datadb, precodb, cwdb, modelodb)
                #cursor.execute(sql, val)
                #sql = """insert into logs(html_logs, data_hora_logs, tipo_site) values(%s, %s, 1)"""
                #val = (str(url), datetimedb)
                #cursor.execute(sql, val)
                #db_conn.commit()

        # BUGFIX: these two calls were at function level, so any message
        # that was not '$getcalendario' raised NameError because driver and
        # db_conn are only defined inside this branch.
        driver.quit()
        db_conn.close()
Beispiel #13
0
def download_gisaid_EpiCoV(
        uname,  # username
        upass,  # password
        normal,  # normal mode (quite)
        wd,  # output dir
        loc,  # location
        host,  # host
        cs,  # collection start date
        ce,  # collection end date
        ss,  # submission start date
        se,  # submission end date
        cg,  # complete genome only
        hc,  # high coverage only
        le,  # low coverage excluding
        to,  # timeout in sec
        rt,  # num of retry
        iv,  # interval in sec
        nnd,  # do not download nextstrain data
        ffbin  # firefox binary path
):
    """Download sequences and metadata from EpiCoV GISAID.

    Drives a Firefox session (headless unless *normal* is truthy) through
    the GISAID EpiCoV portal: logs in with *uname*/*upass*, optionally
    downloads the nextstrain metadata and FASTA bundles, then applies the
    location/host/date/quality filters and downloads every sequence
    package offered for the selection. Files land in *wd* via the Firefox
    download-directory preferences. Calls sys.exit(1) on fatal conditions
    (no filters given with nnd, no data found, retries exhausted).
    """

    # when user doesn't download nextstrain data, it's essential to enter time range/location
    if not (cs or ce or ss or se or loc) and nnd:
        logging.error("No time range or location entered.")
        sys.exit(1)

    # output directory
    if not os.path.exists(wd):
        os.makedirs(wd, exist_ok=True)

    wd = os.path.abspath(wd)
    GISAID_DTL_JASON = f'{wd}/gisaid_detail_metadata.json'

    # MIME types that Firefox should save without prompting
    mime_types = "application/octet-stream"
    mime_types += ",application/excel,application/vnd.ms-excel"
    mime_types += ",application/pdf,application/x-pdf"
    mime_types += ",application/x-bzip2"
    mime_types += ",application/x-gzip,application/gzip"

    # start fresh
    try:
        os.remove(GISAID_DTL_JASON)
    except OSError:
        pass

    logging.info("Opening browser...")
    profile = webdriver.FirefoxProfile()
    profile.set_preference("browser.download.folderList", 2)
    profile.set_preference("browser.download.manager.showWhenStarting", False)
    profile.set_preference("browser.download.dir", wd)
    profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                           mime_types)
    profile.set_preference("plugin.disable_full_page_plugin_for_types",
                           mime_types)
    profile.set_preference("pdfjs.disabled", True)
    profile.update_preferences()

    options = Options()
    if not normal:
        options.headless = True

    driver = webdriver.Firefox(firefox_profile=profile,
                               options=options,
                               firefox_binary=ffbin)

    # driverwait
    driver.implicitly_wait(30)
    wait = WebDriverWait(driver, to)

    # open GISAID
    logging.info("Opening website GISAID...")
    driver.get('https://www.epicov.org/epi3/frontend')
    waiting_sys_timer(wait)
    logging.info(driver.title)
    assert 'GISAID' in driver.title

    # login
    logging.info("Logining to GISAID...")
    username = driver.find_element_by_name('login')
    username.send_keys(uname)
    password = driver.find_element_by_name('password')
    password.send_keys(upass)
    driver.execute_script("return doLogin();")

    waiting_sys_timer(wait)

    # navigate to EpiFlu
    logging.info("Navigating to EpiCoV...")
    epicov_tab = driver.find_element_by_xpath("//div[@id='main_nav']//li[3]/a")
    epicov_tab.click()

    waiting_sys_timer(wait)

    # download nextstrain data
    if not nnd:
        # download from downloads section
        logging.info("Clicking downloads...")
        pd_button = wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, "//div[@class='sys-actionbar-bar']//div[3]")))
        pd_button.click()
        waiting_sys_timer(wait)

        # have to click the first row twice to start the iframe
        iframe_dl = waiting_for_iframe(wait, driver, rt, iv)

        logging.info("Downloading metadata...")
        driver.switch_to.frame(iframe_dl)
        waiting_sys_timer(wait)
        dl_button = driver.find_element_by_xpath(
            '//div[contains(text(), "metadata")]')
        dl_button.click()
        waiting_sys_timer(wait)
        # waiting for REMINDER
        iframe = waiting_for_iframe(wait, driver, rt, iv)
        driver.switch_to.frame(iframe)
        waiting_sys_timer(wait)
        # agree terms and conditions
        logging.info(" -- agreeing terms and conditions")
        checkbox = driver.find_element_by_xpath(
            '//input[@class="sys-event-hook"]')
        checkbox.click()
        waiting_sys_timer(wait)
        # click download button
        dl_button = wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, '//button[contains(text(), "Download")]')))
        dl_button.click()
        waiting_sys_timer(wait)
        logging.info(" -- downloading")
        # Opening Firefox downloading window
        driver.switch_to.default_content()
        fn = wait_downloaded_filename(wait, driver, 600)
        logging.info(f" -- downloaded to {fn}.")

        waiting_sys_timer(wait)

        logging.info("Downloading FASTA...")
        driver.switch_to.frame(iframe_dl)
        waiting_sys_timer(wait)
        # click nextfasta button
        dl_button = wait.until(
            EC.element_to_be_clickable((By.XPATH, '//div[text()="FASTA"]')))
        dl_button.click()
        waiting_sys_timer(wait)
        # waiting for REMINDER
        iframe = waiting_for_iframe(wait, driver, rt, iv)
        driver.switch_to.frame(iframe)
        waiting_sys_timer(wait)
        # agree terms and conditions
        logging.info(" -- agreeing terms and conditions")
        checkbox = driver.find_element_by_xpath(
            '//input[@class="sys-event-hook"]')
        checkbox.click()
        waiting_sys_timer(wait)
        # click download button
        dl_button = wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, '//button[contains(text(), "Download")]')))
        dl_button.click()
        waiting_sys_timer(wait)
        logging.info(" -- downloading")
        # Opening Firefox downloading window
        driver.switch_to.default_content()
        fn = wait_downloaded_filename(wait, driver, 600)
        logging.info(f" -- downloaded to {fn}.")

        waiting_sys_timer(wait)

        # go back to main frame
        driver.switch_to.frame(iframe_dl)
        back_button = driver.find_element_by_xpath(
            '//button[contains(text(), "Back")]')
        back_button.click()

        driver.switch_to.default_content()
        waiting_sys_timer(wait)

    if cs or ce or ss or se or loc:
        logging.info("Browsing EpiCoV...")
        browse_tab = wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, '//*[contains(text(), "Browse")]')))
        browse_tab.click()
        waiting_sys_timer(wait)
        waiting_table_to_get_ready(wait)

        # set location
        if loc:
            logging.info("Setting location...")
            loc_input = driver.find_element_by_xpath(
                "//td/div[contains(text(), 'Location')]/../following-sibling::td/div/div/input"
            )
            loc_input.send_keys(loc)
            waiting_sys_timer(wait, 7)

        # set host
        if host:
            logging.info("Setting host...")
            host_input = driver.find_element_by_xpath(
                "//td/div[contains(text(), 'Host')]/../following-sibling::td/div/div/input"
            )
            host_input.send_keys(host)
            waiting_sys_timer(wait, 7)

        # set dates (the four date inputs appear in cs, ce, ss, se order)
        date_inputs = driver.find_elements_by_css_selector(
            "div.sys-form-fi-date input")
        dates = (cs, ce, ss, se)
        for dinput, date in zip(date_inputs, dates):
            if date:
                logging.info("Setting date...")
                dinput.send_keys(date)

        ActionChains(driver).send_keys(Keys.ESCAPE).perform()
        waiting_sys_timer(wait, 7)

        # complete genome only
        if cg:
            logging.info("complete genome only...")
            checkbox = driver.find_element_by_xpath(
                '//input[@value="complete"]')
            checkbox.click()
            waiting_sys_timer(wait)

        # high coverage only
        if hc:
            logging.info("high coverage only...")
            checkbox = driver.find_element_by_xpath('//input[@value="highq"]')
            checkbox.click()
            waiting_sys_timer(wait)

        # excluding low coverage
        if le:
            logging.info("low coverage excluding...")
            checkbox = driver.find_element_by_xpath('//input[@value="lowco"]')
            checkbox.click()
            waiting_sys_timer(wait)

        # check if any genomes pass filters
        warning_message = None
        try:
            warning_message = driver.find_element_by_xpath(
                "//div[contains(text(), 'No data found.')]")
        # narrowed from a bare except: absence of the warning div is the
        # normal case; a bare except would also swallow KeyboardInterrupt.
        except Exception:
            pass
        if warning_message:
            logging.info("No data found.")
            sys.exit(1)

        # select all genomes
        logging.info("Selecting all genomes...")
        button_sa = driver.find_element_by_css_selector(
            "span.yui-dt-label input")
        button_sa.click()
        waiting_sys_timer(wait)

        # downloading sequence data; the real option count is discovered
        # from the dialog's labels on the first pass
        num_download_options = 1
        current_option = 0

        retry = 0
        while retry <= rt and current_option < num_download_options:
            try:
                logging.info("Downloading sequences for selected genomes...")
                button = driver.find_element_by_xpath(
                    "//td[@class='sys-datatable-info']/button[contains(text(), 'Download')]"
                )
                button.click()
                waiting_sys_timer(wait)

                # switch to iframe
                iframe = waiting_for_iframe(wait, driver, rt, iv)
                driver.switch_to.frame(iframe)
                waiting_sys_timer(wait)

                # selecting options
                labels = driver.find_elements_by_xpath("//label")
                num_download_options = len(labels)
                labels[current_option].click()
                current_option += 1

                button = driver.find_element_by_xpath(
                    "//button[contains(text(), 'Download')]")
                button.click()
                waiting_sys_timer(wait)
                driver.switch_to.default_content()

                fn = wait_downloaded_filename(wait, driver, 1800)
                logging.info(f"Downloaded to {fn}.")
            # narrowed from a bare except: retry on any runtime failure but
            # let KeyboardInterrupt/SystemExit propagate.
            except Exception:
                logging.info(f"retrying...#{retry} in {iv} sec(s)")
                if retry == rt:
                    logging.error("Unexpected error:", sys.exc_info())
                    sys.exit(1)
                else:
                    time.sleep(iv)
                    retry += 1

    # close driver
    driver.quit()
Beispiel #14
0
def write_parsed_to_csv(page_url,
                        map_info,
                        writer,
                        pscores,
                        page_number=2,
                        web_driver=None):
    """Given the current page URL, extract the information from each apartment in the list.

    Recurses through the paginated apartment listing, writing one CSV row
    per apartment placard.

    :param page_url: URL of the first listing page (ignored on recursion).
    :param map_info: location data forwarded to parse_apartment_information().
    :param writer: csv.writer-like object receiving one row per apartment.
    :param pscores: when truthy, insert placeholder score columns.
    :param page_number: next pagination index to click; we start at 2
        because page 1 is parsed by the initial call.
    :param web_driver: Firefox driver reused across recursive calls; None
        on the first call, which creates one (quit when pages run out).
    """

    # if we are loading the page for the first time, we want to initailize
    # the web driver; recursive calls reuse the already-navigated driver
    if web_driver is not None:
        driver = web_driver
    else:
        options = Options()
        options.headless = True
        if ('debian' in platform.platform()):
            driver = webdriver.Firefox(firefox_binary='/usr/bin/firefox-esr',
                                       options=options)
        else:
            driver = webdriver.Firefox(options=options)
        driver.get(page_url)

    # read the current page
    soup = BeautifulSoup(driver.page_source, 'html.parser')

    # soupify the current page
    soup.prettify()
    # only look in this region
    soup = soup.find('div', class_='placardContainer')

    # append the current apartments to the list
    for item in soup.find_all('article', class_='placard'):
        url = ''
        rent = ''
        contact = ''

        if item.find('a', class_='placardTitle') is None: continue
        url = item.find('a', class_='placardTitle').get('href')

        # get the rent and parse it to unicode
        obj = item.find('span', class_='altRentDisplay')
        if obj is not None:
            rent = obj.getText().strip()

        # get the phone number and parse it to unicode
        obj = item.find('div', class_='phone')
        if obj is not None:
            contact = obj.getText().strip()

        # get the other fields to write to the CSV
        fields = parse_apartment_information(url, map_info)

        # make this wiki markup
        fields['name'] = '[' + str(fields['name']) + '](' + url + ')'
        fields[
            'address'] = '[' + fields['address'] + '](' + fields['map'] + ')'

        # fill out the CSV file
        row = [
            fields['name'], contact, fields['address'], fields['size'], rent,
            fields['monthFees'], fields['onceFees'], fields['petPolicy'],
            fields['distance'], fields['duration'], fields['parking'],
            fields['gym'], fields['kitchen'], fields['amenities'],
            fields['features'], fields['space'], fields['lease'],
            fields['services'], fields['info'], fields['indoor'],
            fields['outdoor'], fields['img'], fields['description']
        ]
        # add the score fields if necessary
        if pscores:
            # BUGFIX: was xrange(), which is Python 2 only and raises
            # NameError under Python 3.
            for i in range(len(row), 0, -1):
                row.insert(i, '5')
            row.append('0')
        # write the row
        writer.writerow(row)

    page_number_str = str(page_number)

    # check for our next page number
    try:
        page_number_element = driver.find_element_by_xpath("//a[@data-page='" +
                                                           page_number_str +
                                                           "']")
        page_number_element.click()
        time.sleep(1)
    # a missing element means our search has come to an end; narrowed from a
    # bare except so KeyboardInterrupt/SystemExit are not swallowed
    except Exception:
        driver.quit()
        return

    # recurse until the last page
    write_parsed_to_csv("none", map_info, writer, pscores, page_number + 1,
                        driver)
Beispiel #15
0
# NOTE(review): os.access('YES', os.R_OK) is True only when a readable file
# named 'YES' exists in the CWD — an unusual on/off switch; confirm intent.
HEADLESS = os.access('YES', os.R_OK)
# True only when the data/, data/db/ and data/logs/ directories all exist.
DB_DIR_SET = os.access('data', os.R_OK) and os.access(
    'data/db', os.R_OK) and os.access('data/logs', os.R_OK)
debug = False
# -----------
ff = None  # browser client

print(f'HEADLESS:{HEADLESS}')
#print(sys.argv)
print('setting up!')
# First CLI argument selects the browser backend: firefox/chrome/debug.
browser = sys.argv[1].lower()

if browser == "firefox":
    from selenium.webdriver.firefox.options import Options
    options = Options()
    options.headless = HEADLESS
    # FF_PATH is presumably the geckodriver path defined elsewhere — verify.
    ff = Firefox(executable_path=FF_PATH, options=options)
elif browser == "chrome":
    from selenium.webdriver.chrome.options import Options
    options = Options()
    if sys.argv[-1].lower() == 'android':
        # Drive Chrome on a connected Android device through chromedriver.
        options.add_experimental_option('androidPackage', 'com.android.chrome')
        print('run on android')
        ff = Chrome(CHRA_PATH, options=options)
    else:
        options.headless = HEADLESS
        ff = Chrome(executable_path=CHR_PATH, options=options)
elif browser == "debug":
    print("Debug mode")
    print('run script in interactive mode with -i option')
    debug = True
 def setUp(self):
     """Launch a headless Firefox instance before each test."""
     opts = Options()
     opts.headless = True
     gecko_path = GeckoDriverManager().install()
     self.driver = webdriver.Firefox(executable_path=gecko_path,
                                     options=opts)
Beispiel #17
0
def before_all(context):
    """behave hook: start one shared Firefox browser for the whole run."""
    firefox_opts = Options()
    firefox_opts.headless = HEADLESS
    context.browser = webdriver.Firefox(options=firefox_opts)
    context.browser.set_window_size(SCREEN_WIDTH, SCREEN_HEIGHT)
Beispiel #18
0
 def create_driver(self):
     """Build and return a headless Firefox WebDriver."""
     firefox_options = Options()
     firefox_options.headless = True
     driver_path = GeckoDriverManager().install()
     return webdriver.Firefox(executable_path=driver_path,
                              options=firefox_options)
Beispiel #19
0
def main():
    """Scrape Accuweather (plus a moon-phase site) and write two text
    files, ``conditions`` and ``forecast``, for consumption by a Conky/Lua
    display.

    Relies on module-level configuration defined elsewhere in this file:
    ``address``, ``outputpath``, ``out_scale``, the ``image_*`` /
    ``units_conversion`` lookup tables and the ``convert_*`` /
    ``skytime`` / ``strain_forecast`` helpers — verify they exist before
    reuse.
    """
    # Set up Selenium options to enable headless access
    options = Options()
    options.headless = True
    driver = webdriver.Firefox(options=options,
                               executable_path='/usr/local/bin/geckodriver')

    # Get identifier from address
    loc_id = address.split('/')[-1]
    # Parse the Accuweather websites: Most data is collected from this page
    curr_address = (address.split('weather-forecast')[0] + 'current-weather/' +
                    loc_id)
    driver.get(curr_address)
    curr_cond = BeautifulSoup(driver.page_source, 'lxml')
    # Precipitation data is collected from this page
    daily_address = (address.split('weather-forecast')[0] +
                     'daily-weather-forecast/' + loc_id)
    driver.get(daily_address)
    daily_cond = BeautifulSoup(driver.page_source, 'lxml')("div",
                                                           "column detail")[0]
    # Extended forecasts are collected from this page
    final_address = daily_address + '?day=6'
    driver.get(final_address)
    final_cond = BeautifulSoup(driver.page_source, 'lxml')
    driver.quit()

    # Strain 'curr_cond' soup
    # NOTE: Accuweather redesigned their website sometime late 2018, so the
    #       code will need to be strained differently:
    # The day/night wrapper class differs depending on local time of day.
    try:
        forecast = curr_cond("div", "conditions-wrapper day")[0]
    except IndexError:
        forecast = curr_cond("div", "conditions-wrapper night")[0]
    sun = curr_cond("div", "block rise-set sun")[0]
    moon = curr_cond("div", "block rise-set moon")[0]
    highs = (curr_cond("table", "temp-history text--size14")
             [0].contents[3].contents[0].text.split('\n'))
    lows = (curr_cond("table", "temp-history text--size14")
            [0].contents[3].contents[2].text.split('\n'))

    # Strain soup for daily forecasts
    panel_list = curr_cond("div", "five-day")[0]
    extended_list = final_cond("div", "five-day")[0]

    # Get moon phase info
    # Check that we have interent connection (using EAFP principle)
    # Admittedly, this isn't as clean as I would like.
    try:
        HEADERS = REQUEST_HEADERS
        requests.get(MOON_ADDRESS, headers=REQUEST_HEADERS, timeout=3.05)
    except Exception as e:
        # fall back to the wget-style headers if the first request fails
        HEADERS = WGET_HEADERS
        pass
    try:
        requests.get(MOON_ADDRESS, headers=WGET_HEADERS, timeout=3.05)
    except Exception as e:
        # still unreachable: give up silently (no display update)
        sys.exit()

    moon_info = BeautifulSoup(
        requests.get(MOON_ADDRESS, headers=HEADERS).text, 'lxml')

    # Get geolocation info for forecast
    city = curr_cond.find(class_="locality").find().get("title")
    region = curr_cond.find("abbr").get("title")
    country = curr_cond.find(class_="country-name").get("title")

    # Parse JSON data in moon_info
    moon_dict = json.loads(
        moon_info("script")[3].string.split("jArray")[1].rsplit(";")[0].lstrip(
            "="))['2']
    moon_minus = (moon_info('img', class_='moonNotToday')
                  [1].attrs['src'].split('moon_day_')[1].split('.')[0])
    moon_icon = (moon_info.find(
        id='todayMoonContainer').find().get('alt').split('moon_phase_')[1])
    moon_plus = (moon_info('img', class_='moonNotToday')[2].attrs['src'].split(
        'moon_day_')[-1].split('.')[0])

    # Determine temperature scale used in HTML
    scale = (curr_cond("div", "subnav-dropdown-container location-crumbs")
             [0].contents[5].contents[3].text[-1])
    # [2018-11-15 Thu] Accuweather changed their setup, and now there is no
    # explicit declaration of the scale used on the page, so I scrape it from
    # its appearance on the top of the page.
    # I don't see Accuweather using Kelvin or other scales anytime soon.

    # Collect the weather information
    # NOTE: I strip the degree symbol to be able to do conditional comparisons
    #       of the temperatures in Conky.  I later append the degree symbols in
    #       the Lua script.
    # NOTE: '\xc2' is part of the UTF8 representation of the degree symbol
    weather = OrderedDict()
    weather['current_icon'] = image_conkyweather[forecast(
        "div", "icon")[0].contents[1]['src'].split('/')[-1].split('.')[0]]
    weather['current_cond'] = forecast("div", "phrase")[0].text
    weather['current_temp'] = convert_item(
        forecast("div", "hi")[0].text.strip('°'), 'temp', scale, out_scale)
    weather['current_feel'] = convert_item(
        forecast("div", "realfeel")[0].text.split()[1].strip('°'), 'temp',
        scale, out_scale)
    weather['windicon'] = image_conkywindnesw[forecast(
        "div", "wind-point")[0]['class'][1]]
    weather['wind_spd'] = (convert_item(
        forecast("div", "speed")[0].text.split()[0], 'dist', scale, out_scale)
                           + units_conversion[out_scale + 'speed'])
    weather['humidity'] = (forecast("div",
                                    "details")[0].contents[1].text.split()[1])
    weather['pressure'] = (convert_item(
        forecast("div", "details")[0].contents[3].text.split()[1], 'pressure',
        scale, out_scale) + units_conversion[out_scale + 'pressure'])
    weather['uv_index'] = (forecast("div",
                                    "details")[0].contents[5].text.split()[-1])
    weather['cloudcov'] = (forecast("div",
                                    "details")[0].contents[7].text.split()[-1])
    weather['dewpoint'] = convert_item(
        forecast("div", "details")[0].contents[11].text.split()[2].strip('°'),
        'temp', scale, out_scale)
    weather['visiblty'] = (convert_item(
        forecast("div", "details")[0].contents[13].text.split()[1], 'dist',
        scale, out_scale) + units_conversion[out_scale + 'dist'])
    weather['sunrise'] = convert_time(sun("time", "rise")[0].text)
    weather['sunset'] = convert_time(sun("time", "set")[0].text)
    weather['suntime'] = skytime(sun, weather['sunrise'], weather['sunset'])
    weather['moonrise'] = convert_time(moon("time", "rise")[0].text)
    weather['moonset'] = convert_time(moon("time", "set")[0].text)
    weather['moontime'] = skytime(moon, weather['moonrise'],
                                  weather['moonset'])
    weather['moon_phase'] = moon_dict[7]
    weather['moonshine'] = ''.join(d
                                   for d in moon_dict[1] if d.isdigit()) + '%'
    weather['moon_minus'] = image_moongiant[moon_minus]
    weather['moon_icon'] = image_moongiant[moon_icon]
    weather['moon_plus'] = image_moongiant[moon_plus]

    # Make separate dictionary for temperature history
    history = OrderedDict()
    history['high_today'] = convert_item(highs[2].strip('°'), 'temp', scale,
                                         out_scale)
    history['high_mean'] = convert_item(highs[3].strip('°'), 'temp', scale,
                                        out_scale)
    if highs[5] == 'N/A':
        history['high_record'] = 'No record'
        history['high_record_year'] = ''
    else:
        history['high_record'] = convert_item(highs[5].split('°')[0], 'temp',
                                              scale, out_scale)
        history['high_record_year'] = highs[5].split()[1]
    history['high_last_year'] = convert_item(highs[7].strip('°'), 'temp',
                                             scale, out_scale)

    history['low_today'] = convert_item(lows[2].strip('°'), 'temp', scale,
                                        out_scale)
    history['low_mean'] = convert_item(lows[3].strip('°'), 'temp', scale,
                                       out_scale)
    if lows[5] == 'N/A':
        history['low_record'] = 'No record'
        history['low_record_year'] = ''
    else:
        history['low_record'] = convert_item(lows[5].split('°')[0], 'temp',
                                             scale, out_scale)
        history['low_record_year'] = lows[5].split()[1]
    history['low_last_year'] = convert_item(lows[7].strip('°'), 'temp', scale,
                                            out_scale)

    # Make a dictionary for the precipitation info
    precipitation = OrderedDict()
    precipitation['percent'] = daily_cond.text.split('\n')[2].split()[1]
    precipitation['amount'] = (convert_item(
        daily_cond.text.split('\n')[3].split()[1], 'measure', scale, out_scale)
                               + units_conversion[out_scale + 'measure'])
    precipitation['hours'] = daily_cond.text.split('\n')[7].split()[3] + " hrs"

    # Collect forecast information and store in nested dictionaries)
    daily_forecasts = strain_forecast(panel_list, scale, out_scale)
    extended_forecasts = strain_forecast(extended_list, scale, out_scale)

    # Create a timestamp
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Change directory
    os.chdir(outputpath)

    # Write output: one value per line, in insertion order, so the Lua
    # reader can consume the files positionally.
    text = open("conditions", "w")
    for key in weather.keys():
        text.write("%s\n" % weather[key])
    for key in history.keys():
        text.write("%s\n" % history[key])
    for key in precipitation.keys():
        text.write("%s\n" % precipitation[key])
    text.write("%s, %s\n" % (city, region))
    text.write("%s\n" % country)
    text.write("%s\n" % now)
    text.write("%s\n" % out_scale)
    text.close()
    text = open("forecast", "w")
    for x in range(0, 5):
        for key in daily_forecasts[x].keys():
            text.write("%s\n" % daily_forecasts[x][key])
    for x in range(0, 5):
        for key in extended_forecasts[x].keys():
            text.write("%s\n" % extended_forecasts[x][key])
    text.close()
    return None
Beispiel #20
0
 def __init__(self):
     """Start a headless Firefox driver with 10 s script and page-load
     timeouts."""
     firefox_options = Options()
     firefox_options.headless = True
     self.driver = webdriver.Firefox(options=firefox_options)
     self.driver.set_script_timeout(10)
     self.driver.set_page_load_timeout(10)
Beispiel #21
0
def _get_driver():
    """Build and return a headless Firefox driver.

    Uses the module-level ``gecko_bin`` executable path and writes the
    geckodriver log to ``gecko_log``.
    """
    opts = Options()
    opts.headless = True
    driver = webdriver.Firefox(
        options=opts,
        executable_path=gecko_bin,
        service_log_path=gecko_log,
    )
    return driver
Beispiel #22
0
    def runBot(self):
        """Watch the video at ``self.url`` once in Firefox, then restart.

        Opens the URL (headless when ``self.headless`` is set), dismisses the
        sign-in and cookie-consent pop-ups, optionally skips pre-roll ads
        (``self.pro``), waits a random interval between 10 seconds and
        ``runtime_max_sec`` unless ``self.only_view`` is set, closes the
        browser and calls itself again.

        Fixed: the two bare ``except:`` clauses now catch ``Exception`` so
        KeyboardInterrupt/SystemExit are no longer swallowed.

        NOTE(review): the tail call to ``self.runBot()`` recurses without
        bound and will eventually raise RecursionError — a loop would be
        safer; kept to preserve existing behavior.
        """

        # TODO: set up a proxy
        #PROXY = "91.205.174.26:80"
        #webdriver.DesiredCapabilities.FIREFOX['proxy'] = {
        #    "httpProxy": PROXY,
        #    "ftpProxy": PROXY,
        #    "sslProxy": PROXY,
        #    "proxyType": "MANUAL",
        #}

        #with webdriver.Firefox() as driver:
        #    # Open URL
        #    driver.get(url)

        options = Options()
        if self.headless:
            options.headless = True

        driver = webdriver.Firefox(
            options=options)  # executable_path=r"/usr/bin/geckodriver"
        driver.delete_all_cookies()
        driver.get(self.url)

        # Read the video duration so the watch time can be derived from it.
        duration_element = driver.find_element_by_class_name(
            'ytp-time-duration')
        runtime_full = duration_element.text
        runtime_calc = runtime_full.split(":")
        # Max watch time: self.runtime percent of the video's minute count,
        # in seconds (the seconds part of the duration is ignored).
        runtime_max_sec = (((int(runtime_calc[0]) * 60) / 100) *
                           int(self.runtime))
        print(runtime_max_sec)
        # Sign-in popup shown on sponsored videos
        if self.pro:
            try:
                sponsored_login = WebDriverWait(driver, 10).until(
                    EC.visibility_of_element_located(
                        (By.XPATH, "//div[@id='dismiss-button']")))
                print(sponsored_login.__dict__)
                for sponsored_element in sponsored_login.find_elements_by_xpath(
                        "//div[@id='dismiss-button']"):
                    print(sponsored_element.text)
                    print(sponsored_element.id)
                    print(sponsored_element.get_attribute("class"))
                    # sleep timer to imitate a "real" person
                    time.sleep(random.uniform(1, 2.5))
                    ActionChains(driver).click(sponsored_element).perform()
            except Exception:  # best effort: popup may simply not be there
                print("No Login Button")

        # Cookie consent popup lives in an iframe; switch into it first.
        WebDriverWait(driver, 10).until(
            EC.frame_to_be_available_and_switch_to_it((By.ID, "iframe")))
        cookieFrame = WebDriverWait(driver, 20).until(
            EC.visibility_of_element_located(
                (By.XPATH, "//div[@id='introAgreeButton']")))
        print(cookieFrame.__dict__)
        # print(driver.page_source) #html code
        for cookie_element in cookieFrame.find_elements_by_xpath(
                "//div[@id='introAgreeButton']"):
            print(cookie_element.text)
            print(cookie_element.id)
            print(cookie_element.get_attribute("class"))
            # sleep timer to imitate a "real" person
            time.sleep(random.uniform(1, 2.5))
            ActionChains(driver).click(cookie_element).perform()

        # Wait for the video container, then leave the consent iframe.
        time.sleep(5)
        driver.switch_to.default_content()

        # Skip ads
        if self.pro is not None:
            try:
                # wait 5 sec till skip button is available
                time.sleep(5)

                ads_element = driver.find_element_by_class_name(
                    'ytp-ad-skip-button-container')
                print(ads_element.__dict__)
                print(ads_element.text)
                ActionChains(driver).click(ads_element).perform()
            except Exception:  # best effort: there may be no skippable ad
                print("No Ads")
        # Min runtime 10 sec
        # Max runtime 70% of video runtime
        print(self.only_view)
        if self.only_view is None:
            time.sleep(random.uniform(10, runtime_max_sec))
            driver.close()
        else:
            driver.close()

        # recursion for test
        self.runBot()
Beispiel #23
0
def getBookInfo(bookID, userLogin, userPassword, dbPath):
  """Return info about a bmstu.press book, scraping the site on a cache miss.

  The TinyDB database at ``dbPath`` is consulted first.  On a miss the
  function logs in with the given credentials, reads the page count and
  title, opens the online reader and inspects captured network requests
  (selenium-wire) to recover the page-image URL key and image format,
  then caches everything via ``writeDB``.

  Returns whatever ``readDB(bookID, db)`` yields (``False`` when the book
  could not be fetched).

  Fixes: the capability key typo ``"pageLoadStartegy"`` (silently ignored
  by the driver) is corrected; the ``finally`` block no longer raises
  ``NameError`` when Firefox fails to start; ``findRequestKey`` is called
  once instead of twice; the bare ``except:`` now catches ``Exception``.
  """
  def login(login, password):
    # Click the site's login button, then fill and submit the form.
    loginButton = WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".login")))
    loginButton.click()
    completeForm(login, password)
    print("Login successful!")

  def completeForm(login, password):
    # Type credentials into the login form and submit it.
    form = WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "form")))
    loginField = form.find_element_by_css_selector("input[name='login']")
    passwordField = form.find_element_by_css_selector("input[name='password']")
    loginField.send_keys(login)
    passwordField.send_keys(password)
    time.sleep(0.5)
    submit = form.find_element_by_css_selector("button[type='submit']")
    submit.click()

  def getNumberOfPages():
    # The page count sits in the properties list item containing 'стр'
    # (Russian abbreviation for "pages"); take the first number found.
    pagesProperty = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//*[@data-state-content='online']" + "//ul" + "/li[text()[contains(., 'стр')]]")))
    pagesCountString = pagesProperty.get_attribute('textContent')
    return list(map(int, re.findall(r'\d+', pagesCountString)))[0]

  def getBookTitle():
    # Book title from the page header, trailing whitespace stripped.
    title = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, ".page-title h1")))
    return title.get_attribute('textContent').rstrip()

  def openReader():
    # Navigate from the catalog page to the online reader.
    URL = driver.current_url
    driver.get(URL + "/reader")
    print("Opened reader.")

  def findRequestKey():
    # Scan requests captured by selenium-wire for an OEBPS image response;
    # return (url-without-extension, extension), or False when none found.
    for request in driver.requests:
      if request.response:
        link = urlparse.unquote(request.url)
        if (("image" in request.response.headers['Content-Type']) and ("OEBPS" in link)):
          splitLink = link.rsplit(".", 1)
          return splitLink[0], splitLink[1]
    return False

  rootURL = "https://bmstu.press"
  homeURL = rootURL + "/catalog/item/" + str(bookID).zfill(4)
  db = TinyDB(dbPath)

  if readDB(bookID, db) != False:
    print("Book found in database. No need to connect to server.")
  else:
    print("Book not found in database. Connecting to server...")
    driver = None  # keeps the finally block safe if Firefox fails to start
    try:
      swireOptions = {
      'suppress_connection_errors': True,
      'connection_timeout': 50,
      }
      selOptions = Options()
      selOptions.headless = True
      # Correct capability name; the old misspelling never took effect.
      selOptions.capabilities["pageLoadStrategy"] = "eager"

      driver = webdriver.Firefox(options=selOptions, seleniumwire_options=swireOptions) # pylint: disable=unexpected-keyword-arg
      driver.get(homeURL)
      print("Connected to server.")
      login(userLogin, userPassword)
      endPage = getNumberOfPages()
      title = getBookTitle()
      openReader()
      time.sleep(10)
      # Call findRequestKey exactly once: requests keep streaming in, so
      # two consecutive calls could disagree.
      requestKey = findRequestKey()
      if requestKey == False:
        raise ValueError("Did not find the right request.")
      key, imgFormat = requestKey
      writeDB(bookID, title, endPage, key, imgFormat, db)
    except Exception:
      print("Could not receive response from server or this book ID might not exist. Please try again or change ID.")
    finally:
      if driver is not None:
        driver.close()
        driver.quit()
  return readDB(bookID, db)
Beispiel #24
0
def init_driver():
    """Create and return a headless Firefox webdriver."""
    opts = Options()
    opts.headless = True
    return webdriver.Firefox(options=opts)
def start():
    """Launch headless Firefox via the bundled geckodriver and return it."""
    firefox_options = Options()
    firefox_options.headless = True
    driver = webdriver.Firefox(
        options=firefox_options,
        executable_path='firefox\\geckodriver.exe',
    )
    return driver
Beispiel #26
0
# CLI log-in helper: reads site short-codes from urls.json, prompts for the
# account password, and optionally prepares a private-browsing profile.
from json import load
from sys import exit
import argparse, getpass
from selenium import webdriver
from selenium.webdriver.firefox.options import Options

# Map of site short-codes -> site data, loaded once at startup.
with open(r"urls.json") as f:
    urls = load(f)
f = None  # drop the reference to the (already closed) file handle

options = Options()
options.headless = False  # keep the browser visible for interactive log-in

# CLI: positional site code and username, optional -p/--private flag.
parser = argparse.ArgumentParser()
parser.add_argument("url",
                    type=str,
                    help="expects custom codes of sites",
                    choices=[x for x in urls.keys()])
parser.add_argument("username", type=str, help="expects your username")
parser.add_argument("-p",
                    "--private",
                    action="store_true",
                    help="browse inprivate")
args = parser.parse_args()

uname = args.username
passw = getpass.getpass("password? ")  # prompt without echoing

# Private mode: force Firefox to start in private browsing.
# NOTE(review): firefox_profile is built here but not visibly passed to a
# webdriver within this snippet — confirm it is used further down.
if args.private:
    firefox_profile = webdriver.FirefoxProfile()
    firefox_profile.set_preference("browser.privatebrowsing.autostart", True)
    def get_freq(self) -> dict:
        """Scrape gnomAD pages for each variant and collect MAF + filter status.

        For every variant in ``self.var_list``, loads the variant's page,
        parses the non-Finnish minor allele frequency out of the populations
        table, and records the exome/genome filter status from
        ``self.get_filter_status``.

        Returns:
            dict with parallel lists under the keys "RS Name", "MAF",
            "exome_filter_status" and "genome_filter_status".

        Fixed: the previous ``browser_handling_dict`` evaluated
        ``determine_webdriver(...)`` for BOTH the "firefox" and "chrome"
        keys, launching two browsers and leaking the unused one.  The
        factory is now called exactly once for ``self.browser``.
        """

        # Four parallel lists tracking the scraped values per variant.
        variant_list: list = []
        allele_freq_list: list = []
        exome_filter_list: list = []
        genome_filter_list: list = []

        # Run the browser in headless mode.
        options = Options()
        options.headless = True

        # Create the driver for the configured browser (exactly one launch).
        browser = determine_webdriver(self.browser, options)

        # iterating through each variant
        for variant in self.var_list:

            # Full page URL for this specific variant.
            full_url: str = "".join([self.url_start, variant, self.url_end])

            browser.get(full_url)

            # Wait until the populations table is present before reading it.
            element = WebDriverWait(browser, self.time).until(
                EC.presence_of_element_located((
                    By.CLASS_NAME,
                    "Table__BaseTable-sc-7fgtt2-0.PopulationsTable__Table-yt4zj1-0.gRZyOM"
                )))

            frequencies_table = browser.find_element_by_class_name(
                "Table__BaseTable-sc-7fgtt2-0.PopulationsTable__Table-yt4zj1-0.gRZyOM"
            )

            # Raw table text; the MAF is parsed out of the row that follows
            # the "(non-Finnish)" population label.
            pop_freq_txt: str = frequencies_table.text

            # Index just past the "(non-Finnish)" label.
            start_indx: int = pop_freq_txt.find("(non-Finnish)")

            start_indx = start_indx + len("(non-Finnish)")

            # Substring starting right after "(non-Finnish)".
            pop_freq_substring: str = pop_freq_txt[start_indx:]

            # The first alphabetic character marks the next row's label,
            # bounding the numeric cells of the non-Finnish row.
            letter_match = re.search("[a-zA-Z]", pop_freq_substring)

            first_letter: str = letter_match.group(0)

            # find second index
            end_indx: int = pop_freq_substring.find(first_letter)

            # Fourth space-separated token of that numeric row is the MAF.
            allele_freq: str = pop_freq_substring[:end_indx].split(
                " ")[3].strip("\n").strip(" ")

            print(
                f"This is the allele frequencies for the variant {variant} is {allele_freq}"
            )

            # Exome/genome filter status scraped from the same page.
            filter_tuple: tuple = self.get_filter_status(browser)

            # Record this variant's values in the parallel lists.
            variant_list.append(variant)
            allele_freq_list.append(allele_freq)
            exome_filter_list.append(filter_tuple[0])
            genome_filter_list.append(filter_tuple[1])

        # creating a dictionary to keep track of the allele frequencies
        gnomad_freq_dict: dict = {
            "RS Name": variant_list,
            "MAF": allele_freq_list,
            "exome_filter_status": exome_filter_list,
            "genome_filter_status": genome_filter_list
        }

        # closing the webdriver
        browser.close()
        browser.quit()

        return gnomad_freq_dict
Beispiel #28
0
def test_verisini_cek():
    """Scrape BIST100, GARAN.IS, USD/TRY and crude-oil quotes and assemble
    the model input vector.

    Publishes the raw feature list as global ``garanti_test`` and the
    scaled, reshaped array as global ``X_test``; also returns both.
    """
    options = Options()
    options.headless = True
    # Results are exposed through module-level globals as well as returned.
    global garanti_test
    global X_test
# =============================================================================
#     BIST100
# =============================================================================
    driver_path = r"..\geckodriver\geckodriver.exe"
    fox = webdriver.Firefox(options=options,executable_path=driver_path)
    url = "https://tr.investing.com/indices/ise-100-historical-data"
    fox.get(url)

    time.sleep(5)  # crude wait for the historical-data table to render
    bist100_open = fox.find_element_by_xpath('//*[@id="curr_table"]/tbody/tr[2]/td[3]')
    bist100_open = bist100_open.get_attribute('innerHTML')
    fox.close()
# =============================================================================
#      Fetching the Garanti data
# =============================================================================
    browser = webdriver.Firefox(options=options,executable_path=driver_path)

    url = "https://finance.yahoo.com/quote/GARAN.IS/history?p=GARAN.IS"
    browser.get(url)

    time.sleep(10)

    # Latest row of Yahoo's historical table: open/high/low/close/volume.
    garan_open = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[2]/span')
    garan_high = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[3]/span')
    garan_low = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[4]/span')
    garan_close = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[5]/span')
    garan_volume = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[7]/span')

    # Collect the innerHTML strings for each Garanti field.
    garan_is = []
    garanti = [garan_open,garan_high,garan_low,garan_close,garan_volume]
    for i in garanti:
        garan_is.append((i.get_attribute('innerHTML')))


    browser.close()
# =============================================================================
#        Fetching the dollar (USD/TRY) data
# =============================================================================

    browser = webdriver.Firefox(options=options,executable_path=driver_path)
    url = "https://finance.yahoo.com/quote/TRY%3DX/history?p=TRY%3DX"
    browser.get(url)

    time.sleep(5)

    usd_try = browser.find_element_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr[2]/td[2]/span')

    dolar = usd_try.get_attribute('innerHTML')

    browser.close()

# =============================================================================
#       Fetching the crude-oil data
# =============================================================================

    browser = webdriver.Firefox(options=options,executable_path=driver_path)
    url = "https://tr.investing.com/commodities/crude-oil-historical-data"
    browser.get(url)

    time.sleep(5)

    ham_petrol = browser.find_element_by_xpath('//*[@id="curr_table"]/tbody/tr[2]/td[3]')

    petrol = ham_petrol.get_attribute('innerHTML')

    browser.close()

# =============================================================================
#     Preparing the data before it is fed to the model
#     Feature order used in training: <br>
#     Garan_Open, Garan_High, Garan_Low, Garan_Close, Garan_Volume, Bist_Open, Dolar_Open, Petrol_Open
# =============================================================================

    # Strip the thousands separator from the volume figure.
    garan_is[-1] = garan_is[-1].replace(',', '')

    garanti_test = []
    for i in garan_is:
        garanti_test.append(float(i))


    # Turkish number format: drop thousands dots, comma becomes decimal point.
    bist100_open = bist100_open.replace('.','')
    bist100_open = bist100_open.replace(',','.')
    bist100_open= float(bist100_open)
    garanti_test.append(bist100_open)
    garanti_test.append(float(dolar))
    petrol = petrol.replace(',','.')
    garanti_test.append(float(petrol))
    garanti_np = np.asarray(garanti_test).reshape(-1,1)

    # NOTE(review): fit_transform on this single test vector scales it against
    # its own statistics, not the training set's — confirm this matches how
    # the model was trained.
    sc = StandardScaler()
    test = sc.fit_transform(garanti_np)



    X_test = test.reshape(test.shape[1], test.shape[0], 1)
    print('test verimiz:  ', garanti_test)
    return garanti_test, X_test 
Beispiel #29
0
 def setUpClass(cls):
     """Start one shared Firefox driver; headless when running on Travis CI."""
     super().setUpClass()
     firefox_opts = Options()
     # TRAVIS is set in CI environments, where no display is available.
     firefox_opts.headless = bool(os.environ.get("TRAVIS"))
     cls.selenium = webdriver.Firefox(options=firefox_opts)
 def setUp(self):
     """Create a fresh headless Firefox driver before each test."""
     firefox_opts = Options()
     firefox_opts.headless = True
     self.driver = Firefox(options=firefox_opts)
#BEGIN IMPORT STATEMENTS
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.keys import Keys
import requests
#END IMPORT STATEMENTS

#BEGIN GLOBAL VARIABLES
# Headless Firefox pointed at the ASU building-outages page.
opts = Options()
opts.headless = True
assert opts.headless  #Operating in headless mode
browser = Firefox(options=opts)
browser.get('https://cfo.asu.edu/buildingmaintenance-outages-shutdowns')
#WEBEX VARIABLES
# Cisco Webex (Spark) messaging endpoint and request payload.
url = "https://api.ciscospark.com/v1/messages"
message = ""
# NOTE(review): body captures `message` while it is still empty; later code
# must update body["text"] before posting or an empty message is sent —
# confirm against the rest of the script.
body = {
    "roomId":
    "Y2lzY29zcGFyazovL3VzL1JPT00vODc0YmU4ZjAtMWMyOC0xMWVhLWJlYmQtMjk2MDA0YWI2MDdm",
    "text": message
}
headers = {
    'content-type': 'application/json',
    'authorization': 'Bearer INSERTBEARERTOKENHERE',  # placeholder token
    'User-Agent': "JustMe",
    'Accept': "*/*",
}
#END GLOBAL VARIABLES

#BEGIN GATHER FINDINGS
# Each outage entry on the page carries id="pData".
results = browser.find_elements_by_id('pData')
Beispiel #32
0
    def run(self):
        """Log in to the Nike store, fill payment details and submit an order.

        Uses the driver matching ``self.browser`` (firefox/chrome/opera),
        retries the login while error pop-ups appear, fills card data (CVC
        only when a stored card exists), then waits until ``self.drop_time``
        before repeatedly clicking the submit button.

        Fixed: the two bare ``except:`` clauses around element clicks now
        catch ``Exception`` so KeyboardInterrupt/SystemExit pass through.

        NOTE(review): the ``while len(element) == 0`` lookups busy-wait
        without sleeping — consider WebDriverWait; kept as-is here.
        """

        if self.browser == "firefox":
            # webdriver.DesiredCapabilities.FIREFOX['proxy'] = {
            #     "httpProxy": self.proxy,
            #     "ftpProxy": self.proxy,
            #     "sslProxy": self.proxy,
            #
            #     "proxyType": "MANUAL",
            #
            # }
            options = Options()
            options.headless = True
            self.driver = webdriver.Firefox(options=options)
        elif self.browser == "chrome":
            self.driver = webdriver.Chrome(
                chrome.ChromeDriverManager().install())
        elif self.browser == "opera":
            self.driver = webdriver.Opera()
        else:
            return "Wrong type of browser"
        # self.driver.get('https://www.expressvpn.com/what-is-my-ip')
        self.driver.get(self.url)
        sleep(14)
        # Wait for the login form, then submit the credentials.
        element = self.driver.find_elements_by_name("emailAddress")
        while len(element) == 0:
            element = self.driver.find_elements_by_name("emailAddress")
        element[0].send_keys(self.login)
        self.driver.find_element_by_name("password").send_keys(self.password)
        self.driver.find_element_by_class_name(
            "nike-unite-component.nike-unite-submit-button").click()
        # Retry the login while an error popup keeps appearing; the
        # "middleName" field only exists once login has succeeded.
        el_test = self.driver.find_elements_by_id("middleName")
        while len(el_test) != 1:
            sleep(2)
            element = self.driver.find_elements_by_class_name(
                "nike-unite-error-close")
            if len(element) != 0:
                element[0].click()
                self.driver.find_element_by_name("password").send_keys(
                    self.password)
                self.driver.find_element_by_class_name(
                    "nike-unite-component.nike-unite-submit-button").click()

            el_test = self.driver.find_elements_by_id("middleName")

        # Fill the middle-name field and confirm the address form.
        element = self.driver.find_elements_by_id("middleName")
        while len(element) != 1:
            element = self.driver.find_elements_by_id("middleName")
        element[0].send_keys(self.middle_name)
        element = self.driver.find_elements_by_xpath(
            "//button[@class='button-continue'][.='Сохранить и продолжить']")
        while len(element) != 0:
            element[0].click()
            element = self.driver.find_elements_by_xpath(
                "//button[@class='button-continue'][.='Сохранить и продолжить']"
            )
        sleep(1)
        # Payment: a stored card only needs its CVC, otherwise enter the
        # full card data inside the payment iframe.
        element = self.driver.find_elements_by_class_name("stored-card-text")
        if len(element) != 0:
            iframeSwitch = self.driver.find_element_by_class_name("cvv")
            self.driver.switch_to.frame(iframeSwitch)
            self.driver.find_element_by_id("cardCvc-input").send_keys(
                self.cardCvc)
        else:
            iframeSwitch = self.driver.find_element_by_class_name("newCard")
            self.driver.switch_to.frame(iframeSwitch)
            self.driver.find_element_by_id("cardNumber-input").send_keys(
                self.cardNumber)
            self.driver.find_element_by_id("cardExpiry-input").send_keys(
                self.cardExpiry)
            self.driver.find_element_by_id("cardCvc-input").send_keys(
                self.cardCvc)
        self.driver.switch_to.parent_frame()

        # Click through the "continue" buttons until none remain.
        element = self.driver.find_elements_by_xpath(
            "//button[@class='button-continue'][.=' Продолжить ']")
        while len(element) != 0:
            try:
                element[0].click()
            except Exception:  # button went stale/hidden — we're done
                break
            element = self.driver.find_elements_by_xpath(
                "//button[@class='button-continue'][.=' Продолжить ']")

        # Wait for the drop time, then submit the order repeatedly.
        element = self.driver.find_elements_by_xpath(
            "//button[@class='button-submit'][.=' Отправить заказ ']")
        wait_time = self.drop_time - int(datetime.now().timestamp())
        if wait_time < 0:
            wait_time = 0
        sleep(wait_time)
        while len(element) != 0:
            try:
                element[0].click()
            except Exception:  # button went stale/hidden — order submitted
                break
            element = self.driver.find_elements_by_xpath(
                "//button[@class='button-submit'][.=' Отправить заказ ']")
        print("Каеф")
Beispiel #33
0
 def __init__(self, tag):
     """Remember the tag to scrape and start a headless Firefox bot."""
     self.tag = tag
     firefox_opts = Options()
     firefox_opts.headless = True
     self.bot = webdriver.Firefox(options=firefox_opts)
def buy_ticket(args):
    """Drive the ZSSK e-shop (ikvc.slovakrail.sk) to order a free student ticket.

    Fills in the departure/arrival/date/time from ``args``, picks the first
    connection, selects the junior + student-card discount with the
    free-travel option, fills personal details loaded from ``person.txt``
    and submits the order.  ``args`` must provide ``headless``,
    ``departure``, ``arrival``, ``date`` and ``time`` attributes.

    Fixed: the two station-autocomplete confirmations previously ended in
    a bare ``.click`` (a method reference that was never invoked, so the
    suggestion was never clicked); they now call ``.click()``.
    """

    # FIREFOX DESTINATION FOLDER
    folder = os.path.dirname(os.path.realpath(__file__))

    # FIREFOX OPTIONS
    options = Options()
    options.headless = args.headless

    # FIREFOX PROFILE: auto-download tickets into this script's folder.
    profile = FirefoxProfile()
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/octet-stream,application/vnd.ms-excel')
    profile.set_preference('browser.download.dir', str(folder))
    log_info('FirefoxProfile: "browser.download.dir" = "{}"'.format(str(folder)))

    # Launch Firefox web browser.
    driver = webdriver.Firefox(options=options, firefox_profile=profile)

    # Use full screen mode.
    # driver.maximize_window()

    # Logging information
    log_info('Opened {} (version {})'.format(
            driver.capabilities['browserName'],
            driver.capabilities['browserVersion']
        )
    )

    # Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete.
    driver.implicitly_wait(30)

    # Load page 'Vyhľadanie spojenia'
    driver.get('https://ikvc.slovakrail.sk/esales/search')

    # Info
    log_info('Loading page "Vyhľadanie spojenia"')

    try:
        delay = 30  # wait seconds for web page to load, added more second

        WebDriverWait(driver, delay).until(
            EC.presence_of_all_elements_located(
                (By.ID, 'searchPanel')
            )
        )

        # Logging information
        log_info('Loaded page "Vyhľadanie spojenia"')

    except TimeoutException:
        log_info('Loading took too much time.')
        log_info('Closed {} (version {}).'.format(
               driver.capabilities['browserName'],
               driver.capabilities['browserVersion']
           )
        )
        driver.close()
        die('Page loading failure.',1)

    sleep(1)

    # Info
    log_info('Page title is "{}".'.format(driver.title))

    # Check if 'ZSSK' is in the page title
    assert 'ZSSK' in driver.title

    # FROM
    elem_city_from = driver.find_element_by_id('fromInput')
    elem_city_from.clear()
    elem_city_from.send_keys(args.departure)
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/div/div/form/div[1]/div/div[1]/div[1]/div[1]/ul/li/a'
    ).click()  # was `.click` — a bare method reference that never clicked
    log_info('Filling "Odkiaľ" in "Vyhľadanie spojenia" with "{}".'.format(args.departure))
    sleep(0.5)

    # TO
    elem_city_to = driver.find_element_by_id('toInput')
    elem_city_to.clear()
    elem_city_to.send_keys(args.arrival)
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/div/div/form/div[1]/div/div[1]/div[1]/div[3]/ul/li/a'
    ).click()  # was `.click` — a bare method reference that never clicked
    log_info('Filling "Kam" in "Vyhľadanie spojenia" with "{}".'.format(args.arrival))
    sleep(0.5)

    # DATE
    elem_date = driver.find_element_by_id('departDate')
    elem_date.clear()
    elem_date.send_keys(args.date)
    driver.find_element_by_xpath('//html').click()
    log_info('Filling "Dátum cesty tam" in "Vyhľadanie spojenia" with "{}".'.format(args.date))
    sleep(0.5)

    # TIME
    elem_time = driver.find_element_by_id('departTime')
    elem_time.clear()
    elem_time.send_keys(args.time)
    driver.find_element_by_xpath('//html').click()
    log_info('Filling "Odchod" in "Vyhľadanie spojenia" with "{}".'.format(args.time))
    sleep(0.5)

    log_info('Filled train credentials in "Vyhľadanie spojenia".')

    # CONFIRM
    driver.find_element_by_id('actionSearchConnectionButton').click()
    log_info('Clicked on "Vyhľadať spojenie".')
    sleep(2)

    # CLICK ON FIRST
    driver.find_element_by_css_selector(
        'div.connection-group:nth-child(2) > div:nth-child(1)'
    ).click()
    log_info('Clicked on first train.')
    sleep(1)

    # BUY TICKET
    driver.find_element_by_xpath(
        '//*[@id="dayGroupLoop:0:eSalesConnectionLoop:0:j_idt302"]'
    ).click()
    log_info('Clicked on "Kúpiť lístok".')
    sleep(1)

    # PASSENGER TYPE SELECTION
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/span/div/div[1]/form/div/div/div/div/div/div/div[1]/div[1]/div/div/div/div/a[1]/span[2]'
    ).click()
    log_info('Choosing passenger type.')
    sleep(1)

    # JUNIOR SELECTION
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/span/div/div[1]/form/div/div/div/div/div/div/div[1]/div[1]/div/div/div[1]/div/a[1]/ul/li[3]'
    ).click()
    log_info('Selected "Mladý (16 - 25 r.)".')
    sleep(1)

    # DISCOUNT SELECTION
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/span/div/div[1]/form/div/div/div/div/div/div/div[1]/div[1]/div/div/div/div/a[2]/span[2]'
    ).click()
    log_info('Choosing card type.')
    sleep(1)

    # CARD SELECTION
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/span/div/div[1]/form/div/div/div/div/div/div/div[1]/div[1]/div/div/div[1]/div/a[2]/ul/li[2]'
    ).click()
    log_info('Selected "Preukaz pre žiaka/Študenta".')
    sleep(1)

    # ENABLED OPTION FOR FREE TICKET
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/span/div/div[1]/form/div/div/div/div/div/div/div[1]/div[1]/div/div/div[2]/div/div/label'
    ).click()
    log_info('Checkbox enabled for "Nárok na bezplatnú prepravu".')
    sleep(1)

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="actionIndividualContinue"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Voľba cestujúcich".')
    sleep(3)

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="ticketsForm:connection-offer:final-price:j_idt198"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Voľba cestovného lístka".')
    sleep(1)

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="ticketsForm:j_idt97"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Doplnkové služby".')
    sleep(1)

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="cartForm:j_idt284"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Obsah košíka (1)".')
    sleep(1)

    # LOAD PERSONAL INFORMATION
    person = load_user_credentials('person.txt')

    # FILL EMAIL
    email = driver.find_element_by_id('email')
    email.clear()
    email.send_keys(person.email)
    log_info('Filling "Váš e-mail" at "Osobné údaje (2)".')

    # FILL NAME
    name = driver.find_element_by_id('cartItemLoop:0:connectionPersonal:passengerLoop:0:firstname')
    name.clear()
    name.send_keys(person.name)
    log_info('Filling "Meno" at "Osobné údaje (2)".')

    # FILL SURNAME
    surname = driver.find_element_by_id('cartItemLoop:0:connectionPersonal:passengerLoop:0:lastname')
    surname.clear()
    surname.send_keys(person.surname)
    log_info('Filling "Priezvisko" at "Osobné údaje (2)".')

    # FILL REGISTRATION NUMBER
    card_number = driver.find_element_by_id('cartItemLoop:0:connectionPersonal:passengerLoop:0:cislo-registracie-p1')
    card_number.clear()
    card_number.send_keys(person.train_card)
    log_info('Filling "Číslo registrácie:" at "Osobné údaje (2)".')

    log_info('All personal informations filled at "Osobné údaje (2)".')

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="j_idt177"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Osobné údaje (2)".')
    sleep(1)

    # I AGREE WITH THE TERMS AND CONDITIONS
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/div[2]/div/form/div/div/div[1]/div/div/div/label'
    ).click()
    log_info('Checkbox enabled for "Súhlasím s obchodnými podmienkami " at "Výber platby (3)".')
    sleep(1)

    # CONTINUE
    driver.find_element_by_xpath(
        '//*[@id="j_idt107"]'
    ).click()
    log_info('Clicked on "Pokračovať" at "Výber platby (3)".')
    sleep(1)

    # PAY
    driver.find_element_by_xpath(
        '//*[@id="cartForm:j_idt240"]'
    ).click()
    log_info('Clicked on "Zaplatiť" at "Súhrn (4)".')
    sleep(1)

    """
    TODO

    # DOWNLOAD PDF
    log_info('DOWNLOAD: Clicked on "Uložiť lístok".')
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[3]/div[2]/div/form/div/div/div[3]/div[1]/a'
    ).click()

    log_info('DOWNLOAD: PDF downloaded to "{}".'.format(str(folder)))
    """

    # Waiting 10 seconds
    log_info('Waiting 10 seconds before closing {} (version {}).'.format(
            driver.capabilities['browserName'],
            driver.capabilities['browserVersion']
        )
    )
    sleep(10)

    # Close the web browser (Firefox).
    driver.close()

    # Info
    log_info('Closed {} (version {}).'.format(
            driver.capabilities['browserName'],
            driver.capabilities['browserVersion']
        )
    )
def paradox_polling():
    """Poll a Paradox alarm panel's web UI and stream status changes to Firehose.

    Logs into the panel at ``PARADOX_IPADDRESS`` using ``PARADOX_USERCODE`` /
    ``PARADOX_PASSWORD``, scrapes the area/zone names once, then repeatedly
    fetches ``statuslive.html`` and, whenever the zone or area status differs
    from the previous poll, puts a timestamped JSON record onto the Kinesis
    Firehose stream named by ``KINESIS_STREAM``.

    If ``KEYPRESS_CHECK`` is set in the environment, pressing ENTER stops the
    polling loop.  The panel session is logged out and the browser and virtual
    display are released on every exit path.
    """
    options = Options()
    options.headless = True

    # Set screen resolution to 1366 x 768 like most 15" laptops.
    display = Display(visible=0, size=(1366, 768))
    display.start()

    driver = webdriver.Firefox(executable_path='/usr/local/bin/geckodriver',
                               options=options)

    ipaddress = os.environ['PARADOX_IPADDRESS']
    if not paradox_login(driver, ipaddress, os.environ['PARADOX_USERCODE'],
                         os.environ['PARADOX_PASSWORD']):
        print('Login failed.')
        driver.quit()
        # Release the virtual display on the early-exit path too (was leaked).
        display.stop()
        sys.exit()

    producer = boto3.client('firehose')

    try:
        # Fetch the static area/zone name tables once from the main page.
        print('Getting info..')
        driver.get('http://{}/index.html'.format(ipaddress))
        sleep(2)
        area_name = get_array_from_source('tbl_areanam', driver.page_source)
        area_name = [x.replace('"', '') for x in area_name]
        print('Area Name: {}'.format(area_name))

        # Zone names come quoted and may contain spaces; normalise them so
        # they are usable as record-key fragments.
        zone_name = get_array_from_source('tbl_zone', driver.page_source)
        zone_name = [x.replace('"', '').replace(' ', '_') for x in zone_name]
        print('Zone Name: {}'.format(zone_name))

        if not area_name:
            # The panel serves an empty table when another session holds it.
            print('Server in use. Exit.')
            driver.quit()
            sys.exit()

        if 'KEYPRESS_CHECK' in os.environ:
            print('Starting loop. Press ENTER to exit.')

        last_zone_status, last_area_status = [], []

        while True:
            sleep(1)

            driver.get('http://{}/statuslive.html'.format(ipaddress))
            page = driver.page_source
            zone_status = [int(x) for x in
                           get_array_from_source('tbl_statuszone', page)]
            area_status = [int(x) for x in
                           get_array_from_source('tbl_useraccess', page)]

            # Empty tables mean the session was lost (or taken over): stop.
            if not zone_status or not area_status:
                break

            if (zone_status != last_zone_status
                    or area_status != last_area_status):
                print('Status Zone: {}'.format(zone_status))
                print('Status Area: {}'.format(area_status))

                firehose_record = {
                    'time':
                    datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
                }
                # Areas are 1-based in the record keys.
                for i, status in enumerate(area_status):
                    if status != 0:
                        firehose_record['area.{}.{}'.format(
                            i + 1, area_name[i])] = status

                # zone_name is a flat [area_no, name, area_no, name, ...]
                # list; an area number of 0 marks an unused zone slot.
                for i in range(len(zone_name) // 2):
                    if int(zone_name[i * 2]) != 0:
                        firehose_record['area.{}.zone.{}'.format(
                            zone_name[i * 2],
                            zone_name[i * 2 + 1])] = zone_status[i]

                producer.put_record(
                    DeliveryStreamName=os.environ['KINESIS_STREAM'],
                    Record={'Data': json.dumps(firehose_record) + '\n'})

            last_area_status = area_status
            last_zone_status = zone_status

            # Optional interactive escape hatch: a pending line on stdin
            # (zero-timeout select) ends the loop.
            if 'KEYPRESS_CHECK' in os.environ:
                if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    input()  # consume the pending line before leaving
                    break

    finally:
        # Always try to log out so the panel frees the session, then release
        # the browser and the virtual display.
        print('Logout..')
        driver.get('http://{}/logout.html'.format(ipaddress))
        sleep(1)
        driver.quit()
        display.stop()  # was `display, stop()` — a NameError at teardown