Code Example #1
def main():

    # Scrape url
    URL = BASE_URL + "1&proposal=false&limit=0"

    login_link = "https://iqtools-grocery-prod.intrtl.com/site/login"
    link = "https://iqtools-grocery-prod.intrtl.com/validate?klass_id=5e7dc0691c007-3006&lots_id&size_id&confirm=&page=1&proposal=false&limit=0&klass_search="

    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    driver = Firefox(options=options)

    response = None
    try:
        # driver.request() (provided by seleniumrequests) posts the login form through the browser session
        response = driver.request('POST', login_link, data={
            # "_csrf-markup": "erEQFG7wJ6l9K9AHhbg_mLHT1aASSO_XzoY9DPPJgSogxChmJ7l1-h5Dvmb2_hLP3J74ynE62YGayV5LuKfHaA==",
            "LoginForm[name]": "elina.ducohinskaya",
            "LoginForm[password]": "8KGGpg14",
            "LoginForm[rememberMe]": "0",
            "login-button": ""
        })
    except TimeoutException:
        pass
    time.sleep(2)
    print(response)
Code Example #2
def get_driver():
    """Get the Firefox driver.

    Returns the Firefox driver object and handles the geckodriver path.
    """
    configure_log(log_path)
    return Firefox(executable_path=gecko_path)
Code Example #3
        def webdriver_class():
            profile = FirefoxProfile()

            # Make sure Firefox WebDriver addon works, even if it could not be verified
            profile.set_preference('xpinstall.signatures.required', False)
            webdriver = Firefox(profile)
            return webdriver
Code Example #4
    def setup(self, headless=False, date_input=None):

        # Validate date variable
        if date_input is not None:
            self.date = datetime.strptime(date_input, "%d.%m.%Y").date()

        # Create the download directory if it does not already exist
        if not os.path.exists(self.__download_dir):
            os.mkdir(self.__download_dir)

        # Set headless option and firefox profile
        options = Options()
        options.headless = headless

        fp = webdriver.FirefoxProfile()
        fp.set_preference("browser.download.folderList", 2)
        fp.set_preference("browser.download.manager.showWhenStarting", False)
        fp.set_preference("browser.download.dir", self.__download_dir)
        fp.set_preference(
            "browser.helperApps.neverAsk.saveToDisk",
            "text/plain, application/vnd.ms-excel, text/csv, text/comma-separated-values, "
            "application/octet-stream")

        # Initialize Firefox() object to navigate
        self.driver = Firefox(firefox_profile=fp, options=options)

        return self
Code Example #5
File: base_page.py Project: kkkzzg/jd_spider
def set_up_browser():
    profile = webdriver.FirefoxProfile()
    profile.set_preference("dom.max_script_run_time", 600)
    profile.set_preference("webdriver.log.file", "/tmp/firefox_console")
    driver = Firefox(firefox_profile=profile)
    driver.set_window_size(1920, 1080)
    driver.implicitly_wait(LONG)
    return driver
Code Example #6
    def browser(self, url):
        try:
            driver = Firefox(options=self.options)
            headers = self.headers
            result = driver.request(self.method, url, data=self.data)
            # import pry; pry()  # leftover debugging breakpoint, disabled so the method returns cleanly
            return driver
        except Exception as e:
            raise SeleniumError(f"Error setting instance of Firefox.\nError: {e}")
Code Example #7
def make_webdriver():
    capabilities = DesiredCapabilities.FIREFOX

    # We're pinning to (outdated) Firefox 45.0.2 for now, which doesn't
    # work with the new Marionette/geckodriver stuff - it uses webdriver
    # instead. Disable marionette.
    capabilities["marionette"] = False

    return Firefox(capabilities=capabilities)
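
The capability hack above only applies to that legacy Firefox 45-era setup; with current Firefox, geckodriver/Marionette is mandatory. A minimal modern sketch (assuming a recent Selenium plus seleniumrequests; the function name is illustrative, not part of the original project):

# Hedged modern equivalent: headless mode is configured through Options
# instead of raw capabilities, and Marionette stays enabled.
from selenium.webdriver.firefox.options import Options
from seleniumrequests import Firefox

def make_webdriver_modern():
    options = Options()
    options.add_argument("--headless")
    return Firefox(options=options)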
Code Example #8
    def browser(self, url):
        try:
            driver = Firefox(options=self.options)
            driver.get(url)

            return driver
        except Exception as e:
            raise SeleniumUatError(
                f"Error setting instance of Firefox.\nError: {e}")
Code Example #9
def check_ajax_page_for_msg(label, msg, url):
    try:
        # headers = {
        #     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        #     "Accept-Encoding":"gzip, deflate, br",
        #     "Accept-Language":"en-US,en;q=0.5",
        #     "Cache-Control":"no-cache",
        #     "Connection":"keep-alive",
        #     "Host":"www.nvidia.com",
        #     "Pragma":"no-cache",
        #     "Referer":"https://www.google.com/",
        #     "TE":"Trailers",
        #     "Upgrade-Insecure-Requests":"1",
        #     "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0"
        # }

        webdriver = Firefox()
        webdriver.implicitly_wait(30)
        response = webdriver.request('GET', url)

        response2 = webdriver.request('GET', 'https://www.nvidia.com/etc/designs/nvidiaGDC/clientlibs_foundation.min.3a16fd19562feeb504bb63525a249962.js')
        webdriver.execute_script(response2.text)

        # webdriver.find_element_by_class_name('availability')

        # with open('clientlibs_foundation.min.3a16fd19562feeb504bb63525a249962.js') as f:
        #     webdriver.execute_script(f.read())

        # element = WebDriverWait(webdriver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "availability")))

        wait_for_ajax(webdriver)

        # webdriver.find_element_by_class_name('availability')

        if response.status_code != 200:
            send_sms_msg('Attention needed. Got a response other than 200 OK from ' + label + ' page.')

        html = response.text

        soup = BeautifulSoup(html, 'html.parser')

        disclaimer_msg_present = msg in html

        html_file = open(label + '.html', 'w', encoding='utf-8')
        html_file.write(html)
        html_file.close()
        availability = soup.find('div', {'class': 'availability'})

        print('Value of disclaimer_msg_present for ' + label + ' page: ' + str(disclaimer_msg_present))
        if not disclaimer_msg_present:
            send_sms_msg('ATTENTION! ' + label + ' page message has changed.')

        print('\nResponse from ' + label + ' page link: ' + str(response.status_code) + '\n')
    except Exception as e:
        send_sms_msg('Exception occurred while sending request to Ironmaster website.')
        print(e)
Code Example #10
def connect(browser='firefox', session=None):
  from seleniumrequests import Firefox
  driver = Firefox()
  driver.get("https://www.backstabbr.com/")

  # Restore previous session if specified
  if session:
    for cookie in pickle.load(open(session, "rb")):
      driver.add_cookie(cookie)

  return driver
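
A companion sketch for the cookie restore above: persisting the current cookies with pickle so a later connect(session=...) call can reload them. The helper name and file name are illustrative, not part of the original project:

import pickle

def save_session(driver, session="backstabbr_session.pkl"):
    # Dump the browser's current cookies so they can be re-added later
    with open(session, "wb") as fh:
        pickle.dump(driver.get_cookies(), fh)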
Code Example #11
File: checkbrowser.py Project: shokone/XSSChecker
def _firefox(show):
    # declare options
    opt = firefox.options.Options()

    if show is False:
        opt.headless = True

    browser = Firefox(options=opt,
                      executable_path='driver/linux/geckodriver',
                      log_path="log/geckodriver.log")

    return browser
Code Example #12
def get_firefox():
    caps = DesiredCapabilities().FIREFOX
    options = FirefoxOptions()
    options.add_argument("--headless")
    caps["pageLoadStrategy"] = "eager"  # interactive
    profile = FirefoxProfile()
    profile.set_preference("dom.disable_beforeunload", True)
    browser = Firefox(desired_capabilities=caps,
                      firefox_profile=profile,
                      options=options)
    browser.set_page_load_timeout(6)
    return browser
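
Because get_firefox() above sets a 6-second page-load timeout, driver.get() can raise TimeoutException; a hedged usage sketch (the target URL is only a placeholder):

from selenium.common.exceptions import TimeoutException

browser = get_firefox()
try:
    browser.get("https://example.com")  # placeholder URL
except TimeoutException:
    # The page did not become interactive within the 6-second limit set above
    pass
finally:
    browser.quit()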
Code Example #13
    def __init__(self):
        self.options = Options()
        self.options.set_preference("intl.accept_languages", 'en-us')
        self.options.set_preference('useAutomationExtension', False)
        self.options.set_preference('dom.webdriver.enabled', False)

        self.driver = Firefox(options=self.options)
        self.driver.execute_script(
            "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"
        )
        self.driver.get("https://amiunique.org/fp")
        time.sleep(5)
        self.driver.get("https://antcpt.com/score_detector/")
        time.sleep(5)
Code Example #14
    def get_driver(self):
        profile = FirefoxProfile()
        profile.set_preference('browser.download.folderList', 2)
        profile.set_preference('browser.download.manager.showWhenStarting',
                               False)
        profile.set_preference('browser.download.dir', TEMP_PATH)
        profile.set_preference(
            'browser.helperApps.neverAsk.saveToDisk',
            ('application/csv,text/csv,application/vnd.ms-excel,'
             'application/x-msexcel,application/excel,'
             'application/x-excel,text/comma-separated-values'))

        driver = Firefox(firefox_profile=profile, executable_path=GECKO_PATH)
        return driver
Code Example #15
def main(args=None):
    args = parse_args(args)
    webdriver = Firefox()
    webdriver.get('https://serverdensity.io')
    response = webdriver.request('POST',
                                 '{}/sessions'.format(DOMAIN),
                                 data={
                                     'username': args.username,
                                     'password': args.password,
                                     'MFAotpcode': 'blaap',
                                     'MFAremember': 'false',
                                     'accountName': args.username
                                 })
    print(response)
Code Example #16
def init_driver(interactive):
    """
    Initializes the (Firefox) driver.

    :param interactive: whether to run in interactive mode (i.e., displaying the browser), required when off-campus
                        due to the interactive 2FA
    :type interactive: bool
    :return: the driver instance
    :rtype: webdriver.Firefox
    """

    logger().debug("initializing driver (interactive=%s)" % str(interactive))
    options = webdriver.FirefoxOptions()
    options.headless = not interactive
    return Firefox(options=options)
Code Example #17
File: tests.py Project: brfurlan/geonode
    def setUpClass(cls):
        super().setUpClass()

        try:
            """ Instantiate selenium driver instance """
            binary = FirefoxBinary('/usr/bin/firefox')
            opts = FirefoxOptions()
            opts.add_argument("--headless")
            executable_path = GeckoDriverManager().install()
            cls.selenium = Firefox(firefox_binary=binary,
                                   firefox_options=opts,
                                   executable_path=executable_path)
            cls.selenium.implicitly_wait(10)
        except Exception as e:
            logger.error(e)
Code Example #18
def driver():
    use_chromedriver = True
    if use_chromedriver:
        capabilities = DesiredCapabilities.CHROME
        capabilities['loggingPrefs'] = {
            'browser': 'ALL',
            # 'driver': 'ALL',
        }
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        wd = Chrome(options=options, desired_capabilities=capabilities)
    else:
        options = webdriver.FirefoxOptions()
        options.add_argument('--headless')
        wd = Firefox(firefox_options=options)
    yield wd
    if use_chromedriver:
        print('Browser', wd.get_log('browser'))
        # print('Driver', wd.get_log('driver'))
    wd.quit()
Code Example #19
File: __init__.py Project: bmeares/apex
def get_driver(debug: bool = False):
    """
    Returns an alive Firefox WebDriver
    """
    global driver

    ### webdriver with features from the normal requests lib
    from seleniumrequests import Firefox
    ### we need options to start a headless firefox instance
    from selenium.webdriver.firefox.options import Options

    from selenium.webdriver.remote.command import Command
    is_alive = None
    try:
        driver.execute(Command.STATUS)
        is_alive = True
    except Exception:
        is_alive = False

    if not is_alive:
        browser_options = Options()
        browser_options.add_argument('--headless')
        browser_options.add_argument('--window-size=1920x1080')
        driver = Firefox(options=browser_options,
                         executable_path=geckodriver_location)

    ### load existing cookies
    if cookies_path.exists():
        driver.get(urls['login'])
        if debug:
            dprint("Found existing cookies. Attempting to reuse session...")
        import pickle
        with open(cookies_path, 'rb') as cookies_file:
            cookies = pickle.load(cookies_file)
        for cookie in cookies:
            driver.add_cookie(cookie)

    return driver
Code Example #20
File: views.py Project: TrueTony/Svetlovka
def close_up(request):
    print('start close_up')
    webdriver = Firefox()

    userlink = request.user.profile.link

    # list used for reversing the link order
    ll = []
    with open(f'files_of_users/links_of_books_{userlink}.txt',
              'r',
              encoding='utf-8') as f:
        if not os.path.exists(f'files_of_users/list_of_books_{userlink}.txt'):
            open(f'files_of_users/list_of_books_{userlink}.txt',
                 'w',
                 encoding='utf-8').close()
        with open(f'files_of_users/list_of_books_{userlink}.txt',
                  'r',
                  encoding='utf-8') as d:
            list_of_books = d.read()
            # reversal is needed because new books appear first in the link list, not last
            for link in f:
                ll.append(link)
            for link in reversed(ll):
                link = link.replace('\n', '')
                print('\n', link)
                if link not in list_of_books:
                    print('Processing', link)
                    # sleep to avoid captcha
                    time.sleep(5)

                    r = webdriver.request('GET', link)
                    soup = BeautifulSoup(r.content, 'lxml')

                    # dump the current page to a file for error handling
                    with open('files_of_users/current_book.txt',
                              'w',
                              encoding='utf-8') as f:
                        f.write(soup.prettify())

                    overview = [link]

                    book = soup.find('div', class_='block-border card-block')
                    author = []
                    if book.find('h2', class_='author-name unreg'):
                        authors = book.find('h2', class_='author-name unreg')
                        names = authors.find_all('a')
                        for name in names:
                            author.append(name.text)
                        overview.append(author)
                    else:
                        author.append('Сборник')
                        overview.append(author)
                    title = book.span.text
                    overview.append(title)
                    tags = book.find_all('a', class_='label-genre')
                    list_of_tags = []
                    for tag in tags:
                        if tag.text.startswith('№'):
                            tag = tag.text.split('в\xa0')[1]
                            list_of_tags.append(tag)
                        else:
                            list_of_tags.append(tag.text)
                    overview.append(list_of_tags)
                    cover = book.find('img', id='main-image-book')['src']
                    overview.append(cover)
                    if book.find('span', itemprop='ratingValue'):
                        rating = book.find('span', itemprop='ratingValue').text
                    else:
                        rating = 0
                    overview.append(rating)
                    description = book.p.text
                    overview.append(description)

                    data = []
                    if os.stat(f'files_of_users/list_of_books_{userlink}.txt'
                               ).st_size != 0:
                        with open(
                                f'files_of_users/list_of_books_{userlink}.txt',
                                'r') as f:
                            old = json.load(f)
                            for i in old:
                                data.append(i)

                    data.append(overview)
                    with open(f'files_of_users/list_of_books_{userlink}.txt',
                              'w') as f:
                        json.dump(data, f)
                    print('Processed')

                else:
                    print('Already processed', link)

    webdriver.close()
    print('finish close_up')
    return render(request, 'liv/test.html')
Code Example #21
# Import any WebDriver class that you would usually import from
# selenium.webdriver from the seleniumrequests module
import sys
from seleniumrequests import Firefox

url = sys.argv[1]
# Simple usage with built-in WebDrivers:
webdriver = Firefox()
response = webdriver.request(
    'GET', '%s/xss.php?xss=<script>document.write(INJECTX)</script>' % url)
if '<script>document.write(INJECTX)</script>' in response.text:
    print("Vulnerable!")
print(response.text)
webdriver.quit()
SECONDARY_COMMANDS = ''
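
The object returned by webdriver.request() in seleniumrequests is a standard requests.Response, so the usual attributes are available as well; a minimal hedged sketch reusing the response from the script above:

# Status code and headers come from the underlying requests.Response object.
print(response.status_code)
print(response.headers.get('Content-Type'))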
Code Example #22
File: chbio_scraper.py Project: doldol1/Crawlaper
        if i==1000000:
            URL=URL[:-6]+str(i)
        else:
            URL=URL[:-7]+str(i)

    return URL




txt_file=open('chbio_cos.txt', 'w', encoding='utf-8')

j=1
while j <= 2235:
    # web_driver=webdriver.Chrome('D:\scrapper\web_driver\chromedriver.exe')
    web_driver=Firefox()
    # web_driver.get(url)
    web_driver.request('POST', url, data={"tableId": "68", "State": "1", "bcId": "138009396676753955941050804482"})
    bs_tmp=BeautifulSoup(web_driver.page_source, 'lxml')
    the_list=bs_tmp.find_all('a')
    
    for i in the_list:
        print(i.get_text())
        txt_file.write(i.get_text()+'\n')

    # web_driver.find_element_by_name('goInt').clear()
    # web_driver.find_element_by_name('goInt').send_keys(str(j))
    # web_driver.find_element_by_xpath("//div[@id='content']/div/table[4]/tbody/tr/td[7]/input").click()
    j+=1
    url=chkURL(j, url)
    web_driver.quit()
Code Example #23
File: stream.py Project: bmsiegel/Band-Streaming
from selenium import webdriver
from seleniumrequests import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import os
import subprocess
import clipboard

GMF_BAND_ID = 79162159
JBS_BAND_ID = 79463189

browser = Firefox()
browser.get('https://auth.band.us/email_login')

browser.implicitly_wait(10)

textBox = browser.find_element_by_xpath('//*[@id="email_login_form"]')

textBox.find_element_by_id('input_email').send_keys('', Keys.RETURN)

time.sleep(2.5)

browser.find_element_by_id('pw').send_keys('', Keys.RETURN)

time.sleep(5)

browser.get('https://band.us/band/{}/create-live'.format(JBS_BAND_ID))

time.sleep(5)
browser.find_element_by_xpath(
Code Example #24
            'firstname': xing_id['firstName'],
            'lastname': xing_id['lastName'],
            'contact_since': raw['contactCreatedAt'],
            'note': raw['memo'],
            'org': xing_id['profileOccupation']['occupationOrg'],
            'title': xing_id['profileOccupation']['occupationTitle']
        }
        if contact['note'] is None:
            contact['note'] = ''
        results.append(contact)
    return results


all_contacts = []

with contextlib.closing(Firefox()) as driver:
    wait = ui.WebDriverWait(driver, 300)
    driver.get('https://www.xing.com/settings')
    wait.until(lambda driver: str(driver.current_url).startswith(start_url))
    raw_contacts = load_contact_list(driver, 25)
    all_contacts = parse_contacts(raw_contacts)

workbook = xlsxwriter.Workbook('XingNetwork.xlsx')
contacts_sheet = workbook.add_worksheet('Contacts')

col_width = [0] * 5 + [12]

cols = []
for col in ['Name', 'First Name', 'Organisation', 'Title', 'Note', 'Xing-Profile']:
    cols.append({'header': col})
Code Example #25
def driver():
    os.environ['MOZ_HEADLESS'] = '1'
    with Firefox() as driver:
        yield driver
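
The generator above reads like a pytest fixture (the @pytest.fixture decorator is presumably applied in the original module); a hedged usage sketch with a hypothetical test:

# Hypothetical test consuming the driver fixture above (assumes pytest).
def test_example_domain_title(driver):
    driver.get("https://example.com")
    assert "Example Domain" in driver.title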
Code Example #26
File: checkout.py Project: nmg0721/yzysupply
def atc():
    browser = Firefox()
    browser2 = Firefox()

    getLink()
    link = "https://yeezysupply.com/products/womens-tubular-boot-pvc-transparent"
    atc = "https://yeezysupply.com/cart/add.js"
    size = variants[input("Enter size: ")]
    payload = {"quantity": "1", "id": size}
    input("Press Enter to load and add to cart...")

    # -------------- Go to link and ATC---------------
    browser.get(link)
    response = browser.request('POST', atc, data=payload)
    browser.get("https://yeezysupply.com/cart")
    browser.execute_script(
        "document.getElementsByClassName('K__button CA__button-checkout')[0].click();"
    )

    browser2.get(link)
    response = browser2.request('POST', atc, data=payload)
    browser2.get("https://yeezysupply.com/cart")
    browser2.execute_script(
        "document.getElementsByClassName('K__button CA__button-checkout')[0].click();"
    )

    # -------------- Go to shipping --------------
    input("CONTINUE TO SHIPPING...")
    for i in checkoutPayload:
        inputMsg = browser.find_element_by_id(i[0])
        inputMsg.send_keys(i[1])

    mySelect = Select(
        browser.find_element_by_id("checkout_shipping_address_province"))
    mySelect.select_by_value('Maryland')
    browser.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )

    for i in checkoutPayload:
        inputMsg = browser2.find_element_by_id(i[0])
        inputMsg.send_keys(i[1])

    mySelect = Select(
        browser2.find_element_by_id("checkout_shipping_address_province"))
    mySelect.select_by_value('Maryland')
    browser2.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )

    # -------------- Go to payment --------------
    input("CONTINUE TO PAYMENT METHOD...")
    browser.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )
    browser2.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )

    # -------------- Fill card --------------
    input("FILL CREDIT CARD...")
    eachFrame = 0
    for i in creditCard:
        frame = browser.find_elements_by_xpath(
            '//iframe[@frameborder="0"]')[eachFrame]
        browser.switch_to.frame(frame)
        inputMsg = browser.find_element_by_id(i[0])
        for e in range(0, len(i)):
            inputMsg.send_keys(i[e])
        browser.switch_to.default_content()
        eachFrame += 1

    eachFrame = 0
    for i in creditCard:
        frame = browser2.find_elements_by_xpath(
            '//iframe[@frameborder="0"]')[eachFrame]
        browser2.switch_to.frame(frame)
        inputMsg = browser2.find_element_by_id(i[0])
        for e in range(0, len(i)):
            inputMsg.send_keys(i[e])
        browser2.switch_to.default_content()
        eachFrame += 1

    # -------------- FINAL STEP CHECKOUT --------------
    print_warn("CHECKOUT?")
    input("")
    browser.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )
    browser2.execute_script(
        "document.getElementsByClassName('step__footer__continue-btn btn')[0].click();"
    )

    time.sleep(10)
    browser.quit()
Code Example #27
class RequestService:
    web_driver = Firefox()

    def get_product_info(self, product_request) -> Product:
        dom_model = self.form_soup_dom_model(product_request.url)
        product_rating = dom_model.find('span', {"class": "rating"})
        image_link = dom_model.find('div', {
            "class": "mainpic"
        }).find("img")["src"]
        product = Product()
        product.rating = product_rating.text
        product.image_url = image_link
        product.link_url = product_request.url
        product.name = product_request.product_name
        link_list = self.create_review_link_list(
            dom_model.find('ul', {"class": "list-comments"}))
        print(link_list)

        page_count = 1
        is_not_final_page = True

        while is_not_final_page:
            first_link_on_current_page = link_list[0]
            review_set = self.form_review_set(review_link_list=link_list)
            product.reviews.extend(review_set)
            dom_model = self.form_soup_dom_model(product_request.url +
                                                 "?page=" + str(page_count))
            link_list = self.create_review_link_list(
                dom_model.find('ul', {"class": "list-comments"}))
            first_link_on_next_page = link_list[0]
            page_count += 1
            is_not_final_page = first_link_on_current_page != first_link_on_next_page

        return product

    def form_soup_dom_model(self, url) -> BeautifulSoup:
        response = self.web_driver.request('GET', url)
        return BeautifulSoup(response.text, 'html.parser')

    def form_review_set(self, review_link_list) -> set:
        reviews = set()
        for review_link in review_link_list:
            dom_model = self.form_soup_dom_model(review_link)
            review = Review()
            review.rating = dom_model.find(
                'meta', {"itemprop": "ratingValue"})["content"]
            review.body = dom_model.find('div', {
                "itemprop": "reviewBody"
            }).text
            review.title = dom_model.find('h2', {
                "class": "reviewTitle"
            }).find("a").text
            review.read_link = review_link
            review.post_time = dom_model.find('span', {
                "class": "dtreviewed"
            }).text
            review.reviewer_name = dom_model.find("strong", {
                "class": "reviewer"
            }).find("a").text
            print("Review received: " + repr(review))
            reviews.add(review)
        return reviews

    # Build a list of review URLs
    def create_review_link_list(self, ulElement) -> list:
        review_link_list = []
        for sub in ulElement.find_all('li', recursive=False):
            review_link = sub.find('div', {
                "class": "reviewTitle"
            }).find("a")["href"]
            review_link_list.append(IRECOMMEND_DOMAIN_NAME + review_link)
        return review_link_list
Code Example #28
def get_driver():
    return Firefox(
        executable_path=
        'C:/Users/aperusse/GitHub/cy-automation-library/geckodriver/geckodriver.exe'
    )
Code Example #29
File: main.py Project: Galyaviev/Next.uk-parser
def start():
    list = select_all()
    for date in list:
        login = date['login']
        password = date['password']
        try:
            print('Starting browser')
            options = Options()
            options.headless = True
            browser = Firefox(options=options)
            browser.delete_all_cookies()
            browser.set_window_position(0, 0)
            browser.set_window_size(1024, 1024)
            browser.get('https://www.next.co.uk/secure/account/Login')
            login_input = browser.find_element_by_id('EmailOrAccountNumber')
            ActionChains(browser).move_to_element(login_input).perform()
            paswword_input = browser.find_element_by_id('Password')
            ActionChains(browser).move_to_element(paswword_input).perform()
            paswword_input.send_keys(random.choice(rany))
            login_input.send_keys(login)
            paswword_input.send_keys(password)
            paswword_input.send_keys(random.choice(rany))
            paswword_input.send_keys(random.choice(rany))
            paswword_input.send_keys(Keys.ENTER)
            time.sleep(random.uniform(3, 5))
            soup = BeautifulSoup(browser.page_source, 'lxml')
            titleTag = soup.find('title').text
            print(titleTag)

            while titleTag == 'Access Denied':
                browser.quit()
                print('close')
                proxy_list = choice(get_proxy())
                proxy_host = proxy_list['ip']
                proxy_port = int(proxy_list['port'])
                print(proxy_host, proxy_port)
                options = Options()
                options.headless = True
                fp = webdriver.FirefoxProfile()
                fp.set_preference("browser.privatebrowsing.autostart", True)
                fp.set_preference("network.proxy.type", 1)
                fp.set_preference("network.proxy.http", proxy_host)
                fp.set_preference("network.proxy.http_port", proxy_port)
                fp.set_preference("network.proxy.https", proxy_host)
                fp.set_preference("network.proxy.https_port", proxy_port)
                fp.set_preference("network.proxy.ssl", proxy_host)
                fp.set_preference("network.proxy.ssl_port", proxy_port)
                fp.set_preference("network.proxy.ftp", proxy_host)
                fp.set_preference("network.proxy.ftp_port", proxy_port)
                fp.set_preference("network.proxy.socks", proxy_host)
                fp.set_preference("network.proxy.socks_port", proxy_port)
                fp.update_preferences()
                print('open browser')
                browser = Firefox(options=options, firefox_profile=fp)
                browser.set_window_position(0, 0)
                browser.set_window_size(1024, 648)
                browser.get('https://api.ipify.org/')
                test_ip = browser.find_element_by_tag_name('pre').text
                print(test_ip + ' - IP reported by the site')
                print(proxy_list['ip'] + ' - IP from the proxy')

                if test_ip == proxy_list['ip']:
                    browser.delete_all_cookies()
                    browser.get('https://www.next.co.uk/secure/account/Login')
                    login_input = browser.find_element_by_id(
                        'EmailOrAccountNumber')
                    ActionChains(browser).move_to_element(
                        login_input).perform()
                    paswword_input = browser.find_element_by_id('Password')
                    ActionChains(browser).move_to_element(
                        paswword_input).perform()
                    paswword_input.send_keys(random.choice(rany))
                    login_input.send_keys(login)
                    paswword_input.send_keys(password)
                    paswword_input.send_keys(random.choice(rany))
                    paswword_input.send_keys(random.choice(rany))
                    paswword_input.send_keys(Keys.ENTER)
                    time.sleep(random.uniform(3, 5))
                    soup = BeautifulSoup(browser.page_source, 'lxml')
                    titleTag = soup.find('title').text
                    print(titleTag)

            browser.get('https://www2.next.co.uk/shoppingbag')
            time.sleep(random.uniform(3, 5))
            r = browser.request('GET', basket)
            write_log(r.json())
            data = browser.request('GET', basket).json()
            coint_bug = len(data["ShoppingBag"]['Items'])
            a = coint_bug - 1

            while coint_bug >= 1:
                message = data["ShoppingBag"]['Items'][a]["StockMessage"]
                thing = data["ShoppingBag"]['Items'][a]["Description"]
                items = data["ShoppingBag"]['Items'][a]["ItemNumber"]
                size = data["ShoppingBag"]['Items'][a]["SizeDescription"]

                if "In Stock" in message:
                    text_telegram = 'Found in login ' + login + ': ' + thing + ' ' + items + ' ' + size
                    web_get = tgapi + '/sendmessage?chat_id={}&text={}'.format(
                        chat_id, text_telegram)

                    requests.get(web_get)
                    print(message)

                # move to the previous item exactly once per iteration
                coint_bug = coint_bug - 1
                a = a - 1

            print(login)
            print(datetime.datetime.now())
            browser.quit()
        except Exception as e:
            with open('error.txt', 'a') as f:
                f.write(str(e) + '\n')
Code Example #30
def close_up(request):
    webdriver = Firefox()

    user_link = request.user.profile.link

    # list needed for reversing the link order
    reversed_list = []
    with open(f'files_of_users/links_of_books_{user_link}.txt', 'r', encoding='utf-8') as f:
        if not os.path.exists(f'files_of_users/list_of_books_{user_link}.txt'):
            open(f'files_of_users/list_of_books_{user_link}.txt', 'w', encoding='utf-8').close()
        with open(f'files_of_users/list_of_books_{user_link}.txt', 'r', encoding='utf-8') as d:
            list_of_books = d.read()
            # reversal is needed because new books appear first in the link list, not last
            for link in f: reversed_list.append(link)
            for link in reversed(reversed_list):
                link = link.replace('\n', '')
                if link not in list_of_books:
                    r = webdriver.request('GET', link)
                    soup = BeautifulSoup(r.content, 'lxml')

                    overview = [link]
                  
                    book = soup.find('div', class_='block-border card-block')
                    author = []
                    if book.find('h2', class_='author-name unreg'):
                        authors = book.find('h2', class_='author-name unreg')
                        names = authors.find_all('a')    
                        for name in names:
                            author.append(name.text)
                        overview.append(author)
                    else:
                        author.append('Сборник')
                        overview.append(author)
                    title = book.span.text
                    overview.append(title)
                    tags = book.find_all('a', class_='label-genre')
                    list_of_tags = []
                    for tag in tags:
                        if tag.text.startswith('№'):
                            tag = tag.text.split('в\xa0')[1]
                            list_of_tags.append(tag)
                        else:
                            list_of_tags.append(tag.text)
                    overview.append(list_of_tags)
                    cover = book.find('img', id='main-image-book')['src']
                    overview.append(cover)
                    if book.find('span', itemprop='ratingValue'):
                        rating = book.find('span', itemprop='ratingValue').text
                    else:
                        rating = 0
                    overview.append(rating)
                    description = book.p.text
                    overview.append(description)

                    data = []
                    if os.stat(f'files_of_users/list_of_books_{user_link}.txt').st_size != 0:
                        with open(f'files_of_users/list_of_books_{user_link}.txt', 'r') as f:
                            old = json.load(f)
                            for i in old:
                                data.append(i)

                    data.append(overview)
                    with open(f'files_of_users/list_of_books_{user_link}.txt', 'w') as f:
                        json.dump(data, f)

    webdriver.close()
    return render(request, 'liv/test.html')