Esempio n. 1
0
def RespCode(domain):
    """Return the HTTP status code for http://www.<domain> as a string.

    Uses a selenium-requests Chrome driver so the request is issued with
    the browser's own session (cookies, headers).

    :param domain: bare domain name, e.g. "example.com"
    :return: status code as a string, e.g. "200"
    """
    driver = Chrome()
    try:
        response = driver.request('GET', "http://www." + str(domain))
        # The original sliced str(response) ("<Response [200]>"[11:][:-2])
        # to recover the digits; status_code yields the same value robustly.
        return str(response.status_code)
    finally:
        # Release the browser even if the request raises.
        driver.close()
Esempio n. 2
0
 def tistory_post(self, blog_name, title, content, category):
     """Publish a post to a Tistory blog via the write API.

     :param blog_name: target blog name
     :param title: post title
     :param content: post body (HTML)
     :param category: category id
     :return: the API response (also printed, as before)
     """
     driver = Chrome(self.chromedriver_path)
     try:
         response = driver.request('POST',
                                   'https://www.tistory.com/apis/post/write',
                                   data={
                                       "access_token": self.tistory_token,
                                       "blogName": blog_name,
                                       'title': title,
                                       'content': content,
                                       'category': category,
                                       'visibility': '2'
                                   })
     finally:
         # Quit the browser even if the request raises (original leaked it).
         driver.quit()
     print(response)
     return response
	def get_alarm_status():
		"""Log in to ADT Pulse with a headless browser and return the
		current alarm status text scraped from the summary orb.

		Returns None when no status text is found.
		"""
		url = LOGIN_URL

		# Virtual X display so Chrome can run on a headless server.
		display = Display(visible=0, size=(800, 600))
		display.start()

		browser = webdriver.Chrome()
		state = None  # original could raise NameError when nothing matched
		try:
			browser.get(url)
			# NOTE(review): the original also called webdriver.request(...)
			# on the selenium *module* and read response.status/statusText,
			# neither of which exists — dropped here.

			username = browser.find_element_by_name('usernameForm')
			username.send_keys(config.ADTPULSE_USERNAME)

			password = browser.find_element_by_name('passwordForm')
			password.send_keys(config.ADTPULSE_PASSWORD)

			browser.find_element_by_name('signin').click()

			# Crude fixed wait for the post-login page to render.
			time.sleep(20)
			html = browser.page_source
			postlogin_url = browser.current_url  # was browser.url (no such attr)
			_LOGGER.debug('Post Login URL from AdtPulse.com: %s ', postlogin_url)

			soup = BeautifulSoup(html, 'lxml')

			currentStatus = soup.findAll('div', { 'id': 'divOrbContent' })
			_LOGGER.debug('Current Alarm Status DIV from AdtPulse.com: %s ', currentStatus )

			for status in currentStatus:
				# BeautifulSoup tags use find(), not selenium's
				# find_element_by_id() as the original called.
				summary = status.find('div', { 'id': 'divOrbTextSummary' })
				_LOGGER.debug('Status Text DIV from AdtPulse.com: %s', summary)
				if summary is None:
					continue
				for span in summary.findAll('span', { 'class': 'p_boldNormalTextLarge'}):
					_LOGGER.debug('Status Text from AdtPulse.com: %s', span)
					for string in span.strings:
						# Status reads like "Disarmed. All quiet." —
						# keep only the part before the first period.
						if "." in string:
							state, _ = string.split(".", 1)
		finally:
			# Always tear down the browser and the virtual display.
			browser.quit()
			display.stop()

		return state
Esempio n. 4
0
def tistory_post(token, title, content, category):
    """Publish a post to the 'sfixer' Tistory blog via the write API.

    :param token: Tistory API access token
    :param title: post title
    :param content: post body (HTML)
    :param category: category id
    :return: the API response (also printed, as before)
    """
    driver = Chrome()
    try:
        response = driver.request('POST', 'https://www.tistory.com/apis/post/write', data={"access_token": token, "blogName": "sfixer", 'title': title, 'content': content, 'category': category, 'visibility': '2'})
    finally:
        # Quit the browser even if the request raises (original leaked it).
        driver.quit()
    print(response)
    return response
Esempio n. 5
0
def close_up(request):
    """Scrape metadata for each new book link of the current user and
    append it to the user's JSON book list on disk.

    Reads links_of_books_<link>.txt (newest links first), skips links
    already recorded in list_of_books_<link>.txt, fetches each remaining
    page through a selenium-requests Firefox driver and appends
    [link, authors, title, tags, cover, rating, description] to the
    JSON list.

    :param request: Django request; request.user.profile.link keys the files
    :return: rendered 'liv/test.html'
    """
    driver = Firefox()

    user_link = request.user.profile.link
    links_path = f'files_of_users/links_of_books_{user_link}.txt'
    books_path = f'files_of_users/list_of_books_{user_link}.txt'

    try:
        with open(links_path, 'r', encoding='utf-8') as links_file:
            if not os.path.exists(books_path):
                # Ensure the list file exists. ('utf 8' in the original
                # only worked via codec-name normalization.)
                open(books_path, 'w', encoding='utf-8').close()
            with open(books_path, 'r', encoding='utf-8') as books_file:
                list_of_books = books_file.read()
            # Reverse is needed because new links are prepended to the
            # file, not appended.
            reversed_list = [line for line in links_file]

        for link in reversed(reversed_list):
            link = link.replace('\n', '')
            if link in list_of_books:
                continue

            r = driver.request('GET', link)
            soup = BeautifulSoup(r.content, 'lxml')

            overview = [link]

            book = soup.find('div', class_='block-border card-block')

            # Authors: explicit names, or the anthology marker.
            author = []
            authors = book.find('h2', class_='author-name unreg')
            if authors:
                for name in authors.find_all('a'):
                    author.append(name.text)
            else:
                author.append('Сборник')
            overview.append(author)

            overview.append(book.span.text)  # title

            # Genre tags; ranked tags like '№1 в\xa0...' keep only the
            # genre part after the rank prefix.
            list_of_tags = []
            for tag in book.find_all('a', class_='label-genre'):
                if tag.text.startswith('№'):
                    list_of_tags.append(tag.text.split('в\xa0')[1])
                else:
                    list_of_tags.append(tag.text)
            overview.append(list_of_tags)

            overview.append(book.find('img', id='main-image-book')['src'])  # cover

            rating_tag = book.find('span', itemprop='ratingValue')
            overview.append(rating_tag.text if rating_tag else 0)

            overview.append(book.p.text)  # description

            # Rewrite the JSON list per link (a crash loses at most one
            # book). The original rebound `f`, shadowing the links file.
            data = []
            if os.stat(books_path).st_size != 0:
                with open(books_path, 'r') as books_file:
                    data = list(json.load(books_file))
            data.append(overview)
            with open(books_path, 'w') as books_file:
                json.dump(data, books_file)
    finally:
        # Release the browser even if a fetch/parse raises (original leaked it).
        driver.close()

    return render(request, 'liv/test.html')
Esempio n. 6
0
def close_up(request):
    """Scrape metadata for each new book link of the current user and
    append it to the user's JSON book list, with progress printed.

    Same flow as the quiet variant, plus: a 5s sleep per page against
    captcha, a prettified dump of the current page to
    files_of_users/current_book.txt for error diagnosis, and progress
    messages on stdout.

    :param request: Django request; request.user.profile.link keys the files
    :return: rendered 'liv/test.html'
    """
    print('start close_up')
    driver = Firefox()

    userlink = request.user.profile.link
    links_path = f'files_of_users/links_of_books_{userlink}.txt'
    books_path = f'files_of_users/list_of_books_{userlink}.txt'

    try:
        with open(links_path, 'r', encoding='utf-8') as links_file:
            if not os.path.exists(books_path):
                # Ensure the list file exists. ('utf 8' in the original
                # only worked via codec-name normalization.)
                open(books_path, 'w', encoding='utf-8').close()
            with open(books_path, 'r', encoding='utf-8') as books_file:
                list_of_books = books_file.read()
            # Reverse is needed because new links are prepended to the
            # file, not appended.
            pending = [line for line in links_file]

        for link in reversed(pending):
            link = link.replace('\n', '')
            print('\n', link)
            if link not in list_of_books:
                print('Обрабатывается', link)
                # sleep against captcha
                time.sleep(5)

                r = driver.request('GET', link)
                soup = BeautifulSoup(r.content, 'lxml')

                # Dump the page for error diagnosis. (Original rebound
                # `f` here, shadowing the links-file handle.)
                with open('files_of_users/current_book.txt',
                          'w',
                          encoding='utf-8') as dump_file:
                    dump_file.write(soup.prettify())

                overview = [link]

                book = soup.find('div', class_='block-border card-block')

                # Authors: explicit names, or the anthology marker.
                author = []
                authors = book.find('h2', class_='author-name unreg')
                if authors:
                    for name in authors.find_all('a'):
                        author.append(name.text)
                else:
                    author.append('Сборник')
                overview.append(author)

                overview.append(book.span.text)  # title

                # Genre tags; ranked tags like '№1 в\xa0...' keep only
                # the genre part after the rank prefix.
                list_of_tags = []
                for tag in book.find_all('a', class_='label-genre'):
                    if tag.text.startswith('№'):
                        list_of_tags.append(tag.text.split('в\xa0')[1])
                    else:
                        list_of_tags.append(tag.text)
                overview.append(list_of_tags)

                overview.append(book.find('img', id='main-image-book')['src'])  # cover

                rating_tag = book.find('span', itemprop='ratingValue')
                overview.append(rating_tag.text if rating_tag else 0)

                overview.append(book.p.text)  # description

                # Rewrite the JSON list per link so a crash loses at
                # most one book.
                data = []
                if os.stat(books_path).st_size != 0:
                    with open(books_path, 'r') as books_file:
                        data = list(json.load(books_file))
                data.append(overview)
                with open(books_path, 'w') as books_file:
                    json.dump(data, books_file)
                print('Обработана')

            else:
                print('Уже обработана', link)
    finally:
        # Release the browser even if a fetch/parse raises (original leaked it).
        driver.close()

    print('finish close_up')
    return render(request, 'liv/test.html')
Esempio n. 7
0
tStart = time.time()  # start timing
# Fetch the page content
# r = requests.get('https://www.sinya.com.tw/show/?keyword=ASUS')

# Check the response status
# if r.status_code == requests.codes.ok:
#
# # Parse the HTML with BeautifulSoup
#     soup = BeautifulSoup(r.text, 'html.parser')

# selenium-requests Firefox driver: a selenium browser that can also
# issue plain HTTP requests with the browser's session.
webdriver = Firefox(options=options)
# browser = webdriver.Chrome(options=chrome_options)
# browser.get("https://www.sinya.com.tw/diy")

# POST to the product-option endpoint; "prod_sub_slave_id": "148"
# presumably selects a product subcategory — TODO confirm against the site.
response = webdriver.request('POST',
                             'https://www.sinya.com.tw/diy/show_option/',
                             data={"prod_sub_slave_id": "148"})
response.encoding = 'utf-8'
print('OK')
# print(response.text)

soup = BeautifulSoup(response.text, 'lxml')
# print(soup)
# print(soup)
# Each product entry appears to live in a div.prodClick element.
links = (soup.find_all('div', class_='prodClick'))
# print(len(links))
print(links[0])
print('***********************')
# print(len(links))
# print(links[184].get('title').strip())
for link in links: