def do_GET(self):
    """Handle a GET request: acknowledge it, then mirror the request's
    host (with its cookie) into a local browser session."""
    self.send_response_only(200)
    self.end_headers()
    # Forward the caller's cookie and Host header so the opened session
    # matches the incoming request.
    session_cookie = self.headers['Cookie']
    target_host = self.headers['Host']
    # NOTE(review): relies on a module-level `args` with an `insecure`
    # flag — confirm it is parsed before the server starts.
    browser.open(target_host, session_cookie, secure=not args.insecure)
def start(name, description):
    """Run every program in the named sequence, one after another.

    Each program is given its own total duration plus the sequence-level
    `wait` padding (in minutes) before the next one starts.
    """
    sequence = get(name)
    stamp = time.strftime('%Y-%m-%d-%H:%M:%S')
    print('==> [%s] Starting sequence %s' % (stamp, name))
    browser.open()
    for prog in sequence['programs']:
        # Program runtime plus the per-sequence settle time, in minutes.
        minutes = program.get_total(prog) + sequence['wait']
        stamp = time.strftime('%Y-%m-%d-%H:%M:%S')
        print('==> [%s] Starting program %s (will last %i minutes)' % (stamp, prog, minutes))
        program.start(prog, description)
        # easysleep takes seconds.
        systools.easysleep(minutes * 60)
    stamp = time.strftime('%Y-%m-%d-%H:%M:%S')
    print('==> [%s] Done' % (stamp))
def nextPage(self):
    """Advance to the next search-result page and report whether it is valid."""
    self.currentPage += 1
    page_url = self.searchUrl().format(self.currentPage)
    # Fetch and parse the page, caching the DOM for later selectors.
    self.pageHtml = lxml.html.fromstring(browser.open(page_url))
    return self._isValidPage()
def nextPage(self):
    """Load the manga page on the first call; every later call returns False."""
    if not self.onFirstPage:
        return False
    # First (and only) page: fetch, parse, and cache the DOM.
    page_url = self._buildMangaUrl()
    self.pageHtml = lxml.html.fromstring(browser.open(page_url))
    self.onFirstPage = False
    return True
def get_image_url():
    """Resolve the link held in `tag` to the full-size Pixiv image URL."""
    work_url = urlparse.urljoin('http://www.pixiv.net', tag[0].get("href"))
    html = lxml.html.fromstring(browser.open(work_url))
    imgtag = html.cssselect('div.works_display > a > img')[0]
    thumbnail = __clean_url(imgtag.get("src"))
    # Pixiv medium thumbnails carry an "_m." suffix; stripping it yields
    # the original-size image URL.
    return thumbnail.replace("_m.", ".")
def __open_in_browser(self):
    """Open this view's URL in the system's external browser."""
    target = self.url().toString()
    external_browser.open(target)
import browser import lxml.html from prettytable import PrettyTable # Open some site, let's pick a random one, the first that pops in mind: # assign depth, how many pages do you want to scrape... # get the news time, and title x = PrettyTable(["News", "time", "stock"]) for i in range(5): i = str(i + 1) r = browser.open( 'http://newsletter.hotstocked.com/newsletters/index/page-' + i) doc = lxml.html.parse(r).getroot() listItem = doc.cssselect(".nl-single-title") for item in listItem: title = item.cssselect("a")[0].text_content().strip() date = item.cssselect(".nl-single-date")[0].text_content() x.add_row([title, date, '---']) print x
#coding=utf8 import capture import browser import time import gen_config import config import ocr if __name__ == '__main__': url = 'http://aluckyapple-paipai.daoapp.io/' ie = browser.open(url) # wait until stateready time.sleep(5) img = capture.capture_window(ie.HWND) #img.save('ie.bmp', 'BMP') rect=capture.get_window_rect(ie.HWND) for k,v in config.capture_pos.items(): ow, oh = v rectangle = (rect.left + ow, rect.top + oh, rect.left + ow + gen_config.capture_width, rect.top + oh + 10) img = capture.capture_rect(rectangle) img.save('%s.bmp' % k, 'BMP') print ocr.recofull(img) time.sleep(5) #ie.Quit()
sys.exit(app.exec_()) if args.path: sys.exit(__app_path()) if args.options: app = QApplication(["silos"]) ex = QTWSOptionsWindow() sys.exit(app.exec_()) if not app_id: if args.url: app_id, profile = __find_app_by_url(args.url) if not app_id: url = args.url.replace("silo://", "https://") external_browser.open(url) sys.exit(0) else: app_id = "appChooser" if app_id and app_id.lower() == "appchooser": app = QApplication(["silos"]) ex = QTWSChooserWindow(QTWSFolderManager.apps_folder()) sys.exit(app.exec_()) if app_id: app_id = app_id.lower() app_path = None if args.appfile: app_path = args.appfile else:
import browser import lxml.html from prettytable import PrettyTable # Open some site, let's pick a random one, the first that pops in mind: # assign depth, how many pages do you want to scrape... # get the news time, and title x = PrettyTable(["News", "time", "stock"]) for i in range(5): i = str(i + 1) r = browser.open('http://newsletter.hotstocked.com/newsletters/index/page-' + i) doc = lxml.html.parse(r).getroot() listItem = doc.cssselect(".nl-single-title") for item in listItem: title = item.cssselect("a")[0].text_content().strip() date = item.cssselect(".nl-single-date")[0].text_content() x.add_row([title, date, '---']); print x