def main():
    args = arguments.Args()
    if args.get(0) == 'firefox':
        puts('Grabbing cookies from Firefox')
        jar = browsercookie.firefox()
    elif args.get(0) == 'chrome':
        puts('Grabbing cookies from Chrome')
        jar = browsercookie.chrome()
    else:
        puts('Grabbing cookies from Firefox')
        jar = browsercookie.firefox()

    url = 'https://www.safaribooksonline.com/a/export/csv/'
    puts('\nWaiting for download to begin... (may take a while)')
    with blindspin.spinner():
        r = requests.get(url, stream=True, cookies=jar)

    total_size = int(r.headers.get('content-length', 0))
    filename = 'safari.csv'
    with open(filename, 'wb') as out_file:
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(total_size / 1024) + 1):
            if chunk:
                out_file.write(chunk)
                out_file.flush()
    puts('File saved to {filename}\n'.format(filename=filename))
def setUp(self):
    logging.basicConfig(level=logging.DEBUG)
    with open('data/model.json') as infile:
        self.a = FantasyLabsNFLAgent(cache_name='data/testfantasylabs',
                                     cj=browsercookie.firefox())
        self.players = json.load(infile)
def get_aanduidingsobject_waarden_html(url, feature, parent):
    id = 'bes_bescherming.' + url.rsplit('/', 1)[-1]
    url = ('https://inventaris.onroerenderfgoed.be/erfgoed/node/'
           + str(id) + '/waarden.json')
    cj = browsercookie.firefox()
    waarden = requests.get(url,
                           headers={'Accept': 'application/json'},
                           cookies=cj).json()
    if 'waarden' in waarden:
        s = []
        for waarde in waarden['waarden']:
            for waardetype in waarde['waardetypes']:
                url = ('https://inventaris.onroerenderfgoed.be/thesaurus/waarde/'
                       + str(waardetype) + '.json')
                label = requests.get(url,
                                     headers={'Accept': 'application/json'},
                                     cookies=cj).json()['term']
                s.append(label)
        htmlstring = '<p>' + '<br>'.join(s) + '</p>'
        return htmlstring
    else:
        return ''
def get_aanduidingsobject_waarden_tekst_html(url, feature, parent):
    id = 'bes_bescherming.' + url.rsplit('/', 1)[-1]
    url = ('https://inventaris.onroerenderfgoed.be/erfgoed/node/'
           + str(id) + '/waarden.json')
    cj = browsercookie.firefox()
    waarden = requests.get(url,
                           headers={'Accept': 'application/json'},
                           cookies=cj).json()
    if 'waarden' in waarden and len(waarden['waarden']) > 0:
        h = '<h4>Erfgoedwaarden</h4>'
        for waarde in waarden['waarden']:
            if waarde['uiteenzetting']:
                tekst = waarde['uiteenzetting']
            else:
                tekst = ''
            for waardetype in waarde['waardetypes']:
                url = ('https://inventaris.onroerenderfgoed.be/thesaurus/waarde/'
                       + str(waardetype) + '.json')
                label = requests.get(url,
                                     headers={'Accept': 'application/json'},
                                     cookies=cj).json()['term']
                h = h + '<p><b>' + label + '</b></p>'
                h = h + '<p>' + tekst + '</p>'
        return h
    else:
        return ''
def cookies_test1():
    get_title = lambda html: re.findall('<title>(.*?)</title>', html, flags=re.DOTALL)[0].strip()
    cj = browsercookie.firefox()
    # cj = browsercookie.chrome()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    html = opener.open(URL).read()
    print get_title(html)
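For reference, the same pattern under Python 3 uses urllib.request instead of urllib2; a minimal sketch, where the target URL is a placeholder rather than the URL constant from the snippet above:

import re
import urllib.request

import browsercookie

URL = 'https://example.com/'  # placeholder target, not from the original snippet

def cookies_test1_py3():
    get_title = lambda html: re.findall('<title>(.*?)</title>', html, flags=re.DOTALL)[0].strip()
    cj = browsercookie.firefox()  # an http.cookiejar-compatible jar
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    html = opener.open(URL).read().decode('utf-8', errors='replace')
    print(get_title(html))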
def get_aanduidingsobject_url(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    return e_obj['relaties'][0]['links'][0]['href']
def get_aanduidingsobject_tekst(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    return '<div>' + e_obj['primaire_tekst']['tekst'] + '</div>'
def get_aanduidingsobject_gemeente(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    return e_obj['locatie']['gemeente']
def get_aanduidingsobject_korte_beschrijving(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    return e_obj['korte_beschrijving']
def load_browser_cookies(self):
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)

    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
def get_response(url, params):
    """Attach browser cookies to the request and return the body of the GET response."""
    # dump session cookies from the browser
    cj = browsercookie.firefox()
    # use the cookies for the GET request
    response = requests.get(url=url, params=params, cookies=cj)
    # return the raw response body
    response_body = response.content
    return response_body
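A hedged usage sketch for the helper above; the endpoint and query parameters are illustrative placeholders, not part of the original code:

# Illustrative call; httpbin simply echoes the request back.
body = get_response('https://httpbin.org/get', params={'q': 'browsercookie'})
print(body[:200])  # first 200 bytes of the response body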
def __load_cookie_jar(self):
    if self.__browser == 'chrome':
        self.__cj = chrome()
    elif self.__browser == 'safari':
        self.__cj = safari()
    elif self.__browser == 'firefox':
        self.__cj = firefox()
    else:
        raise ValueError
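When no single browser is preferred, browsercookie also exposes load(), which tries every supported browser on the machine and merges the cookies it finds; a minimal sketch (the URL is a placeholder):

import browsercookie
import requests

# load() aggregates cookies from all browsers browsercookie can read on this machine
cj = browsercookie.load()
resp = requests.get('https://example.com/', cookies=cj)
print(resp.status_code)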
def load_browser_cookies(self):
    # cookies recorded by the Chrome browser
    chrome_jar = self.jars['chrome']
    for c in browsercookie.chrome():
        chrome_jar.set_cookie(c)
    # cookies recorded by the Firefox browser
    firefox_jar = self.jars['firefox']
    for c in browsercookie.firefox():
        firefox_jar.set_cookie(c)
def main():
    parser = argparse.ArgumentParser(description='Monitor clipboard and fetch urls matching regex')
    parser.add_argument('-f', '--file', action='store', help='output file path', default='list.list')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-m', '--match-repository', action='store',
                       help='file containing match patterns, one pattern per line',
                       default='.regexlist')
    group.add_argument('-r', '--match-regex', action='store', help='Regex to match urls')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-F', '--firefox-cookies', action='store_true', default=False,
                       help='Use Firefox cookies for title fetch requests')
    group.add_argument('-C', '--chrome-cookies', action='store_true', default=False,
                       help='Use Chrome cookies for title fetch requests')
    group.add_argument('-J', '--jar-cookies', action='store', default=False,
                       help='Use JAR file as cookies for title fetch requests')
    parser.add_argument('-t', '--notify-title',
                        required=(not set(sys.argv).isdisjoint(
                            ('-F', '--firefox-cookies', '-C', '--chrome-cookies',
                             '-J', '--jar-cookies'))),
                        default=False, action='store_true',
                        help='Show page title in desktop notifications (slower notifications)')
    parser.add_argument('-n', '--notify',
                        required=(not set(sys.argv).isdisjoint(('-t', '--notify-title'))),
                        default=False, action='store_true',
                        help='Show desktop notifications')
    args = parser.parse_args()

    outfile = args.file
    if args.match_regex:
        mon_domains = [args.match_regex]
    else:
        mon_domains = tuple(open(args.match_repository, 'r'))

    if args.firefox_cookies:
        cj = browsercookie.firefox()
    elif args.chrome_cookies:
        cj = browsercookie.chrome()
    elif args.jar_cookies:
        cj = args.jar_cookies
    else:
        cj = None

    with ClipboardWatcher(mon_domains, 1., outfile, args.notify, args.notify_title, cj) as watcher:
        print("Start monitoring ...")
        watcher.start()
        while True:
            try:
                # print("Waiting for changed clipboard...")
                time.sleep(.1)
            except KeyboardInterrupt:
                print("Exit request received. cleaning up ...")
                watcher.stop()
                watcher.join()
                print("Finished")
                break
def dump_cookies() -> None:
    # cj = browsercookie.load()
    # cj = browsercookie.chrome()
    cj = browsercookie.firefox()
    # print(len(cj._cookies))
    # print(cj._cookies)
    # noinspection PyProtectedMember
    # pylint: disable=protected-access
    for k, v in cj._cookies.items():
        print("=======================")
        print(k, v)
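Iterating the jar itself avoids reaching into the protected _cookies mapping; a minimal alternative sketch:

import browsercookie

def dump_cookies_public() -> None:
    # CookieJar objects are iterable, so no protected members are needed
    for cookie in browsercookie.firefox():
        print(cookie.domain, cookie.name, cookie.value)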
def load_browser_cookies(self):
    # load cookies from the Chrome browser
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)
    # load cookies from the Firefox browser
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
def load_browser_cookies(self):
    # iterate over all Chrome cookies and add them to the jar
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)
    # iterate over all Firefox cookies and add them to the jar
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
def load_browser_cookies(self):
    # load cookies from the Chrome browser
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)
    # load cookies from the Firefox browser
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
def load_browser_cookies(self):
    # load cookies from the Chrome browser
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)
    # load cookies from the Firefox browser
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
def get_aanduidingsobject_stijl_html(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    s = []
    if 'stijl' in e_obj and len(e_obj['stijl']) > 0:
        for naam in e_obj['stijl']:
            s.append(naam['naam'])
        htmlstring = '<p><b>Stijl of cultuur: </b>' + ', '.join(s) + '</p>'
        return htmlstring
    else:
        return ''
def get_data(place, level):
    global requests
    # cookie jar loaded from Firefox
    cj = browsercookie.firefox()
    url = 'https://www.facebook.com/search/str/friends'
    while level > 1:
        url += '%2Bof%2Bfriends'
        level -= 1
    url += '%2Bwho%2Blive%2Bin%2B'
    url += place.replace(" ", "%2B")
    url += '/keywords_users'
    # scrape the friend list and related data
    page = requests.get(url, cookies=cj)
    return page.content
def __init__(self, domain):
    self.domain = domain
    self.jar = None
    self.path = f"{os.path.join(__dir__, self.domain)}.txt"
    if os.path.exists(self.path):
        try:
            with open(self.path) as f:
                cookies = json.load(f)
        except Exception:
            raise NotImplementedError('unsupported cookies format')
        else:
            self.jar = requests.utils.cookiejar_from_dict(
                {c['name']: c['value'] for c in cookies})
    else:
        # the file does not exist, so fall back to reading cookies from the browser
        self.jar = browsercookie.firefox()
def __init__(self, cache_name=None):
    logging.getLogger(__name__).addHandler(logging.NullHandler())
    self.s = requests.Session()
    self.s.cookies = browsercookie.firefox()
    self.s.headers.update({
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    })
    if cache_name:
        requests_cache.install_cache(cache_name)
    else:
        requests_cache.install_cache(
            os.path.join(os.path.expanduser("~"), '.rcache', 'dk-nfl-cache'))
def load_browser_cookies(self):
    """
    Get two CookieJar objects from the default dict via self.jars['chrome'] and
    self.jars['firefox'], then call browsercookie's chrome and firefox methods to
    read each browser's cookies and fill them into the matching CookieJar.
    """
    # load cookies from the Chrome browser
    jar = self.jars['chrome']
    chrome_cookiejar = browsercookie.chrome()
    for cookie in chrome_cookiejar:
        jar.set_cookie(cookie)
    # load cookies from the Firefox browser
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
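The jars attribute used by the method above is not shown in the snippet; a minimal sketch of how it might be set up, assuming a plain defaultdict of CookieJar objects and a hypothetical container class:

from collections import defaultdict
from http.cookiejar import CookieJar

import browsercookie

class BrowserCookiesMiddleware:  # hypothetical container for the method above
    def __init__(self):
        # one CookieJar per browser name, created on first access
        self.jars = defaultdict(CookieJar)
        self.load_browser_cookies()

    def load_browser_cookies(self):
        # same filling logic as the snippet above, written as a loop over loaders
        for name, loader in (('chrome', browsercookie.chrome),
                             ('firefox', browsercookie.firefox)):
            jar = self.jars[name]
            for cookie in loader():
                jar.set_cookie(cookie)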
def main():
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    config = ConfigParser()
    configfn = os.path.join(os.path.expanduser('~'), '.pgcred')
    config.read(configfn)
    cn = 'fl-agent-{}'.format(today())
    fla = FantasyLabsNBAAgent(db=None, cache_name=cn, cookies=browsercookie.firefox())
    models = fla.one_model(today(fmt='fl'), 'phan')
    optimizer = LineupOptimizer(settings.DraftKingsBasketballSettings)
    optimizer._players = fl_to_pydfs(models, weights=[.1, .6, .3])
    for lineup in optimizer.optimize(n=5):
        print lineup
def get_friend_list(place, level):
    global requests
    # cookie jar loaded from Firefox
    cj = browsercookie.firefox()
    url = 'https://www.facebook.com/search/str/friends'
    while level > 1:
        url += '%2Bof%2Bfriends'
        level -= 1
    url += '%2Bwho%2Blive%2Bin%2B'
    url += place.replace(" ", "%2B")
    url += '/keywords_users'
    # scrape the friend list and related data
    page = requests.get(url, cookies=cj).content
    # soup = BeautifulSoup(page.text)
    # div = soup.find('div', {'id': 'BrowseResultsContainer'})
    print page
    return ''
def get_aanduidingstype(url, feature, parent):
    cj = browsercookie.firefox()
    e_obj = requests.get(url,
                         headers={'Accept': 'application/json'},
                         cookies=cj).json()
    mapping = {
        "https://id.erfgoed.net/thesauri/aanduidingstypes/11": "Vastgestelde inventaris van de archeologische zones",
        "https://id.erfgoed.net/thesauri/aanduidingstypes/10": "Vastgestelde landschapsatlas",
        "https://id.erfgoed.net/thesauri/aanduidingstypes/9": "Vastgestelde inventaris van het bouwkundig erfgoed",
        "https://id.erfgoed.net/thesauri/aanduidingstypes/12": "Vastgestelde inventaris van houtige beplantingen met erfgoedwaarde",
        "https://id.erfgoed.net/thesauri/aanduidingstypes/13": "Vastgestelde inventaris van historische tuinen en parken"
    }
    return mapping[e_obj['type']['uri']]
def get_subreddits(use_cookies):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    address = "https://www.reddit.com/subreddits"
    if use_cookies:
        cj = browsercookie.firefox()
        html = requests.get(address, cookies=cj, headers=headers).text
    else:
        html = requests.get(address, headers=headers).text
    html = html[:html.find("multireddit of")]
    html = html[html.rfind("href="):]
    html = html[html.rfind("/") + 1:html.find(" ") - 1]
    subreddits = html.split("+")
    subreddits = list(map(lambda x: str(unicode(x)), subreddits))
    return subreddits
def main():
    """
    Vote for every possible pairing in the "QMA Japan Tour 2016 Grand Slam Derby".
    The script cannot get past the image CAPTCHA, so it logs in by reusing the
    browser's cookies.
    """
    # decide which browser to read cookies from
    args = docopt.docopt(__doc__, version=VERSION)
    if args.get('-b') == 'firefox':
        cookie_jar = browsercookie.firefox()
    else:
        cookie_jar = browsercookie.chrome()

    # fetch the HTML of the voting page
    response = requests.get(VOTE_PAGE_URL, cookies=cookie_jar)

    # get the logged-in player name
    soup = bs4.BeautifulSoup(response.text.encode(response.encoding), 'html.parser')
    player_name_box = soup.find('div', {'class': 'player_name_box'})
    if not player_name_box:
        print u'Please log in with the chosen browser first, then run the script again'
        exit()
    player_name = player_name_box.text.strip()

    # show a confirmation prompt
    message = u'Vote using the account of ' + player_name + u'? (Y/n) : '
    if raw_input(message.encode(CONSOLE_ENCODE)).strip().lower() != 'y':
        exit()

    # collect the contestant information
    select = soup.find('select', {'name': 'vote0'})
    options = select.find_all('option')
    players = [Player(name=option.text.strip(), value=option['value'].strip())
               for option in options]

    # vote for every pairing
    for quinella in itertools.combinations(players, 2):
        print u'Voting for', quinella[0].name, '-', quinella[1].name, '...'
        requests.post(VOTE_PAGE_URL,
                      {'vote0': quinella[0].value, 'vote1': quinella[1].value},
                      cookies=cookie_jar)
        time.sleep(WAIT_AFTER_VOTE)

    # done
    print u'Voting finished!'
def threadget(self):
    while True:
        param = self.q_req.get()
        with self.lock:
            self.running += 1
        try:
            # ans = self.opener.open(param.get('url')).read()
            if param.get('browser') == 'chrome':
                cookies = browsercookie.chrome()
            else:
                cookies = browsercookie.firefox()
            if cookies is None:
                ans = requests.get(param.get('url'), headers=self.headers).content
            else:
                ans = requests.get(param.get('url'), cookies=cookies, headers=self.headers).content
        except Exception as e:
            self.q_ans.put((param.get('url'), param.get('current_page'), param.get('end_page'), e))
        else:
            self.q_ans.put((param.get('url'), param.get('current_page'), param.get('end_page'), ans))
        with self.lock:
            self.running -= 1
        # self.opener.close()
        self.q_req.task_done()
        time.sleep(0.1)  # don't spam
def load_browser_cookies(self):
    jar = self.jars['firefox']
    firefox_cookiejar = browsercookie.firefox()
    for cookie in firefox_cookiejar:
        jar.set_cookie(cookie)
import asyncio
from urllib.parse import unquote

import browsercookie
import httpx

# pull the access token from the browser's cookies
_COOKIES = browsercookie.firefox()  # chrome?
token = unquote(next(c for c in _COOKIES if 'perekrestok.accessToken' in c.name).value)


async def main():
    async with httpx.AsyncClient() as client:
        response = await client.get(
            'https://my.perekrestok.ru/api/v4/transactions/list?limit=50',
            headers={"X-Authorization": token},
        )
        ids = ','.join(i['id'] for i in response.json()['data'] if i['type'] == 'buy')
        response = await client.get(
            f'https://my.perekrestok.ru/api/v4/transactions/details?id={ids}',
            headers={"X-Authorization": token},
        )
        return response.json()


asyncio.run(main())
__author__ = "BlackSwan" from lxml import html from lxml import etree import requests import browsercookie import winsound import easygui import time import sys while True: cj = browsercookie.firefox() page = requests.get(sys.argv[1], cookies=cj) tree = html.fromstring(page.content) counter = 0 status = tree.xpath('//*[@id="content"]/p[2]/span[2]/b') if status[0].text == "corto": if counter == 0: easygui.msgbox("CORTO") break if status[0].text == "muy corto": winsound.Beep(300, 2000) easygui.msgbox("MUY CORTO")