def login(linkedin_username, linkedin_password):
    """Log in to LinkedIn and return the ``li_at`` session cookie value.

    Args:
        linkedin_username: account e-mail / username.
        linkedin_password: account password.

    Returns:
        The ``li_at`` cookie string on success, ``None`` when the cookie
        was not granted (bad credentials, captcha challenge, ...).
    """
    cookie_filename = "cookies.txt"
    cookiejar = http.cookiejar.MozillaCookieJar(cookie_filename)
    opener = urllib.request.build_opener(
        urllib.request.HTTPRedirectHandler(),
        urllib.request.HTTPHandler(debuglevel=0),
        urllib.request.HTTPSHandler(debuglevel=0),
        urllib.request.HTTPCookieProcessor(cookiejar))

    # Fetch the login form and scrape the CSRF token out of its inputs.
    page = load_page(opener, "https://www.linkedin.com/uas/login")
    parse = BeautifulSoup(page, "html.parser")
    csrf = ""
    for link in parse.find_all('input'):
        if link.get('name') == 'loginCsrfParam':
            csrf = link.get('value')

    login_data = urllib.parse.urlencode({
        'session_key': linkedin_username,
        'session_password': linkedin_password,
        'loginCsrfParam': csrf
    })
    page = load_page(opener,
                     "https://www.linkedin.com/checkpoint/lg/login-submit",
                     login_data)
    parse = BeautifulSoup(page, "html.parser")

    cookie = ""
    try:
        # Private CookieJar internals: domain -> path -> cookie-name lookup;
        # there is no public accessor for a single cookie by name.
        cookie = cookiejar._cookies['.www.linkedin.com']['/']['li_at'].value
    except KeyError:  # was a bare except; only a missing cookie is expected here
        print("[DEBUG] Cookie Value: ", cookie)
        return None
    cookiejar.save()
    os.remove(cookie_filename)  # jar file is only a serialization side effect
    return cookie
def login(postdata):
    """Submit the prepared form to the Sina SSO endpoint and persist cookies.

    ``postdata`` is mutated in place: the dynamic fields (su, sp,
    servertime, nonce) are filled in before submission.  Relies on the
    module-level ``opener`` and ``cookiejar``.
    """
    servertime, nonce = get_servertime()[:2]
    # Dynamic login fields derived from the pre-login handshake.
    postdata['su'] = get_su()
    postdata['sp'] = get_sp(servertime, nonce)
    postdata['servertime'] = servertime
    postdata['nonce'] = nonce
    encoded = urllib.parse.urlencode(postdata)
    url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.94 Safari/537.36 OPR/27.0.1689.66 (Edition Baidu)',
    }
    request = urllib.request.Request(url, encoded.encode(), headers)
    body = urllib.request.urlopen(request).read().decode('gbk')
    # Follow the real login URL embedded in the SSO response so the
    # shared opener's cookiejar captures the session, then save it.
    opener.open(match_login_url(body))
    cookiejar.save()
def _store_cookies(self): """Store the session's cookiejar in a cookies.txt file""" if self._cookiefile and self.config("cookies-update", True): cookiejar = http.cookiejar.MozillaCookieJar() for cookie in self._cookiejar: cookiejar.set_cookie(cookie) try: cookiejar.save(self._cookiefile) except OSError as exc: self.log.warning("cookies: %s", exc)
def save_cookies(cookies, filename):
    """Write an iterable of cookie objects to *filename* in Mozilla format.

    Each incoming cookie is cloned into a fresh ``http.cookiejar.Cookie``;
    the private ``_rest`` attribute is mapped back to the ``rest``
    constructor keyword expected by the Cookie initializer.
    """
    jar = http.cookiejar.MozillaCookieJar(filename)
    for original in cookies:
        kwargs = dict(vars(original))
        kwargs['rest'] = kwargs.pop('_rest')
        jar.set_cookie(http.cookiejar.Cookie(**kwargs))
    jar.save(filename)
def startCourseImporter():
    """Open the Memrise course importer, reusing saved login cookies."""
    media_dir = MediaManager(mw.col, None).dir()
    cookie_path = os.path.join(mw.pm.profileFolder(), 'memrise.cookies')
    jar = http.cookiejar.MozillaCookieJar(cookie_path)
    if os.path.isfile(cookie_path):
        jar.load()
    service = memrise.Service(media_dir, jar)
    # Prompt for credentials only when the stored cookies are stale/absent.
    if service.isLoggedIn() or MemriseLoginDialog.login(service):
        jar.save()
        importer = MemriseImportDialog(service)
        importer.exec_()
def test3():
    """Fetch baidu.com and persist the received cookies to a local file."""
    # Local file the cookies will be written to.
    filename = 'cookie.txt'
    jar = http.cookiejar.MozillaCookieJar(filename)
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar))
    opener.open("http://www.baidu.com")
    jar.save()
def save_cookie():
    """Visit baidu.com and save every cookie (even session/expired ones).

    Returns:
        The opener, so callers can reuse the cookie-bearing session.
    """
    path = 'cookie.txt'
    jar = http.cookiejar.LWPCookieJar(path)
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar))
    opener.open('http://www.baidu.com')
    # ignore_discard/ignore_expires: keep session and expired cookies too.
    jar.save(filename=path, ignore_discard=True, ignore_expires=True)
    return opener
def SaveCookieToFileDemo():
    """Demo: capture cookies from a request and write them to cookie.txt."""
    # File name the cookies will be saved under.
    filename = 'cookie.txt'
    # MozillaCookieJar (unlike plain CookieJar) can serialize to disk.
    jar = http.cookiejar.MozillaCookieJar()
    # Build an opener whose handler routes cookies through the jar.
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar))
    response = opener.open('http://www.baidu.com')
    jar.save(filename)
def login():
    """Log in to pixiv, caching the session cookies in cookie.txt.

    NOTE(review): the original source was redacted around the password
    prompt / form construction (``"Password:"******"pixiv_id"]``), which
    left a syntax error.  The lines marked below are a best-effort
    reconstruction — verify against the original before trusting them.
    Relies on module-level ``urlOpener`` and ``cookiejar``.
    """
    if not os.path.exists(r"cookie.txt"):
        PixivID = input("ID:")
        password = input("Password:")    # reconstructed — TODO confirm
        post_data = {}                   # reconstructed — TODO confirm
        post_data["pixiv_id"] = PixivID
        post_data["pass"] = password
        request = urllib.request.Request(
            'http://www.pixiv.net/login.php',
            urllib.parse.urlencode(post_data).encode(encoding='utf_8'))
        urlOpener.open(request)
        cookiejar.save("cookie.txt")
    else:
        cookiejar.load("cookie.txt")
def login():
    """Log in to LinkedIn using module-level credentials; return the li_at cookie.

    Reads ``linkedin_username`` / ``linkedin_password`` from module scope.
    Exits the process with an explanatory message when the session cookie
    is not granted (wrong credentials or a captcha challenge).

    Returns:
        The ``li_at`` session cookie string.
    """
    cookie_filename = "cookies.txt"
    cookiejar = http.cookiejar.MozillaCookieJar(cookie_filename)
    opener = urllib.request.build_opener(
        urllib.request.HTTPRedirectHandler(),
        urllib.request.HTTPHandler(debuglevel=0),
        urllib.request.HTTPSHandler(debuglevel=0),
        urllib.request.HTTPCookieProcessor(cookiejar))

    # Fetch the login form and pull the CSRF token out of its inputs.
    page = loadPage(opener, "https://www.linkedin.com/uas/login").decode('utf-8')
    parse = BeautifulSoup(page, "html.parser")
    csrf = ""
    for link in parse.find_all('input'):
        if link.get('name') == 'loginCsrfParam':
            csrf = link.get('value')

    login_data = urllib.parse.urlencode({
        'session_key': linkedin_username,
        'session_password': linkedin_password,
        'loginCsrfParam': csrf
    })
    page = loadPage(opener,
                    "https://www.linkedin.com/checkpoint/lg/login-submit",
                    login_data).decode('utf-8')
    parse = BeautifulSoup(page, "html.parser")

    cookie = ""
    try:
        # Private CookieJar internals: domain -> path -> cookie-name lookup.
        cookie = cookiejar._cookies['.www.linkedin.com']['/']['li_at'].value
    except KeyError:  # was a bare except; only a missing cookie is expected
        print(
            "Error logging in! Try changing language on social networks or verifying login data."
        )
        print(
            "If a capcha is required to login (due to excessive attempts) it will keep failing, try using a VPN or running with the -s flag to show the browser, where you can manually solve the capcha."
        )
        sys.exit(0)
    cookiejar.save()
    os.remove(cookie_filename)  # jar file is only a serialization side effect
    return cookie
def login(postdata):
    """POST the Sina SSO login form and save the resulting session cookies.

    Fills the dynamic fields of ``postdata`` in place, submits the form,
    then opens the follow-up URL through the shared module-level ``opener``
    so its ``cookiejar`` captures the session before saving.
    """
    result = get_servertime()
    servertime, nonce = result[0], result[1]
    postdata.update({
        'su': get_su(),
        'sp': get_sp(servertime, nonce),
        'servertime': servertime,
        'nonce': nonce,
    })
    payload = urllib.parse.urlencode(postdata).encode()
    url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.94 Safari/537.36 OPR/27.0.1689.66 (Edition Baidu)',
    }
    response = urllib.request.urlopen(
        urllib.request.Request(url, payload, headers))
    text = response.read().decode('gbk')
    opener.open(match_login_url(text))
    cookiejar.save()
## 为opener注册handler; 十分类似node/express的中间件 body-parser cookie-parser opener = urllib.request.build_opener(proxy_handler, cookie_handler) ## 为opener添加请求的头部 opener.addheaders = headers_list_tuple ## opener.addheaders = [(key,value), (key1, value1)] ## opener.addheaders.append(('Cookie', 'cookiename=cookievalue')) ## Request.add_header(key, val) request = urllib.request.Request(url2) ## 添加method ## Request(url, data, headers=headers, method=method) ## request.get_method = lambda: 'PUT' reponse = opener.open(request) # print(file.read(80).decode()) for item in cookiejar: print( item.name, item.value ) # <Cookie __cfduid=dd6bc42941031a7a945a804e2aca9be0f1541861819 for .realpython.com/> # cookiejar里面保存了cookie对象 # cookie里面可以有很多的 field,每个field都有自己的policy # Set-Cookie: name=wangzhen; path=/ domian=.wangzhen.com; max-age=300 # Set-Cookie :age=26; expires=Sun, 10-Nov-19 15:04:49 GMT; path=/; domain=.realpython.com; HttpOnly cookiejar.save(ignore_discard=True, ignore_expires=True) ##初次获取cookie后保存在本地 ''' print( reponse.read().decode() ) #UnicodeEncodeError: 'gbk' codec can't encode character '\U0001f430' in position 366917: illegal multibyte sequence but: print( reponse.read(800).decode() ) #ok 不出错!!! 为什么 '''
import urllib.request, http.cookiejar

# Local file the cookies will be written to.
filename = "cookie.txt"
# MozillaCookieJar implements save(); the plain CookieJar does not.
cookiejar = http.cookiejar.MozillaCookieJar(filename)
# Route all cookies of the opener through the jar.
opener = urllib.request.build_opener(
    urllib.request.HTTPCookieProcessor(cookiejar))
response = opener.open("http://www.baidu.com")
cookiejar.save()
# print(cookieStr[:-1]) ''' '' ''''' #------->>>>>> 访问网站获得cookie,并把获得的cookie保存在cookie文件中 # 保存cookie的本地磁盘文件名 filename = 'cookie.txt' # 声明一个MozillaCookieJar(有save实现)对象实例来保存cookie,之后写入文件 cookiejar = http.cookiejar.MozillaCookieJar(filename) # 使用HTTPCookieProcessor()来创建cookie处理器对象,参数为CookieJar()对象 handler = urt.HTTPCookieProcessor(cookiejar) # 通过 build_opener() 来构建opener opener = urt.build_opener(handler) # 创建一个请求,原理同urllib2的urlopen response = opener.open('http://www.baidu.com') # 保存cookie到本地文件 cookiejar.save(ignore_discard=True,ignore_expires=True) ''' '' '''''
def login(cookiepath):
    """Prompt for an authorized KOA user ID and password and log in to the
    PRV pipeline server, saving the returned session cookie.

    Args:
        cookiepath (string): file path where the returned cookie is saved;
            subsequent PRV operations read this cookie file.

    Example:
        >>> import hiresprv.auth
        >>> hiresprv.auth.login(cookiepath)  # prompts for userid and password

    NOTE(review): the network section of the original source was redacted
    (``getpass("KOA Password: "******"utf-8")``), leaving a syntax error.
    The request/response lines marked below are a reconstruction and must
    be verified against the real service endpoint.
    """
    status = ''
    msg = ''
    debug = 0
    debugfile = ''

    if len(debugfile) > 0:
        debug = 1
        logging.basicConfig(filename=debugfile, level=logging.DEBUG)
        # Truncate any previous debug log.
        with open(debugfile, 'w') as fdebug:
            pass

    if debug:
        logging.debug('')
        logging.debug('Enter Login.init:')
        logging.debug('cookiepath= %s' % cookiepath)

    # Credentials are read interactively; the password never echoes.
    userid = input("KOA userid: ")
    if debug:
        logging.debug('')
        logging.debug('userid= %s' % userid)
    password = getpass.getpass("KOA Password: ")

    # --- reconstructed: the original lines here were redacted ------------
    # Presumably submits the credentials and receives a JSON status
    # document; TODO(review) restore the real endpoint and parameter names.
    url = ''  # TODO(review): login endpoint was redacted from the source
    cookiejar = http.cookiejar.MozillaCookieJar()
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cookiejar))
    parms = urllib.parse.urlencode({'userid': userid, 'password': password})
    response = opener.open(url, parms.encode("utf-8"))
    data = response.read()
    sdata = data.decode("utf-8")
    # ---------------------------------------------------------------------

    jsondata = json.loads(sdata)
    if debug:
        logging.debug('here2: data= ')
        logging.debug(data)
        logging.debug('sdata= %s' % sdata)
        logging.debug('jsondata= ')
        logging.debug(jsondata)

    # Extract the server's verdict from the JSON payload.
    for key, val in jsondata.items():
        if debug:
            logging.debug('key= %s val= %s' % (key, val))
        if key == 'status':
            status = val
        if key == 'msg':
            msg = val

    if debug:
        logging.debug('status= %s msg= %s' % (status, msg))

    if status == 'ok':
        # Persist the session cookie for subsequent PRV operations.
        cookiejar.save(cookiepath, ignore_discard=True)
        if debug:
            logging.debug('cookiejar saved to hirescookietxt')
        msg = 'Successful login as %s' % userid
    else:
        msg = 'Failed to login: %s' % msg

    if debug:
        logging.debug('')
        logging.debug('status= %s' % status)
        logging.debug('msg= %s' % msg)

    print(msg)
    return
'src': None } form_login = urlencode(dic_login).encode(encoding='UTF8') #构造csrf验证表单 dic_csrf = {'csrfmiddlewaretoken': csrfmiddlewaretoken} form_csrf = urlencode(dic_csrf).encode(encoding='UTF8') #post登陆页面 login_html = maxcrawler.get_webtext(opener, max_login, header_login, form_login) if not 'pkey' in [cookie.name for cookie in cookiejar]: print("We are not logged in !") else: print("We are logged in !") cookiejar.save(ignore_discard=True, ignore_expires=True) for ele in cookiejar: print('Name=' + ele.name) print('Value' + ele.value) #request 胜率数据页面 data_html = maxcrawler.get_webtext(opener, max_data, header_data, form_csrf) ############################# name_pattern = re.compile('<span class="hero-name-list">(.*?)</span>') hero_list = re.findall(name_pattern, data_html) print(hero_list) rate_pattern = re.compile( '([\d|\.]*)%</div><div class="segment segment-green"') rate_list = re.findall(rate_pattern, data_html)