示例#1
0
    def get_episodes(self, url: str, sess: requests.sessions.Session) -> Optional[list[str]]:
        """ Get season's episodes

        Scrapes the season page — and every extra page listed in its
        pagination bar — for anchors whose text matches
        "Episode <episodes_regex>", and returns their hrefs.

        Args:
            url: URL of the season's first page.
            sess: Session used for all HTTP requests.

        Returns:
            List of episode hrefs, or None when nothing matched.
        """
        logger.debug("Getting season's episodes")

        # Compile once; the original compiled the identical pattern twice and
        # duplicated the whole fetch/parse/collect loop for pagination pages.
        text_pattern = re.compile(fr'^Episode 0?({self.episodes_regex})')

        with sess.get(url) as resp:
            logger.debug("Getting url content")
            data = resp.content

        logger.debug("Creating beautiful soup parser")
        soup = BeautifulSoup(data, "html.parser")

        # Hrefs of any additional result pages, from the pagination bar.
        logger.debug("Using beautiful soup object to find pages elements")
        pages = []
        pages_element = soup.find("div", {"class": "pagination"})
        if pages_element:
            pages = [a.get('href') for a in pages_element.findChildren("a")]
        else:
            logger.warning("Could not find pages element")

        logger.debug("Using beautiful soup object to find elements matching regex")
        episode_links = [el.get('href')
                         for el in soup.find_all("a", text=text_pattern)]

        for page in pages:
            logger.debug("Getting links from the other pages")
            with sess.get(page) as resp:
                logger.debug("Getting url content")
                data = resp.content

            logger.debug("Creating beautiful soup parser")
            page_soup = BeautifulSoup(data, "html.parser")
            logger.debug("Using beautiful soup object to find elements matching regex")
            episode_links.extend(
                el.get('href')
                for el in page_soup.find_all("a", text=text_pattern))

        if episode_links:
            return episode_links

        # Implicitly returns None, matching the Optional return annotation.
        logger.warning("No episode links found")
示例#2
0
 def email_and_password_auth(self, session: requests.sessions.Session,
                             email: str,
                             password: str) -> requests.models.Response:
     """POST email/password credentials to the Epic web login endpoint.

     Returns the raw login Response so the caller can branch on its
     status code.
     """
     # Fetching /id/api/csrf first sets the XSRF-TOKEN cookie echoed below.
     session.get(f"{base}/id/api/csrf")
     credentials = {
         "email": email,
         "password": password,
         "rememberMe": False,
         "captcha": ""
     }
     return session.post(
         f"{base}/id/api/login",
         headers={"x-xsrf-token": session.cookies.get("XSRF-TOKEN")},
         data=credentials,
         cookies=session.cookies)
示例#3
0
def get_answer(session: requests.sessions.Session, queston_id: int):
    """Fetch the accepted answer for a question id.

    Returns the answer rendered as plain text, or None when the answer
    block cannot be found in the page markup.
    """
    answer_url = "http://www.shangxueba.com/ask/ajax/zuijiainfo.aspx?id={queston_id}".format(queston_id=queston_id)
    html = session.get(answer_url).text
    pattern = re.compile("<div class=\"xj_contextinfo\">\n<h6>\n(.*?)\n</h6>\n</div>", re.S)
    res = re.search(pattern, html)
    if res is None:
        # BUG FIX: re.search returns None when the markup changes or there is
        # no best answer yet; the original crashed with AttributeError here.
        return None
    return html2text.html2text(res.group(1))
示例#4
0
    def _download_image(self, session: requests.sessions.Session,
                        comic_url: str, filename: str) -> None:
        """
        Download the image file.

        Args:
            session (class 'requests.sessions.Session'): the Session object.
            comic_url (str): String containing the image url.
            filename (str): String of the filename to save the image to.

        Returns: None
        """
        # print(f'Downloading page http://xkcd.com/{url_number}...')

        response = session.get(comic_url)
        # At present two comics - 1608 and 1668 don't have an image - 403
        # and 404 returns 404.
        # Is there a better way to handle this, in case there are redirects etc?
        if response.status_code != 200:
            return None

        target_path = os.path.join('xkcd', filename)
        # 'x' mode: refuse to overwrite an image that already exists.
        with open(target_path, 'xb') as image_file:
            if not self.run_mode:
                print(f'Downloading image {comic_url}...')

            for chunk in response.iter_content(100000):
                image_file.write(chunk)
示例#5
0
def get_question(session: requests.sessions.Session, queston_id: int):
    """Fetch the question body for a question id.

    Returns the question rendered as plain text, or None when the question
    block cannot be found in the page markup.
    """
    question_url = "https://www.shangxueba.com/ask/{queston_id}.html".format(queston_id=queston_id)
    html = session.get(question_url).text
    pattern = re.compile("<div class=\"s_mess2_m\">(.*?)</div>", re.S)
    res = re.search(pattern, html)
    if res is None:
        # BUG FIX: re.search returns None when the markup changes; the
        # original crashed with AttributeError on .group(1).
        return None
    return html2text.html2text(res.group(1))
示例#6
0
def get_review_texts_by_url(relative_reviews_urls: list,
                            session: requests.sessions.Session,
                            root_url: str) -> List[Tuple[str, str]]:
    """
    Download each review page and extract its text.

    :param relative_reviews_urls: List of URLs relative to root_url.
    :param session: Session used for the requests.
    :param root_url: Prefix of every book-review URL.
    :return: List of (absolute URL, review text) pairs.
    """
    # BUG FIX: the whitespace pattern was written as an f-string
    # (f"[\t ]+") — any braces in it would have been interpolated.
    # Compile once, outside the loop, as a raw string.
    whitespace = re.compile(r"[\t ]+")

    texts = []
    for relative_url in tqdm(relative_reviews_urls):
        review_url = f"{root_url}/{relative_url}"
        response = session.get(review_url,
                               headers={"User-Agent": "Mozilla/5.0"})
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, "html.parser")
        # NOTE(review): find() returns None if the content block is missing,
        # which raises AttributeError here — confirm every page has it.
        text = soup.find("div", {"class": "universal-blocks-content"}).text

        # Collapse runs of tabs/spaces into a single space.
        texts.append((review_url, whitespace.sub(" ", text)))

    return texts
示例#7
0
def parse_stories(session: requests.sessions.Session, num_of_stories: int):
    """Walk the paginated story listing, counting tag occurrences.

    Stops once `num_of_stories` distinct stories have been seen.

    Args:
        session: Session used to fetch listing pages from HOME.
        num_of_stories: number of unique stories to collect before stopping.

    Returns:
        Counter mapping tag -> occurrence count.
    """
    tags_counter = Counter()
    id_stories = set()
    page_number = 1
    paramload = {'page': str(page_number)}
    complete = False

    while not complete:

        request = session.get(HOME, params=paramload)
        print('request for new page', request.status_code)
        soup = BeautifulSoup(request.text, 'lxml')
        stories = soup.find_all('article', class_='story')

        # BUG FIX: the original wrote `for i, story in enumerate(stories)`,
        # clobbering the page counter `i`, so `i += 1` at the bottom derived
        # the next page number from the (unused) enumerate index instead of
        # advancing by one.
        for story in stories:
            if story['data-story-id'] not in id_stories:
                id_stories.add(story['data-story-id'])
                print('story id', story['data-story-id'])
                try:
                    # Tag text is wrapped in one character of padding on
                    # each side, hence the [1:-1] slice.
                    tags_str = story.find('div',
                                          class_='story__tags tags').text[1:-1]
                    print(tags_str)
                    for tag in tags_str.split(' '):
                        tags_counter[tag] += 1
                except AttributeError:
                    print('story without tags')

                if len(id_stories) == num_of_stories:
                    complete = True
                    break
        print(len(id_stories))

        page_number += 1
        paramload['page'] = str(page_number)
    return tags_counter
示例#8
0
    def get_episode_quality_link(self, url: str, sess: requests.sessions.Session) -> Optional[str]:
        """ Get episode's quality download link

        Looks for the "Click to Download ... in HD Mp4 Format" anchor first
        and falls back to the plain Mp4 variant; returns its href, or None.
        """
        logger.debug("Getting episode's quality download link")

        with sess.get(url) as resp:
            logger.debug("Getting url content")
            page_content = resp.content

        logger.debug("Creating beautiful soup parser")
        soup = BeautifulSoup(page_content, "html.parser")

        hd_pattern = re.compile(
                r'^Click to Download Episode \d{1,6}(.+)? in HD Mp4 Format$',
                re.IGNORECASE
        )
        sd_pattern = re.compile(
                r'^Click to Download Episode \d{1,6}(.+)? in Mp4 Format$',
                re.IGNORECASE
        )

        logger.debug("Using beautiful soup object to find elements matching dl_pattern regex")
        # HD link preferred; `or` falls through to the SD link when absent.
        anchor = soup.find("a", text=hd_pattern) or soup.find("a", text=sd_pattern)

        if anchor:
            logger.debug("Getting href from beautiful soup element")
            return anchor.get('href')

        logger.warning("No episode quality links found")
示例#9
0
def GetImgUrlsFromSetUrl(session: requests.sessions.Session, imgset_url: str):
    """Return the list of image URLs found on one gallery page.

    Args: an existing session and the gallery's URL; each image URL is the
    src attribute of an <img> tag on the page.
    """
    # Fetch the gallery page and parse it.
    imgset_html = session.get(imgset_url).text
    imgset_soup = BeautifulSoup(imgset_html, 'lxml')

    # One image per <img> tag; collect every src.
    img_urls = [img_tag['src'] for img_tag in imgset_soup.find_all('img')]

    # imgset_name = imgset_url.split('/')[-1]
    # (provisional gallery name, kept from the original for reference)

    return img_urls
示例#10
0
    def get_special_offer(self, session: requests.sessions.Session, email: str,
                          password: str, user_agent: str,
                          language: str) -> (str, None):
        """Return the localized "Special Offer" shop-section name, or None.

        Authenticates, downloads a Fortnite cloudstorage hotfix file, and
        extracts the LocalizedStrings entry for *language*.

        NOTE(review): the `(str, None)` annotation presumably means
        Optional[str] — confirm and fix separately.
        """
        launcher_access_token = AuthUtil.authenticate(self, session, email,
                                                      password, user_agent)
        # Fetch the hotfix file and split it into lines.
        data = io.StringIO(
            session.get(
                "https://fortnite-public-service-prod11.ol.epicgames.com/fortnite/api/cloudstorage/system/a22d837b6a2b46349421259c0a5411bf",
                headers={
                    "Authorization": f"Bearer {launcher_access_token}"
                }).text).readlines()

        # Find the line carrying the "Special Featured" localized strings.
        # NOTE(review): if no line matches, `text` is left bound to the last
        # line of the file (or unbound for an empty file) — confirm the file
        # always contains one of these markers.
        for text in data:
            if ('Key="AC1E7A1349AB80D63BFF31A642006C54"'
                    in text) or ('NativeString="Special Featured"' in text):
                text = text
                break

        match = re.search(r'LocalizedStrings=.+', text)
        if match is not None:
            # SECURITY: eval() on data downloaded from the network — only as
            # trustworthy as the Epic endpoint; consider ast.literal_eval.
            # The parsed value appears to be pairs of (language, string).
            match = eval(
                match.group(0).replace("LocalizedStrings=", "",
                                       1).replace(")", "", 1), globals())
            for i in match:
                if i[0] == language:
                    match = i[1]
            log.info(f"Special Offer: {match}")
            return match
        else:
            log.info(f"Special Offer: None")
            return None
示例#11
0
def GetSetUrlsFromTypeUrl(session: requests.sessions.Session, type_url: str, source_url: str):
    """Map gallery name -> absolute gallery URL for one category page.

    The category page lists galleries inside <ul class="textList">; each
    <a> tag's text starts with a date prefix (stripped) and its href is
    relative to source_url.
    """
    imgset_urls_dic = {}

    # Fetch and parse the category page.
    type_html = session.get(type_url).text
    type_soup = BeautifulSoup(type_html, 'lxml')

    # The gallery links live inside <ul class="textList"> as <a> tags.
    imgset_url_tag_list = type_soup.find('ul', {'class': 'textList'}).find_all('a')

    for a_tag in imgset_url_tag_list:
        # Drop the leading date (first 5 characters) to get the name.
        imgset_name = a_tag.get_text()[5:]
        # key: gallery name, value: absolute gallery URL.
        imgset_urls_dic[imgset_name] = source_url + a_tag['href']

    return imgset_urls_dic
示例#12
0
def check_Login(session: requests.sessions.Session):
    """Return True when the session is logged in to passport.shangxueba.com.

    Detects the "<name>, welcome!" greeting paragraph on the passport page.
    """
    html = session.get("http://passport.shangxueba.com").text
    # BUG FIX: the pattern was a plain string, so "\s"/"\S" were invalid
    # string escapes (SyntaxWarning on Python 3.12+); use a raw string.
    pattern = re.compile(r"<p class=\"persPcConRiOneP2\">\s+\S+,欢迎您!</p>", re.S)
    return re.search(pattern, html) is not None
示例#13
0
def put_piece(
    session: requests.sessions.Session,
    server_address: str,
    piece_number: int,
    dice: int,
) -> Dict:
    """Ask the game server to move a piece out using the given dice value.

    Returns the server's decoded JSON reply.
    """
    endpoint = f"{server_address}/play/out/{piece_number}/{dice}"
    response = session.get(endpoint)
    return response.json()
示例#14
0
def getPage(
    url: str,
    timeout: float = 5,
    session: requests.sessions.Session = None) -> str:
    """Return the body of *url*, or None when the connection fails.

    Args:
        url: page to fetch.
        timeout: per-request timeout in seconds.
        session: optional session to reuse; a fresh one is made if omitted.

    BUG FIX: the default used to be `session=makeSession()`, which is
    evaluated once at import time, silently sharing one session (and its
    cookies) across every default call. A per-call default is created now.
    NOTE(review): callers relying on that shared-cookie behaviour should
    pass an explicit session.
    """
    if session is None:
        session = makeSession()
    try:
        return session.get(url, timeout=timeout).text
    except requests.exceptions.ConnectionError:
        # Annotated -> str, but the original contract returns None here.
        return None
示例#15
0
def get_film_data(s: requests.sessions.Session,
                  film_id: str,
                  delay: float = 0,
                  proxy: Dict = None) -> Dict:
    """Scrape one kinopoisk.ru film page, plus its cast page, into a dict.

    Returns keys: 'title', 'release_date', 'country', 'box_office',
    'actors' (list of [name, role-section] pairs). Fragile by design: it
    relies on fixed element positions inside the "О фильме" info block.
    """
    film_data = {}
    film_link = f'https://www.kinopoisk.ru/film/{film_id}/'
    page = s.get(film_link, proxies=proxy)

    tree = html.fromstring(page.text)
    # Hashed CSS class from kinopoisk's frontend build — breaks on redeploys.
    title = tree.xpath("//span[@class='styles_title__2l0HH']")[0].text

    # info = tree.xpath('//*[@id="__next"]/div/div[2]/div[1]/div[2]/div/div[3]/div/div/div[2]/div[1]/div')[0]
    # Anchor on the "О фильме" ("About the film") header, then take the
    # second child of its parent — the info table.
    info = tree.xpath('.//h3[text()="О фильме"]')[0].find('..')[1]

    # Positional indexing into the info rows: 0 = release date, 1 = country,
    # 13 = box office.
    # NOTE(review): assumes every film page shows the same rows in the same
    # order — confirm for films with missing fields.
    release_date = info[0][1][0].text
    country = info[1][1][0].text
    box_office = info[13][1][0].text
    # Keep only the figure after the '=' sign, with whitespace squeezed out.
    buf = box_office.find('=')
    box_office = ''.join(box_office[buf + 3:].split())

    film_data['title'] = title
    film_data['release_date'] = release_date
    film_data['country'] = country
    film_data['box_office'] = box_office

    # Optional politeness delay between the two page fetches.
    time.sleep(delay)
    logging.info(f'Информация о фильме {title} загружена')
    actors_link = film_link + 'cast/'
    page = s.get(actors_link, proxies=proxy)
    tree = html.fromstring(page.text)

    actors = []
    z = tree.xpath('//*[@id="block_left"]/div')[0]
    current_type = ''
    # The cast list interleaves section markers (divs with a name attribute)
    # and person rows; rows whose class contains 'dub' carry the names.
    for i in range(len(z)):
        name = z[i].attrib.get('name', None)
        if name is not None:
            current_type = name
        cls = z[i].attrib.get('class', None)
        if cls is not None and 'dub' in cls:
            fio = z[i].find_class('name')[0][0].text
            actors.append([fio, current_type])
    film_data['actors'] = actors
    logging.info(f'Информация об актерах фильма {title} загружена')
    return film_data
示例#16
0
def login(session: requests.sessions.Session):
    """Log in to passport.shangxueba.com, solving the captcha via get_code().

    Retries up to 15 times while captcha recognition fails. On success the
    cookie jar is persisted with save_cookie() and the session is returned;
    returns False when every attempt fails.
    """
    url="https://passport.shangxueba.com/user/userlogin.aspx?url=https%3A//www.shangxueba.com/"
    code_url="https://passport.shangxueba.com/VerifyCode.aspx"
    page=session.get(url)
    soup=BeautifulSoup(page.text,'lxml')
    # Log in to the account (retry loop below).
    flag=False
    max_login_time=15
    while max_login_time > 0 and not flag:
        # Download the captcha image and run it through the OCR helper.
        code_image=session.get(code_url).content
        with open("code.jpeg","wb") as f:
            f.write(code_image)
        code = get_code("./code.jpeg")
        headers={
            "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
            "Referer":"https://passport.shangxueba.com/user/userlogin.aspx?url=https%3A//www.shangxueba.com/"
        }
        # ASP.NET form post: __VIEWSTATE / __EVENTVALIDATION are scraped
        # from the login page fetched above.
        data={
            "__EVENTTARGET":"",
            "__EVENTARGUMENT":"",
            "__VIEWSTATE":soup.find('input', id='__VIEWSTATE')["value"],
            "__EVENTVALIDATION":soup.find('input', id='__EVENTVALIDATION')["value"],
            "txtName": "username",
            "txtPassword": "******",
            "txtVerifycode":code,
            "hidcode":"",
            "hidflag":"1",
            "Button1":""
        }
        req=session.post(url,headers=headers,data=data)
        # "欢迎您回来" ("welcome back") in the response means login succeeded.
        if "欢迎您回来" in req.text:
            flag=True
            break
        # "验证码错误" means the captcha was wrong.
        # NOTE(review): both branches below are identical (decrement and
        # retry) — presumably a placeholder for distinct handling; confirm.
        if "验证码错误" in req.text:
            max_login_time -= 1
            continue
        else:
            max_login_time -= 1
            continue
    if flag == False:
        return False
    else:
        save_cookie(session)
        return session
示例#17
0
 def _get_comic_json(self, session: requests.sessions.Session,
                     comic_number: int) -> Optional[Dict]:
     """Fetch xkcd's metadata JSON for one comic.

     Returns the decoded dict, or None when the comic's info endpoint does
     not answer 200.
     """
     url = 'https://xkcd.com/' + str(comic_number) + '/info.0.json'
     response = session.get(url)
     if response.status_code == 200:
         return response.json()
     # handle
     return None
示例#18
0
def DownloadImg(session: requests.sessions.Session, img_url: str, path: str,
                img_name: str):
    """Stream one image to <path>/<img_name>; returns True when done."""
    # stream=True keeps the body out of memory until iterated below.
    img_response = session.get(img_url, stream=True)
    target = '%s/%s' % (path, img_name)
    with open(target, 'wb') as f:
        # Write the image in small chunks.
        for chunk in img_response.iter_content(chunk_size=128):
            f.write(chunk)
    return True
示例#19
0
def extend_webapp(session: requests.sessions.Session) -> None:
    """Keep the PythonAnywhere web app alive by POSTing its extend form."""
    webapps_url = "https://www.pythonanywhere.com/user/{}/webapps/".format(LOGIN)
    r = session.get(webapps_url)
    # The POST is rejected without the CSRF token scraped from the GET page.
    token = csrfgetter(r.text).csrfToken
    session.post(
        WEB_APP_URL,
        headers={"Referer": webapps_url},
        data={"csrfmiddlewaretoken": token})
示例#20
0
def get_wiki_response(
    url: str, session: requests.sessions.Session = None
) -> requests.Response:
    """
    Get a wiki response for a URL in a (relatively) safe manner (for bots)

    Honours Retry-After throttling headers, waits a second between
    requests, and retries after a connection error.
    """
    # BUG FIX: the default used to be `requests.Session()`, created once at
    # import time and shared by all default calls; make one per call.
    if session is None:
        session = requests.Session()
    try:
        response = session.get(url)
        while (response.status_code == 503 or response.status_code
               == 200) and "Retry-After" in response.headers:
            timeout = response.headers["Retry-After"]
            logging.info(f"Sleeping for {timeout}")
            # BUG FIX: header values are strings and time.sleep() needs a
            # number — the original passed the raw header (TypeError).
            # NOTE(review): Retry-After may also be an HTTP-date, which
            # float() rejects — confirm this wiki only sends seconds.
            time.sleep(float(timeout))
            response = session.get(url)
        time.sleep(1)
        response.raise_for_status()
        return response
    except requests.ConnectionError as e:
        logging.exception(e)
        if e.response:
            # BUG FIX: Response.text is a property, not a method; calling it
            # raised TypeError inside the error handler.
            logging.error(e.response.text)
        time.sleep(10)
        # NOTE(review): unbounded recursion if the host stays unreachable.
        return get_wiki_response(url, session=session)
示例#21
0
def download_last_payslip(session: requests.sessions.Session, eurecia_host: str, payslip_name: str):
    """Download the most recent 2020 payslip PDF via the Eurecia API.

    Args:
        session: authenticated session.
        eurecia_host: Eurecia hostname (e.g. "company.eurecia.com").
        payslip_name: prefix for the saved PDF's filename.

    Raises:
        ValueError: with the status code when the payslip listing fails.
    """
    print("Download last payslip using API")
    # BUG FIX: the original immediately overwrote `eurecia_host` with
    # config["eurecia_host"], silently ignoring the parameter; it is
    # honoured now.

    baseurl = f"https://{eurecia_host}/eurecia/api/v1/payslip"

    response = session.get(baseurl)
    if response.ok:
        payslip_list = response.json()
    else:
        print(response.content)
        raise ValueError(response.status_code)

    # NOTE(review): hard-coded year — only the "2020" bucket is inspected.
    last_payslip = payslip_list["2020"][0]
    last_payslip_url = (
        f"https://{eurecia_host}/" + last_payslip["files"][0]["urlContent"]
    )

    filename = (payslip_name + last_payslip["description"]).replace(" ", "-")
    response = session.get(last_payslip_url)
    if response.status_code == 200:
        # BUG FIX: the computed `filename` was never used and the PDF was
        # written under a hard-coded name.
        with open(f"{filename}.pdf", "wb") as f:
            f.write(response.content)
        print("OK")
示例#22
0
def download_calendar(session: requests.sessions.Session, eurecia_host: str, calendar_name: str):
    """Download the Eurecia vacation planning page to <calendar_name>.html.

    Raises ValueError with the status code when the fetch fails.
    NOTE(review): `calendar_extracted` at the bottom is computed but never
    used or returned in the visible code — this snippet may be truncated;
    confirm before removing.
    """
    print("Download calendar using API")

    baseurl = f"https://{eurecia_host}/eurecia/planningVacation/planning.do?print=all"

    response = session.get(baseurl)
    if response.ok:
        calendar_raw = response.text
    else:
        print(response.content)
        raise ValueError(response.status_code)

    # Always true here — the non-ok case raised above.
    if response.ok:
        with open(f"{calendar_name}.html", "w") as f:
            f.write(calendar_raw)
        print("OK")
    # Pulls (who, date, type, detail) tuples out of the tooltip titles.
    title_regex = r"title=\"([\w\s,'\/()-\.]+)\s([0-9\/-]+)\s\(([\w\s,'\/()-\.]+)\)\s([\w\s,'\/()-\.]+)\s\""
    calendar_extracted = re.findall(title_regex, calendar_raw, re.MULTILINE)
def validate_access(s: requests.sessions.Session) -> bool:
    """Return True when the CheckingPRO dashboard loads with the expected title."""
    base_page = s.get(
        'http://desarrollo.lda/CheckingPRO/dashboard/view.run?category=requests'
    )
    try:
        soup = BeautifulSoup(base_page.content, 'html.parser')

        # The page title doubles as a login check.
        if HOME_TITLE == soup.title.get_text():
            print("Acceso verificado.")
            return True

        print("No se ha logrado acceder.")

    except Exception as e:
        # Parsing failed (e.g. no <title>); treat as no access.
        print("error de acceso", e)

    return False
示例#24
0
def extend_task(session: requests.sessions.Session) -> None:
    """Extend the scheduled PythonAnywhere task; raise on any failure."""
    tasks_url = "https://www.pythonanywhere.com/user/{}/tasks_tab/".format(LOGIN)
    page = session.get(tasks_url)
    # CSRF token scraped from the tasks tab is required for the POST.
    token = csrfgetter(page.text).csrfToken
    r = session.post(
        TASK_URL,
        headers={
            "Referer": tasks_url,
            "X-CSRFToken": token
        })
    if r.headers.get("Content-Type") != "application/json":
        raise Exception("[Update task] Server returns non json")
    body = r.json()
    if body.get("status") != "success":
        raise Exception(
            "[Update task] status != success. Response json: {}".format(body))
示例#25
0
    def _get_resources(
        self,
        session: requests.sessions.Session,
        url: str,
        parameters: Optional[Dict[str, Any]],
    ) -> List[Dict[str, Any]]:
        """Get data from Netbox API.

        Follows the API's pagination via the "next" link until exhausted.

        Args:
            session: session used for the API calls.
            url: first page URL; later URLs come from the responses.
            parameters: optional query parameters.

        Returns:
            All "results" entries accumulated across pages.

        Raises:
            ValueError: on any non-200 response.
        """
        nb_result: List[Dict[str, Any]] = []

        # Since the api uses pagination we have to fetch until no next is provided
        while url:
            r = session.get(url, params=parameters)

            if r.status_code != 200:
                raise ValueError(
                    f"Failed to get valid response from Netbox instance {url}")

            resp = r.json()
            # Guard against a malformed payload with no "results" key, which
            # previously raised TypeError inside extend().
            nb_result.extend(resp.get("results") or [])

            url = resp.get("next")

        return nb_result
示例#26
0
    def get_seasons(self, url: str, sess: requests.sessions.Session) -> Optional[list[str]]:
        """ Get season links

        Scrapes the page for anchors whose text matches
        "Season <seasons_regex>" and returns their hrefs, or None.
        """
        logger.debug("Getting seasons")

        with sess.get(url) as resp:
            logger.debug("Getting url content")
            page_content = resp.content

        logger.debug("Creating beautiful soup parser")
        soup = BeautifulSoup(page_content, "html.parser")

        season_pattern = re.compile(fr'^Season 0?({self.seasons_regex})$')

        logger.debug("Using beautiful soup object to find pages elements")
        season_links = [anchor.get('href')
                        for anchor in soup.find_all("a", text=season_pattern)]

        if season_links:
            return season_links

        logger.warning("No season links found")
示例#27
0
def pfr_url_to_df(session: requests.sessions.Session,
                  url: str) -> pd.DataFrame:
    """
    Takes a URL that contains a query for the
    Pro Football Reference and returns a Pandas dataframe.

    This function will take care of cleaning the data, and ensuring all the
    data types are properly set
    """

    # Browser-like UA header; presumably needed to avoid bot blocking —
    # TODO confirm PFR rejects the default requests UA.
    headers = {
        'User-Agent':
        ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
         '(KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36')
    }

    # Explicitly set the types of columns
    # NOTE(review): errors='ignore' is deprecated in pandas >= 2.0 and
    # removed later — confirm the pinned pandas version before upgrading.
    converters = {
        'Rk': lambda x: pd.to_numeric(x, errors='ignore'),
        'Age': lambda x: pd.to_numeric(x, errors='ignore'),
        'Date': lambda x: pd.to_datetime(x, errors='ignore'),
        'G#': lambda x: pd.to_numeric(x, errors='ignore'),
        'Week': lambda x: pd.to_numeric(x, errors='ignore'),
        'Cmp': lambda x: pd.to_numeric(x, errors='ignore'),
        'Att': lambda x: pd.to_numeric(x, errors='ignore'),
        'Cmp%': lambda x: pd.to_numeric(x, errors='ignore'),
        'Yds': lambda x: pd.to_numeric(x, errors='ignore'),
        'TD': lambda x: pd.to_numeric(x, errors='ignore'),
        'Int': lambda x: pd.to_numeric(x, errors='ignore'),
        'Rate': lambda x: pd.to_numeric(x, errors='ignore'),
        'Sk': lambda x: pd.to_numeric(x, errors='ignore'),
        'Yds.1': lambda x: pd.to_numeric(x, errors='ignore'),
        'Y/A': lambda x: pd.to_numeric(x, errors='ignore'),
        'AY/A': lambda x: pd.to_numeric(x, errors='ignore'),
    }

    logging.info(f'Downloading data from {url}')
    text = session.get(url, headers=headers).text
    # First table on the page; skip the grouping header row.
    df = pd.read_html(text, skiprows=1, header=0, converters=converters)[0]

    # Handle multiple pages
    while 'Next Page' in text:
        # This code essentially increments the 'offset' query
        # parameter by 100
        url = increase_url_offset(url)

        # Continue downloading for however many pages there are
        text = session.get(url, headers=headers).text
        df2 = pd.read_html(text, skiprows=1, header=0,
                           converters=converters)[0]

        # Keep adding the new pages to our existing df dataframe
        df = pd.concat([df, df2])

    # Filter out rows where the data repeats the header row
    df = df.loc[df.Player != 'Player']

    # Let Pandas redetermine the types of the columns now
    # That each column should have homogenous data types
    df = df.infer_objects()

    # Remove bad column from source data
    df = df.drop('Unnamed: 7', axis='columns')

    return df
示例#28
0
    def authenticate(self, session: requests.sessions.Session, email: str,
                     password: str, user_agent: str) -> str:
        """Obtain a launcher access token for the Epic account.

        Prefers stored device auth details when available; otherwise logs in
        with email/password (handling 2fa), exchanges the web session for an
        OAuth token, and stores freshly generated device auth details.

        Returns:
            The launcher access token string.

        Raises:
            ValueError: wrong 2fa code, wrong exchange code, or bad stored
                device auth details.

        NOTE(review): if device_auth returns a status other than 200/400,
        `launcher_access_token` is referenced before assignment — confirm.
        """
        # Prime the CSRF cookie used by the web login endpoints.
        session.get(f"{base}/id/api/csrf")
        device_auth_details = AuthUtil.get_device_auth_details(self).get(
            email, {})
        if os.path.isfile(
                "device_auths.json") is False or device_auth_details == {}:
            # No stored device auth — full email/password login flow.
            res = AuthUtil.email_and_password_auth(self, session, email,
                                                   password)
            log.debug(f"\nemail_and_password_auth")
            log.debug(res.status_code)
            log.debug(res.text)

            # 409: retry the whole flow from scratch.
            if res.status_code == 409:
                return AuthUtil.authenticate(self, session, email, password,
                                             user_agent)

            # 431: the account requires a 2fa code.
            if res.status_code == 431:
                session.get(f"{base}/id/api/csrf")
                res = session.post(f"{base}/id/api/login/mfa",
                                   headers={
                                       "x-xsrf-token":
                                       session.cookies.get("XSRF-TOKEN")
                                   },
                                   data={
                                       "code":
                                       input("Please enter the 2fa code: "),
                                       "method": "authenticator",
                                       "rememberDevice": False
                                   },
                                   cookies=session.cookies)

                if res.status_code == 400:
                    raise ValueError("Wrong 2fa code entered.")

            if not res.status_code == 400:
                # Exchange the web session for an OAuth exchange code, then
                # trade it for a launcher access token.
                res = session.get(f"{base}/id/api/exchange",
                                  headers={
                                      "x-xsrf-token":
                                      session.cookies.get("XSRF-TOKEN")
                                  },
                                  cookies=session.cookies)
                exchange_code_ = res.json()["code"]

                res = session.post(
                    f"{base_public_service}/account/api/oauth/token",
                    headers={"Authorization": f"basic {launcher_token}"},
                    data={
                        "grant_type": "exchange_code",
                        "exchange_code": exchange_code_,
                        "token_type": "eg1"
                    })
                client_id = res.json()["account_id"]
                launcher_access_token = res.json()["access_token"]
                # Generate and persist device auth details for future runs.
                res = AuthUtil.generate_device_auth(self, session, client_id,
                                                    launcher_access_token,
                                                    user_agent)
                if res.status_code == 200:
                    details = {
                        "deviceId": res.json().get("deviceId"),
                        "accountId": res.json().get("accountId"),
                        "secret": res.json().get("secret")
                    }
                    AuthUtil.store_device_auth_details(self, email, details)

            # 400 fallback: let the user paste an exchange code manually.
            if res.status_code == 400:
                res = AuthUtil.exchange_code_auth(self, session, email)
                log.debug(f"\nexchange_code_auth")
                log.debug(res.status_code)
                log.debug(res.text)

                if res.status_code == 400:
                    raise ValueError("Wrong exchange_code entered.")

                if res.status_code == 200:
                    launcher_access_token = res.json()["access_token"]
                    client_id = res.json()["account_id"]
                    res = AuthUtil.generate_device_auth(
                        self, session, client_id, launcher_access_token,
                        user_agent)

                    if res.status_code == 200:
                        details = {
                            "deviceId": res.json().get("deviceId"),
                            "accountId": res.json().get("accountId"),
                            "secret": res.json().get("secret")
                        }
                        AuthUtil.store_device_auth_details(
                            self, email, details)
        else:
            # Stored device auth available — skip the interactive login.
            res = AuthUtil.device_auth(self, session, **device_auth_details)
            log.debug(f"\ndevice_auth")
            log.debug(res.status_code)
            log.debug(res.text)

            if res.status_code == 400:
                raise ValueError("Wrong device auth detail entered.")

            if res.status_code == 200:
                launcher_access_token = res.json()["access_token"]

        return launcher_access_token
示例#29
0
def download_asset(dest: str, filename: str, h: str,
                   s: requests.sessions.Session):
    """Download one Minecraft asset, addressed by its hash, to dest/filename.

    The asset CDN shards files by the first two hex chars of the hash.
    """
    os.makedirs(dest, exist_ok=True)
    # BUG FIX: use a context manager — the original's bare open().write()
    # left the file handle open until garbage collection.
    with open(os.path.join(dest, filename), "wb") as f:
        f.write(
            s.get(f'https://resources.download.minecraft.net/{h[:2]}/{h}').content)
示例#30
0
def roll_dice(session: requests.sessions.Session, server_address: str) -> Dict:
    """Ask the game server for a dice roll and return the decoded JSON."""
    response = session.get(f"{server_address}/play/roll")
    return response.json()