def download_individual_results_feed( self, cr: int, session: Optional[requests.Session] = None ) -> TopcoderLongContestProblemIndividualResultsFeed:
    """Download and parse the ``IndividualResultsFeed`` XML for the coder ``cr`` in this round.

    :param cr: Topcoder coder id whose results are fetched
    :raises ValueError: if an expected text node in the feed is empty

    .. versionadded:: 6.2.0
        This method may be deleted in future.
    """
    session = session or utils.get_default_session()

    # get
    url = 'https://community.topcoder.com/longcontest/stats/?module=IndividualResultsFeed&rd={}&cr={}'.format(self.rd, cr)
    resp = utils.request('GET', url, session=session)

    # parse
    def get_text_at(node: xml.etree.ElementTree.Element, i: int) -> str:
        # the feed is positional: the i-th child element must carry non-empty text
        text = list(node)[i].text
        if text is None:
            raise ValueError
        return text

    root = xml.etree.ElementTree.fromstring(resp.content.decode(resp.encoding))
    # expected children, in order: round id, coder id, handle, submissions, testcases
    assert len(list(root)) == 5
    round_id = int(get_text_at(root, 0))
    coder_id = int(get_text_at(root, 1))
    handle = get_text_at(root, 2)

    # 4th child: one element per submission (number, score, language, time)
    submissions = []  # type: List[TopcoderLongContestProblemIndividualResultsFeedSubmission]
    for submission in list(root)[3]:
        number = int(get_text_at(submission, 0))
        score = float(get_text_at(submission, 1))
        language = get_text_at(submission, 2)
        time = get_text_at(submission, 3)
        submissions += [TopcoderLongContestProblemIndividualResultsFeedSubmission(number, score, language, time)]

    # 5th child: one element per test case (id, score, processing time, fatal-error flag)
    testcases = []  # type: List[TopcoderLongContestProblemIndividualResultsFeedTestCase]
    for testcase in list(root)[4]:
        test_case_id = int(get_text_at(testcase, 0))
        score = float(get_text_at(testcase, 1))
        processing_time = int(get_text_at(testcase, 2))
        fatal_error_ind = int(get_text_at(testcase, 3))
        testcases += [TopcoderLongContestProblemIndividualResultsFeedTestCase(test_case_id, score, processing_time, fatal_error_ind)]

    return TopcoderLongContestProblemIndividualResultsFeed(round_id, coder_id, handle, submissions, testcases)
def download_system_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download all system test cases of this problem via AOJ's official API.

    Test cases whose body was truncated by the API's size limitation are skipped
    with a warning instead of being stored incomplete.

    :param session: HTTP session to use; a default one is created when omitted
    """
    session = session or utils.new_default_session()

    # get the list of test case headers
    # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2Fheader_GET
    url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/header'.format(self.problem_id)
    resp = utils.request('GET', url, session=session)
    # NOTE: fixed shadowing — the original reused the name `header` for both the
    # response dict and the loop variable (`for header in header['headers']`)
    headers = json.loads(resp.content.decode(resp.encoding))

    # get testcases via the official API
    testcases = []  # type: List[TestCase]
    for header in headers['headers']:
        # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2F%7Bserial%7D_GET
        url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/{}'.format(self.problem_id, header['serial'])
        resp = utils.request('GET', url, session=session)
        testcase = json.loads(resp.content.decode(resp.encoding))
        skipped = False
        for kind in ('in', 'out'):  # renamed from `type` to avoid shadowing the builtin
            if testcase[kind].endswith('..... (terminated because of the limitation)\n'):
                log.error('AOJ API says: terminated because of the limitation')
                skipped = True
        if skipped:
            log.warning("skipped due to the limitation of AOJ API")
            continue
        testcases += [
            TestCase(
                header['name'],
                header['name'],
                testcase['in'].encode(),
                header['name'],
                testcase['out'].encode(),
            )
        ]
    return testcases
def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]: session = session or utils.get_default_session() # get samples via the official API # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2Fsamples%2F%7BproblemId%7D_GET url = 'https://judgedat.u-aizu.ac.jp/testcases/samples/{}'.format(self.problem_id) resp = utils.request('GET', url, session=session) samples = [] # type: List[TestCase] for i, sample in enumerate(json.loads(resp.content.decode(resp.encoding))): samples += [TestCase( 'sample-{}'.format(i + 1), str(sample['serial']), sample['in'].encode(), str(sample['serial']), sample['out'].encode(), )] # parse HTML if no samples are registered # see: https://github.com/kmyk/online-judge-tools/issues/207 if not samples: logger.warning("sample cases are not registered in the official API") logger.info("fallback: parsing HTML") # reference: http://developers.u-aizu.ac.jp/api?key=judgeapi%2Fresources%2Fdescriptions%2F%7Blang%7D%2F%7Bproblem_id%7D_GET url = 'https://judgeapi.u-aizu.ac.jp/resources/descriptions/ja/{}'.format(self.problem_id) resp = utils.request('GET', url, session=session) html = json.loads(resp.content.decode(resp.encoding))['html'] # list h3+pre zipper = onlinejudge._implementation.testcase_zipper.SampleZipper() expected_strings = ('入力例', '出力例', 'Sample Input', 'Sample Output') soup = bs4.BeautifulSoup(html, utils.html_parser) for pre in soup.find_all('pre'): tag = pre.find_previous_sibling() if tag and tag.name == 'h3' and tag.string and any(s in tag.string for s in expected_strings): s = utils.textfile(utils.parse_content(pre).lstrip()) zipper.add(s.encode(), tag.string) samples = zipper.get() return samples
def download_sample_cases( self, *, session: Optional[requests.Session] = None) -> List[TestCase]: session = session or utils.get_default_session() # get resp = utils.request('GET', self.get_url(), session=session) # parse soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser) in_pre, out_pre = soup.find_all('pre', class_='sio') in_p = in_pre.find_previous_sibling('p', class_='pst') out_p = out_pre.find_previous_sibling('p', class_='pst') logger.debug('pre (in): %s', in_pre.contents) logger.debug('pre (out): %s', out_pre.contents) assert in_p.text.strip() == 'Sample Input' assert out_p.text.strip() == 'Sample Output' assert len(in_pre.contents) == len(out_pre.contents) samples = [] # type: List[TestCase] if len(in_pre.contents) == 1: assert isinstance(in_pre.contents[0], bs4.NavigableString) assert isinstance(out_pre.contents[0], bs4.NavigableString) samples += [ TestCase( 'sample', in_p.text.strip(), in_pre.text.encode() + b'\r\n', out_p.text.strip(), out_pre.text.encode() + b'\r\n', ) ] else: assert len(in_pre.contents) % 2 == 0 for i in range(len(in_pre.contents) // 2): in_name = in_pre.contents[2 * i] in_data = in_pre.contents[2 * i + 1] out_name = out_pre.contents[2 * i] out_data = out_pre.contents[2 * i + 1] assert in_name.name == 'b' assert isinstance(in_data, bs4.NavigableString) assert out_name.name == 'b' assert isinstance(out_data, bs4.NavigableString) samples += [ TestCase( 'sample-{}'.format(i + 1), in_name.text.strip(), str(in_data).strip().encode() + b'\r\n', out_name.text.strip(), str(out_data).strip().encode() + b'\r\n', ) ] return samples
def get_input_format(self, session: Optional[requests.Session] = None) -> Optional[str]:
    """Return the raw markup of the <pre> block under the '入力' heading, or None if absent."""
    session = session or utils.get_default_session()
    # fetch the problem page
    resp = utils.request('GET', self.get_url(), session=session)
    # scan the <h4> headings for the Japanese "Input" section
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    matches = [h4 for h4 in soup.find_all('h4') if h4.string == '入力']
    if not matches:
        return None
    return matches[0].parent.find('pre').decode_contents(formatter=None)
def download_sample_cases( self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download the single sample case via Facebook Hacker Cup's example endpoint."""
    session = session or utils.get_default_session()
    base = 'https://www.facebook.com/hackercup/example/?problem_id={}&type={}'
    responses = {}
    for kind in ('input', 'output'):
        responses[kind] = utils.request('GET', base.format(self.problem_id, kind), session=session)

    def name_of(resp):
        # the endpoint reports the file name via the Content-Disposition header
        return utils.remove_prefix(resp.headers['Content-Disposition'], 'attachment;filename=')

    return [TestCase(
        'sample',
        name_of(responses['input']),
        responses['input'].content,
        name_of(responses['output']),
        responses['output'].content,
    )]
def download_sample_cases(self, session: Optional[requests.Session] = None) -> List[onlinejudge.type.TestCase]:
    """Collect the sample cases by parsing each <h2>-introduced section of the problem page."""
    session = session or utils.get_default_session()
    # fetch the problem page
    resp = utils.request('GET', self.get_url(), session=session)
    # walk the headings and let _parse_sample_tag decide which ones hold samples
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    zipper = onlinejudge._implementation.testcase_zipper.SampleZipper()
    for heading in soup.find_all('h2'):
        parsed = self._parse_sample_tag(heading)
        if parsed is None:
            continue
        content, title = parsed
        zipper.add(content.encode(), title)
    return zipper.get()
def get_available_languages( self, *, session: Optional[requests.Session] = None) -> List[Language]:
    """List the languages accepted by yukicoder, via the official API."""
    session = session or utils.get_default_session()
    resp = utils.request('GET', 'https://yukicoder.me/api/v1/languages', session=session)
    languages = []  # type: List[Language]
    for row in json.loads(resp.content.decode()):
        # the display name carries the compiler/interpreter version, e.g. "C++14 (gcc ...)"
        description = row['Name'] + ' (' + row['Ver'] + ')'
        languages.append(Language(row['Id'], description))
    return languages
def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Scrape the sample cases from the <pre> tags inside ``.sample`` sections."""
    session = session or utils.get_default_session()
    # fetch the problem page
    resp = utils.request('GET', self.get_url(), session=session)
    # each sample <pre> is handed to _parse_sample_tag; non-samples yield None
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER)
    zipper = onlinejudge._implementation.testcase_zipper.SampleZipper()
    for pre in soup.select('.sample pre'):
        logger.debug('pre: %s', str(pre))
        parsed = self._parse_sample_tag(pre)
        if parsed is None:
            continue
        body, title = parsed
        zipper.add(body.encode(), title)
    return zipper.get()
def is_logged_in(self, *, session: Optional[requests.Session] = None) -> bool:
    """Report whether the session is authenticated, using AOJ's /self endpoint."""
    session = session or utils.get_default_session()
    resp = utils.request('GET', 'https://judgeapi.u-aizu.ac.jp/self', session=session, raise_for_status=False)
    # a non-200 answer means the session is anonymous
    if resp.status_code != 200:
        return False
    payload = json.loads(resp.content)
    logger.debug('self: %s', resp.content)
    # a logged-in user object carries an 'id' field
    return 'id' in payload
def _get_problems( cls, *, session: Optional[requests.Session] = None ) -> List[Dict[str, Any]]:
    """`_get_problems` wraps the official API and caches the result."""
    session = session or utils.get_default_session()
    # serve from the class-level cache when it is already populated
    if cls._problems is not None:
        return cls._problems
    resp = utils.request('GET', 'https://yukicoder.me/api/v1/problems', session=session)
    cls._problems = json.loads(resp.content.decode())
    return cls._problems
def download_overview( self, session: Optional[requests.Session] = None ) -> List[TopcoderLongContestProblemOverviewRow]:
    """Scrape the ViewOverview standings table of this marathon round.

    Also stores the problem id (the ``pm`` query parameter of each row's
    "results" link) into ``self.pm`` as a side effect.

    .. versionadded:: 6.2.0
        This method may be deleted in future.
    """
    session = session or utils.get_default_session()
    # get
    number = 9999  # nr: rows per page; large enough to fetch everything at once
    start = 1  # sr: 1-based index of the first row
    url = 'https://community.topcoder.com/longcontest/stats/?module=ViewOverview&rd={}&nr={}&sr={}'.format(self.rd, number, start)
    resp = utils.request('GET', url, session=session)

    # parse
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    table = soup.find('table', class_='stat')
    overview = []  # type: List[TopcoderLongContestProblemOverviewRow]
    # data rows alternate between the 'light' and 'dark' CSS classes
    for tr in table.find_all('tr', class_=re.compile(r'light|dark')):
        tds = tr.find_all('td')
        assert len(tds) == 9
        rank = int(tds[0].text)
        handle = tds[1].text.strip()
        provisional_rank = int(tds[2].text)
        provisional_score = float(tds[3].text)
        final_score = float(tds[4].text)
        language = tds[5].text.strip()
        assert tds[6].text.strip() == 'results'
        assert tds[7].text.strip() == 'submission history'
        assert tds[8].text.strip() == 'example history'
        # the "results" link carries pm (problem id) and cr (coder id) as query params
        query = dict(urllib.parse.parse_qsl(urllib.parse.urlparse(tds[6].find('a').attrs['href']).query))
        self.pm = query['pm']
        overview += [TopcoderLongContestProblemOverviewRow(rank, handle, provisional_rank, provisional_score, final_score, language, cr=int(query['cr']))]
    return overview
def login(self, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> None:
    """Log in to Toph with the credentials supplied by ``get_credentials``.

    :raises LoginError: if the handle/email or password is rejected
    """
    session = session or utils.new_default_session()
    url = 'https://toph.co/login'

    # get
    resp = utils.request('GET', url, session=session)
    if resp.url != url:  # redirected
        # an authenticated session is redirected away from the login page
        log.info('You are already logged in.')
        return

    # parse
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', class_='login-form')
    log.debug('form: %s', str(form))
    username, password = get_credentials()
    form['action'] = '/login'  # to avoid KeyError inside form.request method as Toph does not have any defined action
    form = utils.FormSender(form, url=resp.url)
    form.set('handle', username)
    form.set('password', password)

    # post
    resp = form.request(session)
    resp.raise_for_status()
    resp = utils.request('GET', url, session=session)  # Toph's Location header is not getting the expected value
    if resp.url != url:
        # success: a logged-in session is redirected away from the login page
        log.success('Welcome, %s.', username)
    else:
        log.failure('Invalid handle/email or password.')
        raise LoginError('Invalid handle/email or password.')
def download_data( self, *, session: Optional[requests.Session] = None ) -> CodeforcesContestData:
    """Fetch this contest's metadata via the Codeforces standings API."""
    session = session or utils.get_default_session()
    url = 'https://codeforces.com/api/contest.standings?contestId={}&from=1&count=1'.format(self.contest_id)
    resp = utils.request('GET', url, session=session)
    # record when this data was observed, as an aware local-time datetime
    timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone()
    payload = json.loads(resp.content.decode(resp.encoding))
    assert payload['status'] == 'OK'
    contest = payload['result']['contest']
    return CodeforcesContestData._from_json(contest, response=resp, session=session, timestamp=timestamp)
def get_problem_id(self, *, session: Optional[requests.Session] = None) -> str:
    """
    :note: use http://developers.u-aizu.ac.jp/api?key=judgeapi%2Farenas%2F%7BarenaId%7D%2Fproblems_GET
    """
    # serve from the per-instance cache when already resolved
    if self._problem_id is not None:
        return self._problem_id
    session = session or utils.get_default_session()
    url = 'https://judgeapi.u-aizu.ac.jp/arenas/{}/problems'.format(self.arena_id)
    resp = utils.request('GET', url, session=session)
    # find the entry whose letter matches this problem's alphabet
    for entry in json.loads(resp.content.decode(resp.encoding)):
        if entry['id'] == self.alphabet:
            self._problem_id = entry['problemId']
            logger.debug('problem: %s', entry)
            break
    return self._problem_id
def download_system_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """
    Download the full system test set of this yukicoder problem as a zip archive.

    :raises NotLoggedInError:
    """
    session = session or utils.get_default_session()
    # the testcase archive is only served to logged-in users
    if not self.get_service().is_logged_in(session=session):
        raise NotLoggedInError
    url = 'https://yukicoder.me/problems/no/{}/testcase.zip'.format(self.problem_no)
    resp = utils.request('GET', url, session=session)
    # the archive lays out files as test_in/NAME and test_out/NAME
    return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, 'test_%e/%s')
def _get_contests( cls, *, session: Optional[requests.Session] = None ) -> List[Dict[str, Any]]:
    """`_get_contests` wraps the official API and caches the result.

    The cache is published only after all three requests succeed; the original
    code assigned ``cls._contests = []`` before fetching, so an exception during
    the loop permanently cached a partial (possibly empty) list.
    """
    session = session or utils.get_default_session()
    if cls._contests is None:
        contests = []  # type: List[Dict[str, Any]]
        # the API splits contests into three buckets by time
        for tense in ('past', 'current', 'future'):
            url = 'https://yukicoder.me/api/v1/contest/{}'.format(tense)
            resp = utils.request('GET', url, session=session)
            contests.extend(json.loads(resp.content.decode()))
        cls._contests = contests
    return cls._contests
def _get_model(self, *, session: Optional[requests.Session] = None) -> Dict[str, Any]:
    """
    Fetch the challenge's "model" object from HackerRank's REST API.

    :raises SubmissionError:
    """
    session = session or utils.get_default_session()
    # fetch the challenge resource
    url = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}'.format(self.contest_slug, self.challenge_slug)
    resp = utils.request('GET', url, session=session)
    # the API wraps the payload in {status, model}
    payload = json.loads(resp.content.decode())
    logger.debug('json: %s', payload)
    if not payload['status']:
        logger.error('get model: failed')
        raise SubmissionError
    return payload['model']
def download_system_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download the full test set via HackerRank's download_testcases endpoint."""
    session = session or utils.get_default_session()
    # example: https://www.hackerrank.com/rest/contests/hourrank-1/challenges/beautiful-array/download_testcases
    url = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}/download_testcases'.format(self.contest_slug, self.challenge_slug)
    resp = utils.request('GET', url, session=session, raise_for_status=False)
    # the endpoint answers non-200 when the test set is unavailable; report and give up
    if resp.status_code != 200:
        log.error('response: %s', resp.content.decode())
        return []
    # the zip lays out files as input/inputNN.txt and output/outputNN.txt
    return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, '%eput/%eput%s.txt')
def submit_code( self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None ) -> onlinejudge.type.Submission:
    """Submit ``code`` to Codeforces for this problem.

    :param code: source code to submit
    :param language_id: Codeforces ``programTypeId`` value
    :param filename: name reported for the uploaded file (defaults to ``'code'``)
    :raises NotLoggedInError: if the submit form is absent (not logged in)
    :raises SubmissionError: if Codeforces rejects the submission
    """
    session = session or utils.get_default_session()
    # get
    resp = utils.request('GET', self.get_url(), session=session)
    # parse
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', class_='submitForm')
    if form is None:
        # the submit form is rendered only for logged-in users
        log.error('not logged in')
        raise NotLoggedInError
    log.debug('form: %s', str(form))

    # make data
    form = utils.FormSender(form, url=resp.url)
    form.set('programTypeId', language_id)
    form.set_file('sourceFile', filename or 'code', code)
    resp = form.request(session=session)
    resp.raise_for_status()

    # result
    if resp.url.endswith('/my'):
        # example: https://codeforces.com/contest/598/my
        log.success('success: result: %s', resp.url)
        return utils.DummySubmission(resp.url, problem=self)
    else:
        log.failure('failure')
        # parse error messages
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        msgs = []  # type: List[str]
        for span in soup.findAll('span', class_='error'):
            msgs += [span.string]
            log.warning('Codeforces says: "%s"', span.string)
        raise SubmissionError('it may be the "You have submitted exactly the same code before" error: ' + str(msgs))
def download_sample_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Scrape the sample cases from every <pre> tag on the problem page."""
    session = session or utils.new_default_session()
    # fetch the problem page
    resp = utils.request('GET', self.get_url(), session=session)
    # each <pre> is handed to _parse_sample_tag; non-samples yield None
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    zipper = utils.SampleZipper()
    for pre in soup.find_all('pre'):
        log.debug('pre: %s', str(pre))
        parsed = self._parse_sample_tag(pre)
        if parsed is None:
            continue
        body, title = parsed
        zipper.add(body, title)
    return zipper.get()
def download_system_cases( self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
    """
    Download the full system test set of this yukicoder problem.

    :raises NotLoggedInError:
    """
    session = session or utils.get_default_session()
    # the testcase archive is only served to logged-in users
    if not self.get_service().is_logged_in(session=session):
        raise NotLoggedInError
    resp = utils.request('GET', '{}/testcase.zip'.format(self.get_url()), session=session)
    # NOTE: yukicoder's test sets sometimes contain garbages. The owner insists that this is an intended behavior, so we need to ignore them.
    return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, 'test_%e/%s', ignore_unmatched_samples=True)
def get_available_languages( self, session: Optional[requests.Session] = None) -> List[Language]:
    """List the submission languages offered in the problem page's language selector."""
    session = session or utils.get_default_session()
    # We use the problem page since it is available without logging in
    resp = utils.request('GET', self.get_url(), session=session)
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    options = soup.find('select', id='lang').find_all('option')
    # collapse runs of whitespace inside each label to single spaces
    return [Language(opt.attrs['value'], ' '.join(opt.string.split())) for opt in options]
def iterate_contest_data( self, *, is_gym: bool = False, session: Optional[requests.Session] = None ) -> Iterator['CodeforcesContestData']:
    """Yield data for every contest listed by the Codeforces contest.list API."""
    session = session or utils.get_default_session()
    url = 'https://codeforces.com/api/contest.list?gym={}'.format('true' if is_gym else 'false')
    resp = utils.request('GET', url, session=session)
    # record when this data was observed, as an aware local-time datetime
    timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone()
    payload = json.loads(resp.content.decode(resp.encoding))
    assert payload['status'] == 'OK'
    for contest in payload['result']:
        yield CodeforcesContestData._from_json(contest, response=resp, session=session, timestamp=timestamp)
def list_problems( self, *, session: Optional[requests.Session] = None ) -> Sequence['YukicoderProblem']:
    """
    List the problems of this yukicoder contest via the official API.

    :raises RuntimeError:
    """
    session = session or utils.get_default_session()
    url = 'https://yukicoder.me/api/v1/contest/id/{}'.format(self.contest_id)
    resp = utils.request('GET', url, session=session)
    contest = json.loads(resp.content.decode())
    problems = []  # type: List[YukicoderProblem]
    for problem_id in contest['ProblemIdList']:
        problems.append(YukicoderProblem(problem_id=problem_id))
    return problems
def download_sample_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download the sample cases of this AOJ problem via the official testcase API."""
    session = session or utils.new_default_session()
    # get samples via the official API
    # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2Fsamples%2F%7BproblemId%7D_GET
    url = 'https://judgedat.u-aizu.ac.jp/testcases/samples/{}'.format(self.problem_id)
    resp = utils.request('GET', url, session=session)
    samples = []  # type: List[TestCase]
    for sample in json.loads(resp.content.decode(resp.encoding)):
        # both sides of a sample share the serial number as their label
        label = str(sample['serial'])
        samples.append(TestCase(
            LabeledString(label, sample['in']),
            LabeledString(label, sample['out']),
        ))
    return samples
def _get_lang_display_mapping(self, *, session: Optional[requests.Session] = None) -> Dict[str, str]: session = session or utils.get_default_session() # get url = 'https://hrcdn.net/hackerrank/assets/codeshell/dist/codeshell-cdffcdf1564c6416e1a2eb207a4521ce.js' # at "Mon Feb 4 14:51:27 JST 2019" resp = utils.request('GET', url, session=session) # parse s = resp.content.decode() l = s.index('lang_display_mapping:{c:"C",') l = s.index('{', l) r = s.index('}', l) + 1 s = s[l:r] logger.debug('lang_display_mapping (raw): %s', s) # this is not a json lang_display_mapping = {} for lang in s[1:-2].split('",'): key, value = lang.split(':"') lang_display_mapping[key] = value logger.debug('lang_display_mapping (parsed): %s', lang_display_mapping) return lang_display_mapping
def download_standings(self, *, session: Optional[requests.Session] = None) -> List[TopcoderLongContestProblemStandingsRow]:
    """Scrape every page of the ViewStandings table of this marathon round.

    Also stores the component id (the ``compid`` query parameter found in a
    row's history link) into ``self.compid`` as a side effect.

    :raises RuntimeError: if a page request does not answer 200
        (e.g. when redirected to the `module=ViewOverview` page)

    .. versionadded:: 6.2.0
        This method may be deleted in future.
    """
    session = session or utils.get_default_session()
    rows = []  # type: List[TopcoderLongContestProblemStandingsRow]
    # NOTE(review): `sr` advances by 1 per iteration while `nr`=100 rows are
    # requested — confirm against the site's paging semantics whether pages
    # can overlap; the loop terminates on the absence of a "next >>" link.
    for start in itertools.count(1):
        # get
        url = 'https://community.topcoder.com/longcontest/?module=ViewStandings&rd={}&nr=100&sr={}'.format(self.rd, start)
        resp = utils.request('GET', url, allow_redirects=False, session=session)
        if resp.status_code != 200:
            raise RuntimeError('failed to get {}'.format(url))

        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        table = soup.find('table', class_='statTable')
        for tr in table.find_all('tr')[2:]:  # NOTE: first two rows are headings
            tds = tr.find_all('td')
            texts = [td.text.strip() for td in tds]  # NOTE: some cells may be empty strings
            assert len(texts) == 7
            handle = texts[0]
            score = float(texts[1]) if texts[1] else None
            rank = int(texts[2]) if texts[2] else None
            last_submission_time = texts[3] or None
            language = texts[4] or None
            example_tests = int(tds[5].text)
            submissions = int(tds[6].text)
            # either the "example tests" or the "submissions" cell links to history
            href = (tds[5].find('a') or tds[6].find('a')).attrs['href']
            query = dict(urllib.parse.parse_qsl(urllib.parse.urlparse(href).query))
            self.compid = query['compid']
            rows += [TopcoderLongContestProblemStandingsRow(handle, score, rank, last_submission_time, language, example_tests, submissions, cr=int(query['cr']))]

        # check whether the next page exists
        link = soup.find('a', text='next >>')
        if link is None:
            break
    return rows
def submit_code( self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None ) -> onlinejudge.type.Submission: """ :raises NotLoggedInError: """ # NOTE: An implementation with the official API exists at 492d8d7. This is reverted at 2b7e6f5 because the API ignores cookies and says "提出するにはログインが必要です" at least at that time. session = session or utils.get_default_session() # get url = self.get_url() + '/submit' resp = utils.request('GET', url, session=session) # parse soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER) form = soup.find('form', id='submit_form') if not form: logger.error('form not found') raise NotLoggedInError # post form = utils.FormSender(form, url=resp.url) form.set('lang', language_id) form.set_file('file', filename or 'code', code) form.unset('custom_test') resp = form.request(session=session) resp.raise_for_status() # result if 'submissions' in resp.url: # example: https://yukicoder.me/submissions/314087 logger.info('success: result: %s', resp.url) return utils.DummySubmission(resp.url, problem=self) else: logger.error('failure') soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER) for div in soup.findAll('div', attrs={'role': 'alert'}): logger.warning('yukicoder says: "%s"', div.string) raise SubmissionError
def login(self, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> None:
    """Sign in to HackerRank with the credentials supplied by ``get_credentials``.

    :raises LoginError: if the user ID or password is wrong
    """
    session = session or utils.new_default_session()
    url = 'https://www.hackerrank.com/auth/login'
    # get
    resp = utils.request('GET', url, session=session)
    if resp.url != url:
        # an authenticated session is redirected away from the login page
        log.info('You have already signed in.')
        return
    # parse
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    csrftoken = soup.find('meta', attrs={'name': 'csrf-token'}).attrs['content']
    # walk up from the username <input> to its enclosing <form>
    tag = soup.find('input', attrs={'name': 'username'})
    while tag.name != 'form':
        tag = tag.parent
    form = tag
    # post
    username, password = get_credentials()
    form = utils.FormSender(form, url=resp.url)
    form.set('login', username)
    form.set('password', password)
    form.set('remember_me', 'true')
    form.set('fallback', 'true')
    resp = form.request(session, method='POST', action='/rest/auth/login', headers={'X-CSRF-Token': csrftoken})
    resp.raise_for_status()
    # result
    if '/auth' not in resp.url:
        log.success('You signed in.')
    else:
        log.failure('You failed to sign in. Wrong user ID or password.')
        raise LoginError('You failed to sign in. Wrong user ID or password.')