Example #1
 def login(self,
           *,
           get_credentials: onlinejudge.type.CredentialsProvider,
           session: Optional[requests.Session] = None) -> None:
     """
     :raises LoginError:
     """
     session = session or utils.get_default_session()
     url = 'https://codeforces.com/enter'
     # get
     resp = utils.request('GET', url, session=session)
     if resp.url != url:  # redirected
         log.info('You have already signed in.')
         return
     # parse
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                              utils.html_parser)
     form = soup.find('form', id='enterForm')
     log.debug('form: %s', str(form))
     username, password = get_credentials()
     form = utils.FormSender(form, url=resp.url)
     form.set('handleOrEmail', username)
     form.set('password', password)
     form.set('remember', 'on')
     # post
     resp = form.request(session)
     resp.raise_for_status()
     if resp.url != url:  # redirected
         log.success('Welcome, %s.', username)
     else:
         log.failure('Invalid handle or password.')
         raise LoginError('Invalid handle or password.')
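A minimal usage sketch for the login method above, assuming it is exposed on a service object such as onlinejudge.service.codeforces.CodeforcesService (the import path is an assumption here); get_credentials is any zero-argument callable returning a (username, password) tuple:

import getpass

from onlinejudge.service.codeforces import CodeforcesService  # assumed import path

def ask_credentials():
    # only called when the session is not already signed in
    return input('handle or email: '), getpass.getpass('password: ')

CodeforcesService().login(get_credentials=ask_credentials)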
Example #2
    def download_overview(self, *, session: Optional[requests.Session] = None) -> List[TopcoderLongContestProblemOverviewRow]:
        """
        .. versionadded:: 6.2.0
            This method may be deleted in future.
        """
        session = session or utils.get_default_session()

        # get
        number = 9999
        start = 1
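        # nr (number of rows) and sr (starting rank) are presumably the paging parameters of ViewOverview; asking for 9999 rows from rank 1 fetches the whole overview in one request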
        url = 'https://community.topcoder.com/longcontest/stats/?module=ViewOverview&rd={}&nr={}&sr={}'.format(self.rd, number, start)
        resp = utils.request('GET', url, session=session)

        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        table = soup.find('table', class_='stat')
        overview = []  # type: List[TopcoderLongContestProblemOverviewRow]
        for tr in table.find_all('tr', class_=re.compile(r'light|dark')):
            tds = tr.find_all('td')
            assert len(tds) == 9
            rank = int(tds[0].text)
            handle = tds[1].text.strip()
            provisional_rank = int(tds[2].text)
            provisional_score = float(tds[3].text)
            final_score = float(tds[4].text)
            language = tds[5].text.strip()
            assert tds[6].text.strip() == 'results'
            assert tds[7].text.strip() == 'submission history'
            assert tds[8].text.strip() == 'example history'
            query = dict(urllib.parse.parse_qsl(urllib.parse.urlparse(tds[6].find('a').attrs['href']).query))
            self.pm = query['pm']
            overview += [TopcoderLongContestProblemOverviewRow(rank, handle, provisional_rank, provisional_score, final_score, language, cr=int(query['cr']))]
        return overview
Example #3
    def _load_details(self,
                      session: Optional[requests.Session] = None,
                      lang: str = 'en'):
        assert lang in ('en', 'ja')
        session = session or utils.get_default_session()
        resp = _request('GET',
                        self.get_url(type='beta', lang=lang),
                        session=session)
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                                 utils.html_parser)

        contest_name, _, _ = soup.find('title').text.rpartition(' - ')
        contest_duration = soup.find('small', class_='contest-duration')
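        # the contest-duration block contains exactly two links (start and end time); _parse_start_time extracts a datetime from each href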
        self._start_time, end_time = [
            self._parse_start_time(a['href'])
            for a in contest_duration.find_all('a')
        ]
        self._duration = end_time - self._start_time
        if lang == 'en':
            self._contest_name_en = contest_name
        elif lang == 'ja':
            self._contest_name_ja = contest_name
        else:
            assert False
        _, _, self._can_participate = soup.find(
            'span',
            text=re.compile(r'^(Can Participate|参加対象): ')).text.partition(': ')
        _, _, self._rated_range = soup.find(
            'span',
            text=re.compile(r'^(Rated Range|Rated対象): ')).text.partition(': ')
        penalty_text = soup.find('span',
                                 text=re.compile(r'^(Penalty|ペナルティ): ')).text
        m = re.match(r'(Penalty|ペナルティ): (\d+)( minutes?|分)', penalty_text)
        assert m
        self._penalty = datetime.timedelta(minutes=int(m.group(2)))
Example #4
    def download_system_test(
            self,
            test_case_id: int,
            session: Optional[requests.Session] = None) -> str:
        """
        :raises NotLoggedInError:
        :note: You need to parse this result manually.

        .. versionadded:: 6.2.0
            This method may be deleted in future.
        """
        session = session or utils.get_default_session()

        # get
        assert self.pm is not None
        url = 'https://community.topcoder.com/longcontest/stats/?module=ViewSystemTest&rd={}&pm={}&tid={}'.format(
            self.rd, self.pm, test_case_id)
        resp = utils.request('GET', url, session=session)

        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                                 utils.html_parser)
        if soup.find('form', attrs={'name': 'frmLogin'}):
            raise NotLoggedInError
        return soup.find('pre').text
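A hedged usage sketch for the two Topcoder methods above: the `assert self.pm is not None` only holds after download_overview() has run, because that method stores query['pm'] on the object as a side effect. Here `problem` stands in for whatever marathon-match problem object these methods are defined on, and the test_case_id value is a placeholder:

rows = problem.download_overview()          # also fills in problem.pm as a side effect
raw_text = problem.download_system_test(1)  # returns the raw <pre> text
# as the docstring notes, this text still has to be parsed manually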
Example #5
    def get_available_languages(
            self,
            *,
            session: Optional[requests.Session] = None) -> List[Language]:
        """
        :raises NotLoggedInError:
        """

        session = session or utils.get_default_session()

        # get
        url = 'https://kcs.miz-miz.biz/contest/{}/submit/{}'.format(
            self.contest_id, self.problem_id)
        resp = utils.request('GET', url, session=session)

        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                                 utils.HTML_PARSER)
        select = soup.find('select', attrs={'name': 'language'})
        if select is None:
            raise NotLoggedInError
        languages = []  # type: List[Language]
        for option in select.findAll('option'):
            languages += [Language(option.attrs['value'], option.string)]
        return languages
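A short usage sketch, assuming the returned Language objects expose the (id, name) pair they are constructed from above, with `problem` standing in for the object this method is defined on:

for language in problem.get_available_languages():
    print(language.id, language.name)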
Example #6
    def iterate_contest_data(self, *, lang: str = 'ja', session: Optional[requests.Session] = None) -> Iterator['AtCoderContestData']:
        """
        :param lang: must be `ja` (default) or `en`.
        :note: `lang=ja` is required to see some Japanese-local contests.
        :note: You can use `lang=en` to see the English names of contests.
        """

        assert lang in ('ja', 'en')
        session = session or utils.get_default_session()
        last_page = None
        for page in itertools.count(1):  # 1-based
            if last_page is not None and page > last_page:
                break

            # get
            url = 'https://atcoder.jp/contests/archive?lang={}&page={}'.format(lang, page)
            resp = _request('GET', url, session=session)
            timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone()

            # parse
            soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
            if last_page is None:
                last_page = int(soup.find('ul', class_='pagination').find_all('li')[-1].text)
                log.debug('last page: %s', last_page)
            tbody = soup.find('tbody')
            for tr in tbody.find_all('tr'):
                yield AtCoderContestData._from_table_row(tr, lang=lang, response=resp, session=session, timestamp=timestamp)
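Because this is a generator that keeps fetching archive pages until the last one, it can be consumed lazily. A sketch that only looks at the first few rows, where `service` stands in for the AtCoder service object this method is defined on:

import itertools

for contest_data in itertools.islice(service.iterate_contest_data(lang='en'), 5):
    print(contest_data)  # the first five rows of https://atcoder.jp/contests/archive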
Example #7
    def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> onlinejudge.type.Submission:
        """
        :raises NotLoggedInError:
        """

        # NOTE: An implementation using the official API exists at 492d8d7. It was reverted at 2b7e6f5 because the API ignored cookies and replied "提出するにはログインが必要です" ("you must be logged in to submit"), at least at that time.

        session = session or utils.get_default_session()
        # get
        url = self.get_url() + '/submit'
        resp = utils.request('GET', url, session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER)
        form = soup.find('form', id='submit_form')
        if not form:
            logger.error('form not found')
            raise NotLoggedInError
        # post
        form = utils.FormSender(form, url=resp.url)
        form.set('lang', language_id)
        form.set_file('file', filename or 'code', code)
        form.unset('custom_test')
        resp = form.request(session=session)
        resp.raise_for_status()
        # result
        if 'submissions' in resp.url:
            # example: https://yukicoder.me/submissions/314087
            logger.info('success: result: %s', resp.url)
            return utils.DummySubmission(resp.url, problem=self)
        else:
            logger.error('failure')
            soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER)
            for div in soup.findAll('div', attrs={'role': 'alert'}):
                logger.warning('yukicoder says: "%s"', div.string)
            raise SubmissionError
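A hedged submission sketch: `problem` stands for the yukicoder problem object this method is defined on, the language id shown is only a placeholder (real ids come from the submit form or a language-listing call), and the get_url() call assumes the returned submission object exposes the URL it was constructed with:

import pathlib

from onlinejudge.type import LanguageId  # assumed import path

code = pathlib.Path('main.py').read_bytes()
submission = problem.submit_code(code, language_id=LanguageId('python3'), filename='main.py')
print(submission.get_url())  # e.g. https://yukicoder.me/submissions/314087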
Example #8
 def download_sample_cases(
     self,
     *,
     session: Optional[requests.Session] = None
 ) -> List[onlinejudge.type.TestCase]:
     session = session or utils.get_default_session()
     # get
     resp = utils.request('GET', self.get_url(), session=session)
     # parse
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                              utils.html_parser)
     samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
     for tag in soup.find_all('div', class_=re.compile(
             '^(in|out)put$')):  # Codeforces writes very nice HTML :)
         log.debug('tag: %s', str(tag))
         assert len(list(tag.children))
         title, pre = list(tag.children)
         assert 'title' in title.attrs['class']
         assert pre.name == 'pre'
         s = ''
         for it in pre.children:
             if it.name == 'br':
                 s += '\n'
             else:
                 s += it.string
         s = s.lstrip()
         samples.add(s.encode(), title.string)
     return samples.get()
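A usage sketch for saving the downloaded samples, assuming the returned TestCase tuples expose name / input_data / output_data fields as the TestCase(...) constructions elsewhere on this page suggest, with `problem` again as a stand-in:

import os

os.makedirs('test', exist_ok=True)
for testcase in problem.download_sample_cases():
    with open(os.path.join('test', testcase.name + '.in'), 'wb') as fh:
        fh.write(testcase.input_data)
    with open(os.path.join('test', testcase.name + '.out'), 'wb') as fh:
        fh.write(testcase.output_data)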
Example #9
 def download_sample_cases(
     self,
     *,
     session: Optional[requests.Session] = None
 ) -> List[onlinejudge.type.TestCase]:
     session = session or utils.get_default_session()
     # get
     resp = utils.request('GET', self.get_url(), session=session)
     # parse
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                              utils.html_parser)
     samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
     for tag in soup.find_all('div', class_=re.compile(
             '^(in|out)put$')):  # Codeforces writes very nice HTML :)
         log.debug('tag: %s', str(tag))
         non_empty_children = [
             child for child in tag.children if child.name or child.strip()
         ]
         log.debug("tags after removing empty strings: %s",
                   non_empty_children)
         assert len(non_empty_children
                    ) == 2  # if not 2, next line throws ValueError.
         title, pre = list(non_empty_children)
         assert 'title' in title.attrs['class']
         assert pre.name == 'pre'
         data = utils.format_sample_case(str(utils.parse_content(pre)))
         samples.add(data.encode(), title.string)
     return samples.get()
Example #10
    def download_system_cases(
            self,
            session: Optional[requests.Session] = None) -> List[TestCase]:
        session = session or utils.get_default_session()

        # get header
        # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2Fheader_GET
        url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/header'.format(
            self.problem_id)
        resp = utils.request('GET', url, session=session)
        headers = json.loads(resp.content.decode(resp.encoding))

        # get testcases via the official API
        testcases = []  # type: List[TestCase]
        for header in headers['headers']:
            # NOTE: this is not the same endpoint as http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2F%7Bserial%7D_GET, because that JSON API often truncates data with "..... (terminated because of the limitation)"
            url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/{}'.format(
                self.problem_id, header['serial'])
            resp_in = utils.request('GET', url + '/in', session=session)
            resp_out = utils.request('GET', url + '/out', session=session)
            testcases += [
                TestCase(
                    header['name'],
                    header['name'],
                    resp_in.content,
                    header['name'],
                    resp_out.content,
                )
            ]
        return testcases
Example #11
    def login(self,
              get_credentials: onlinejudge.type.CredentialsProvider,
              session: Optional[requests.Session] = None) -> None:
        """
        :raises LoginError:
        """
        session = session or utils.get_default_session()

        # NOTE: you can see this login page with https://community.topcoder.com/longcontest/?module=Submit
        url = 'https://community.topcoder.com/longcontest/'
        username, password = get_credentials()
        data = {
            'nextpage': 'https://www.topcoder.com/',
            'module': 'Login',
            'ha': username,
            'pass': password,
            'rem': 'on',
        }
        resp = utils.request('POST', url, session=session, data=data)

        if 'longcontest' not in resp.url:
            log.success('Success')
        else:
            log.failure('Failure')
            raise LoginError
Example #12
    def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> onlinejudge.type.Submission:
        """
        :raises NotLoggedInError:
        :raises SubmissionError:
        """

        session = session or utils.get_default_session()
        if not self.get_service().is_logged_in(session=session):
            raise NotLoggedInError
        # get
        resp = utils.request('GET', self.get_url(), session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        csrftoken = soup.find('meta', attrs={'name': 'csrf-token'}).attrs['content']
        # post
        url = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}/submissions'.format(self.contest_slug, self.challenge_slug)
        payload = {'code': code.decode(), 'language': str(language_id), 'contest_slug': self.contest_slug}  # decode: bytes are not JSON-serializable
        logger.debug('payload: %s', payload)
        resp = utils.request('POST', url, session=session, json=payload, headers={'X-CSRF-Token': csrftoken})
        # parse
        it = json.loads(resp.content.decode())
        logger.debug('json: %s', it)
        if not it['status']:
            logger.error('Submit Code: failed')
            raise SubmissionError
        model_id = it['model']['id']
        url = self.get_url().rstrip('/') + '/submissions/code/{}'.format(model_id)
        logger.info('success: result: %s', url)
        return utils.DummySubmission(url, problem=self)
Example #13
 def download_sample_cases(
     self,
     *,
     session: Optional[requests.Session] = None
 ) -> List[onlinejudge.type.TestCase]:
     session = session or utils.get_default_session()
     resp = utils.request('GET', self.get_url(), session=session)
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding),
                              utils.html_parser)
     samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
     for table in soup.find_all('table', class_="samples"):
         log.debug('table: %s', str(table))
         case = table.find('tbody').find('tr')
         assert len(list(case.children)) == 2
         input_pre, output_pre = list(
             map(lambda td: td.find('pre'), list(case.children)))
         assert input_pre.name == 'pre'
         assert output_pre.name == 'pre'
         assert re.search("^preSample.*Input$", input_pre.attrs['id'])
         assert re.search("^preSample.*Output$", output_pre.attrs['id'])
         samples.add(
             utils.parse_content(input_pre).lstrip().encode(), "Input")
         samples.add(
             utils.parse_content(output_pre).lstrip().encode(), "Output")
     return samples.get()
Example #14
    def login(self, *, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> None:
        """
        :raises LoginError:
        """

        session = session or utils.get_default_session()
        if self.is_logged_in(session=session):
            return

        # get
        url = 'https://atcoder.jp/login'
        resp = _request('GET', url, session=session, allow_redirects=False)

        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        form = soup.find('form', action='')
        if not form:
            raise LoginError('something wrong')

        # post
        username, password = get_credentials()
        form = utils.FormSender(form, url=resp.url)
        form.set('username', username)
        form.set('password', password)
        resp = form.request(session)
        _list_alert(resp, print_=True)

        # result
        if 'login' not in resp.url:
            log.success('Welcome,')  # AtCoder redirects to the top page if success
        else:
            log.failure('Username or Password is incorrect.')
            raise LoginError
Example #15
 def is_logged_in(self,
                  *,
                  session: Optional[requests.Session] = None) -> bool:
     session = session or utils.get_default_session()
     url = 'https://www.hackerrank.com/auth/login'
     resp = utils.request('GET', url, session=session)
     return '/auth' not in resp.url
Example #16
    def iterate_submission_data_where(
            self,
            *,
            me: bool = False,
            problem_id: Optional[str] = None,
            language_id: Optional[LanguageId] = None,
            status: Optional[str] = None,
            user_glob: Optional[str] = None,
            order: Optional[str] = None,
            desc: bool = False,
            lang: Optional[str] = None,
            pages: Optional[Iterator[int]] = None,
            session: Optional[requests.Session] = None  # TODO: in Python 3.5, you cannot use both "*" and a trailing ","
    ) -> Iterator['AtCoderSubmissionData']:
        # yapf: enable
        """
        :note: If you use certain combinations of options, the results may not be correct when new submissions arrive while crawling.
        :param status: must be one of `AC`, `WA`, `TLE`, `MLE`, `RE`, `CE`, `QLE`, `OLE`, `IE`, `WJ`, `WR`, or `Judging`
        :param order: must be one of `created`, `score`, `source_length`, `time_consumption`, or `memory_consumption`
        :param me: use the `.../submissions/me` page instead of `.../submissions`
        :param user_glob: is used as the value of `f.User` query parameter
        :param language_id: is used as the value of `f.Language` query parameter
        :param lang: must be one of `ja`, `en`
        :param pages: is an iterator to list the page numbers to GET
        """
        assert status in (None, 'AC', 'WA', 'TLE', 'MLE', 'RE', 'CE', 'QLE', 'OLE', 'IE', 'WJ', 'WR', 'Judging')
        assert order in (None, 'created', 'score', 'source_length', 'time_consumption', 'memory_consumption')
        if desc:
            assert order is not None

        base_url = 'https://atcoder.jp/contests/{}/submissions'.format(self.contest_id)
        if me:
            base_url += '/me'
        params = {}
        if problem_id is not None:
            params['f.Task'] = problem_id
        if language_id is not None:
            params['f.Language'] = language_id
        if status is not None:
            params['f.Status'] = status
        if user_glob is not None:
            params['f.User'] = user_glob
        if order is not None:
            params['orderBy'] = order
        if desc:
            params['desc'] = 'true'

        # get
        session = session or utils.get_default_session()
        for page in pages or itertools.count(1):
            params_page = ({'page': str(page)} if page >= 2 else {})
            url = base_url + '?' + urllib.parse.urlencode({**params, **params_page})
            resp = _request('GET', url, session=session)
            timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone()

            submissions = list(self._iterate_submission_data_from_response(resp=resp, session=session, timestamp=timestamp))
            if not submissions:
                break
            yield from submissions
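A sketch of how the filters and the `pages` iterator combine, where `contest` stands in for the AtCoder contest object this method is defined on:

# accepted submissions only, and only from the first two result pages
it = contest.iterate_submission_data_where(status='AC', pages=iter([1, 2]))
for submission_data in it:
    print(submission_data)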
Example #17
    def download_sample_cases(
            self,
            *,
            session: Optional[requests.Session] = None) -> List[TestCase]:
        session = session or utils.get_default_session()
        url = 'https://www.facebook.com/api/graphql/'

        # get problem_id
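        # fb_api_req_friendly_name and doc_id identify a persisted GraphQL query on Facebook's side; 'variables' carries its JSON-encoded arguments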
        data = {
            'fb_api_req_friendly_name':
            'CodingCompetitionsContestRootQuery',
            'variables':
            json.dumps({
                "series_vanity": self.series_vanity,
                "season_vanity": self.season_vanity,
                "contest_vanity": self.contest_vanity,
            }),
            'doc_id':
            '2709858395781426',
        }  # type: Dict[str, Any]
        resp = utils.request('POST', url, session=session, data=data)
        try:
            result = json.loads(resp.content.decode())  # type: Dict[str, Any]
        except json.decoder.JSONDecodeError:
            raise onlinejudge.type.SampleParseError(
                "The result of Facebook's API is empty. Did you set your User-Agent?"
            )
        contest_id = result['data']['contestSeries']['contestSeason'][
            'contest']['id']

        # get sample URLs
        data = {
            'fb_api_req_friendly_name':
            'CodingCompetitionsContestProblemQuery',
            'variables':
            json.dumps({
                "contest_id": contest_id,
                "series_vanity": self.series_vanity,
                "display_index": self.display_index,
                "participation_type": "UNTIMED_PRACTICE",
                "can_submit": False,
            }),
            'doc_id':
            '3556774947707941',
        }
        resp = utils.request('POST', url, session=session, data=data)
        result = json.loads(resp.content.decode())
        problem_sample_test_case_set = result['data']['contest']['problem'][
            'problem_sample_test_case_set']

        # return the result
        sample = TestCase(
            'sample',
            'Sample Input',
            problem_sample_test_case_set['test_case_set_full_input'].encode(),
            'Sample Output',
            problem_sample_test_case_set['test_case_set_full_output'].encode(),
        )
        return [sample]
Example #18
    def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        session = session or utils.get_default_session()
        base_url = self.get_url()

        # get csrftoken
        resp = utils.request('GET', base_url, session=session)
        csrftoken = None
        for cookie in session.cookies:
            if cookie.name == 'csrftoken' and cookie.domain == 'csacademy.com':  # type: ignore
                csrftoken = cookie.value  # type: ignore
        if csrftoken is None:
            logger.error('csrftoken is not found')
            return []

        # get config
        headers = {
            'x-csrftoken': csrftoken,
            'x-requested-with': 'XMLHttpRequest',
        }
        contest_url = 'https://csacademy.com/contest/{}/'.format(self.contest_name)
        resp = utils.request('GET', contest_url, session=session, headers=headers)
        # parse config
        assert resp.encoding is None
        config = json.loads(resp.content.decode())  # NOTE: Should I memoize this? Is the CSAcademyRound class required?
        task_config = None
        for it in config['state']['contesttask']:
            if it['name'] == self.task_name:
                task_config = it
        if task_config is None:
            logger.error('no such task: %s', self.task_name)
            return []

        # get
        get_contest_task_url = 'https://csacademy.com/contest/get_contest_task/'
        payload = {'contestTaskId': (None, str(task_config['id']))}
        headers = {
            'x-csrftoken': csrftoken,
            'x-requested-with': 'XMLHttpRequest',
            'Referer': base_url,
        }
        resp = utils.request('POST', get_contest_task_url, session=session, files=payload, headers=headers)
        # parse
        assert resp.encoding is None
        contest_task = json.loads(resp.content.decode())  # NOTE: Should I memoize this?
        if contest_task.get('title') == 'Page not found':
            logger.error('something wrong')
            return []
        samples = []
        for test_number, example_test in enumerate(contest_task['state']['EvalTask'][0]['exampleTests']):
            inname = 'Input {}'.format(test_number)
            outname = 'Output {}'.format(test_number)
            samples += [TestCase(
                'sample-{}'.format(test_number + 1),
                inname,
                example_test['input'].encode(),
                outname,
                example_test['output'].encode(),
            )]
        return samples
Example #19
 def is_logged_in(self,
                  *,
                  session: Optional[requests.Session] = None) -> bool:
     session = session or utils.get_default_session()
     url = 'https://yukicoder.me'
     resp = utils.request('GET', url, session=session)
     assert resp.status_code == 200
     return 'login-btn' not in str(resp.content)
Example #20
 def is_logged_in(self, session: Optional[requests.Session] = None) -> bool:
     session = session or utils.get_default_session()
     url = 'https://codeforces.com/enter'
     resp = utils.request('GET',
                          url,
                          session=session,
                          allow_redirects=False)
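      # when already signed in, codeforces.com answers /enter with a redirect to the top page instead of the login form, so a 302 (redirects are not followed here) means "logged in"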
     return resp.status_code == 302
Example #21
    def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        session = session or utils.get_default_session()
        if self.domain == 'codingcompetitions.withgoogle.com':
            url = 'https://codejam.googleapis.com/dashboard/{}/poll?p=e30'.format(self.contest_id)
            resp = utils.request('GET', url, session=session)
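            # the poll endpoint returns URL-safe Base64 without padding, so '=' is appended until the length is a multiple of 4 before decoding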
            data = json.loads(base64.urlsafe_b64decode(resp.content + b'=' * ((-len(resp.content)) % 4)).decode())
            logger.debug('%s', data)

            # parse JSON
            for task in data['challenge']['tasks']:
                if task['id'] == self.problem_id:
                    statement = task['statement']
                    break
            else:
                raise SampleParseError("the problem {} is not found in the challenge {}".format(repr(self.problem_id), repr(self.contest_id)))

        elif self.domain == 'code.google.com':
            try:
                url = 'https://{}/{}/contest/{}/dashboard/ContestInfo'.format(self.domain, self.kind, self.contest_id)
                resp = utils.request('GET', url, session=session)
            except requests.HTTPError:
                logger.warning('hint: Google Code Jam moves old problems to the new platform')
                raise
            data = json.loads(resp.content.decode())

            # parse JSON
            assert self.problem_id.startswith('p')
            i = int(self.problem_id[1:])
            statement = data['problems'][i]['body']

        else:
            assert False

        # parse HTML
        soup = bs4.BeautifulSoup(statement, utils.HTML_PARSER)
        io_contents = soup.find_all('pre', class_='io-content')
        if len(io_contents) % 2 != 0:
            raise SampleParseError("""the number of <pre class="io-content"> is not multiple of two""")

        input_contents = islice(io_contents, 0, None, 2)
        output_contents = islice(io_contents, 1, None, 2)

        samples = []

        for index, (input_content, output_content) in enumerate(zip(input_contents, output_contents)):
            if input_content.text.startswith('Case #'):
                logger.warning('''the sample input starts with "Case #"''')
            if not output_content.text.startswith('Case #'):
                logger.warning('''the sample output doesn't start with "Case #"''')
            samples.append(TestCase(
                'sample-{}'.format(index + 1),
                'Input {}'.format(index + 1),
                utils.textfile(input_content.text.rstrip()).encode(),
                'Output {}'.format(index + 1),
                utils.textfile(output_content.text.rstrip()).encode(),
            ))

        return samples
Example #22
 def is_logged_in(self, *, session: Optional[requests.Session] = None) -> bool:
     """
     .. versionadded:: 6.2.0
     """
     session = session or utils.get_default_session()
     url = 'https://community.topcoder.com/longcontest/stats/?module=ViewSystemTest&rd=17143&pm=14889&tid=33800773'
     resp = utils.request('GET', url, session=session)
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
     return soup.find('form', attrs={'name': 'frmLogin'}) is None
Example #23
 def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[onlinejudge.type.TestCase]:
     """
     :raises requests.exceptions.HTTPError: if no such problem exists
     :raises SampleParseError: if parsing failed
     """
     session = session or utils.get_default_session()
     resp = _request('GET', self.get_url(type='beta'), session=session)
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
     return AtCoderProblemDetailedData._parse_sample_cases(soup)
Example #24
    def iterate_submissions_where(
        self,
        me: bool = False,
        problem_id: Optional[str] = None,
        language_id: Optional[LanguageId] = None,
        status: Optional[str] = None,
        user_glob: Optional[str] = None,
        order: Optional[str] = None,
        desc: bool = False,
        lang: Optional[str] = None,
        session: Optional[requests.Session] = None
    ) -> Generator['AtCoderSubmission', None, None]:
        """
        :note: If you use certain combinations of options, the results may not be correct when new submissions arrive while crawling.
        :param status: must be one of `AC`, `WA`, `TLE`, `MLE`, `RE`, `CE`, `QLE`, `OLE`, `IE`, `WJ`, `WR`, or `Judging`
        :param order: must be one of `created`, `score`, `source_length`, `time_consumption`, or `memory_consumption`
        """
        assert status in (None, 'AC', 'WA', 'TLE', 'MLE', 'RE', 'CE', 'QLE',
                          'OLE', 'IE', 'WJ', 'WR', 'Judging')
        assert order in (None, 'created', 'score', 'source_length',
                         'time_consumption', 'memory_consumption')
        if desc:
            assert order is not None

        base_url = 'https://atcoder.jp/contests/{}/submissions'.format(
            self.contest_id)
        if me:
            base_url += '/me'
        params = {}
        if problem_id is not None:
            params['f.Task'] = problem_id
        if language_id is not None:
            params['f.Language'] = language_id
        if status is not None:
            params['f.Status'] = status
        if user_glob is not None:
            params['f.User'] = user_glob
        if order is not None:
            params['orderBy'] = order
        if desc:
            params['desc'] = 'true'

        # get
        session = session or utils.get_default_session()
        for page in itertools.count(1):
            params_page = ({'page': str(page)} if page >= 2 else {})
            url = base_url + '?' + urllib.parse.urlencode({
                **params,
                **params_page
            })
            resp = _request('GET', url, session=session)

            submissions = list(self._iterate_submissions_from_response(resp))
            if not submissions:
                break
            yield from submissions
Example #25
    def download_sample_cases(
            self,
            *,
            session: Optional[requests.Session] = None) -> List[TestCase]:
        session = session or utils.get_default_session()
        if self.domain == 'codingcompetitions.withgoogle.com':
            url = 'https://codejam.googleapis.com/dashboard/{}/poll?p=e30'.format(
                self.contest_id)
            resp = utils.request('GET', url, session=session)
            data = json.loads(
                base64.urlsafe_b64decode(resp.content + b'=' *
                                         ((-len(resp.content)) % 4)).decode())
            log.debug('%s', data)

            # parse JSON
            for task in data['challenge']['tasks']:
                if task['id'] == self.problem_id:
                    statement = task['statement']
                    break
            else:
                raise SampleParseError(
                    "the problem {} is not found in the challenge {}".format(
                        repr(self.problem_id), repr(self.contest_id)))

        elif self.domain == 'code.google.com':
            url = 'https://{}/{}/contest/{}/dashboard/ContestInfo'.format(
                self.domain, self.kind, self.contest_id)
            resp = utils.request('GET', url, session=session)
            data = json.loads(resp.content.decode())

            # parse JSON
            assert self.problem_id.startswith('p')
            i = int(self.problem_id[1:])
            statement = data['problems'][i]['body']

        else:
            assert False

        # parse HTML
        soup = bs4.BeautifulSoup(statement, utils.html_parser)
        io_contents = soup.find_all('pre', class_='io-content')
        if len(io_contents) != 2:
            raise SampleParseError(
                """the number of <pre class="io-content"> is not two""")
        if io_contents[0].text.startswith('Case #'):
            log.warning('''the sample input starts with "Case #"''')
        if not io_contents[1].text.startswith('Case #'):
            log.warning('''the sample output doesn't start with "Case #"''')
        sample = TestCase(
            'sample',
            'Input',
            utils.textfile(io_contents[0].text.rstrip()).encode(),
            'Output',
            utils.textfile(io_contents[1].text.rstrip()).encode(),
        )
        return [sample]
Example #26
 def download_system_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
     session = session or utils.get_default_session()
     # example: https://www.hackerrank.com/rest/contests/hourrank-1/challenges/beautiful-array/download_testcases
     url = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}/download_testcases'.format(self.contest_slug, self.challenge_slug)
     resp = utils.request('GET', url, session=session, raise_for_status=False)
     if resp.status_code == 403:
         logger.debug('HTML: %s', resp.content.decode())
         raise onlinejudge.type.SampleParseError("Access Denied. Did you set your User-Agent?")
     resp.raise_for_status()
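      # the zip is expected to contain input/input<N>.txt and output/output<N>.txt; '%e' expands to 'in'/'out' and '%s' to the test number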
     return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, '%eput/%eput%s.txt')
Example #27
 def is_logged_in(self,
                  *,
                  session: Optional[requests.Session] = None) -> bool:
     session = session or utils.get_default_session()
     url = 'https://toph.co/login'
     resp = utils.request('GET',
                          url,
                          session=session,
                          allow_redirects=False)
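      # toph.co redirects away from /login for an authenticated session, so any non-200 answer (redirects are not followed here) is treated as "logged in"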
     return resp.status_code != 200
Example #28
 def get_input_format(self, *, session: Optional[requests.Session] = None) -> Optional[str]:
     session = session or utils.get_default_session()
     # get
     resp = utils.request('GET', self.get_url(), session=session)
     # parse
     soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.HTML_PARSER)
     for h4 in soup.find_all('h4'):
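          # '入力' is the Japanese heading "Input"; yukicoder statements are written in Japanese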
         if h4.string == '入力':
             return h4.parent.find('pre').decode_contents(formatter=None)
     return None
Example #29
    def list_problems(self, *, session: Optional[requests.Session] = None) -> Sequence['YukicoderProblem']:
        """
        :raises RuntimeError:
        """

        session = session or utils.get_default_session()
        url = 'https://yukicoder.me/api/v1/contest/id/{}'.format(self.contest_id)
        resp = utils.request('GET', url, session=session)
        data = json.loads(resp.content.decode())
        return [YukicoderProblem(problem_id=problem_id) for problem_id in data['ProblemIdList']]
Example #30
    def _get_problems(cls, *, session: Optional[requests.Session] = None) -> List[Dict[str, Any]]:
        """`_get_problems` wraps the official API and caches the result.
        """

        session = session or utils.get_default_session()
        if cls._problems is None:
            url = 'https://yukicoder.me/api/v1/problems'
            resp = utils.request('GET', url, session=session)
            cls._problems = json.loads(resp.content.decode())
        return cls._problems
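A sketch of the memoization behaviour, assuming this classmethod lives on the yukicoder service class (the name YukicoderService and its import path are assumptions from context):

from onlinejudge.service.yukicoder import YukicoderService  # assumed import path

problems = YukicoderService._get_problems()        # the first call hits the API
problems_again = YukicoderService._get_problems()  # served from the cls._problems cache
assert problems is problems_again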