def generate_scanner(args: 'argparse.Namespace') -> None:
    """Generate scanner code from the problem's input-format description.

    Prints the generated code to stdout; exits with status 1 when the problem
    URL is not recognized or no input format is found.
    """
    if not args.silent:
        log.warning('This feature is ' + log.red('experimental') + '.')
    if args.silent:
        # NOTE: iterate over a copy; removing handlers while iterating the live
        # list would skip every other handler (classic modify-while-iterating bug)
        for handler in list(log.logger.handlers):
            log.removeHandler(handler)
    problem = onlinejudge.dispatch.problem_from_url(args.url)
    if problem is None:
        sys.exit(1)
    with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess:
        it = problem.get_input_format(session=sess)  # type: Any
        if not it:
            log.error('input format not found')
            sys.exit(1)
        try:
            # pipeline: raw text -> tokens -> parse tree -> normalized tree -> code
            log.debug('original data: %s', repr(it))
            it = list(tokenize(it))
            log.debug('tokenized: %s', str(it))
            it = list(parse(it))
            log.debug('parsed: %s', str(it))
            it = postprocess(it)
            log.debug('postprocessed: %s', str(it))
            it = export(it, use_scanf=args.scanf, repeat_macro=args.repeat_macro)
            log.debug('result: %s', repr(it))
        except Exception:
            # was a bare `except:`; narrowed so Ctrl-C etc. is not logged as an error
            # (still re-raised either way, so failures are not swallowed)
            log.error('something wrong')
            raise
        log.success('success:')
        print(log.bold(it.rstrip()))  # to stdout
def _get_problem_directory_path(self) -> pathlib.Path:
    """Locate this problem's directory inside the cloned repository.

    A problem lives in a directory whose basename is the problem id and which
    contains an ``info.toml``; exactly one match is expected.

    :raises RuntimeError: when zero or multiple candidate directories are found
    """
    path = LibraryCheckerService._get_cloned_repository_path()
    info_tomls = list(path.glob('**/{}/info.toml'.format(glob.escape(self.problem_id))))
    if len(info_tomls) != 1:
        log.error("the problem %s not found or broken", self.problem_id)
        # carry the reason in the exception itself, for callers that don't see the log
        raise RuntimeError("the problem {} not found or broken".format(self.problem_id))
    return info_tomls[0].parent
def extract_from_files(files: Iterator[Tuple[str, bytes]], format: str = '%s.%e', out: str = 'out') -> List[TestCase]:
    """Group loose (filename, content) pairs into input/output test cases.

    :param out: is the extension for output files. This is used when the zip-file contains files like `sample-1.ans` instead of `sample-1.out`.
    """
    # regexes substituted for the %-placeholders: %s = case name, %e = side ("in" or the output extension)
    table = {
        's': r'[^/]+',
        'e': r'(in|{})'.format(out),
    }
    grouped = collections.defaultdict(dict)  # type: Dict[str, Dict[str, Tuple[str, bytes]]]
    for filename, content in files:
        m = onlinejudge._implementation.format_utils.percentparse(filename, format, table)
        assert m
        assert m['e'] not in grouped[m['s']]
        grouped[m['s']][m['e']] = (filename, content)
    testcases = []  # type: List[TestCase]
    for name in sorted(grouped.keys()):
        data = grouped[name]
        if 'in' in data and out in data:
            testcases.append(TestCase(name, *data['in'], *data[out]))
        else:
            # a case with only one side present is a hard error
            log.error('dangling sample found: %s', str(data))
            assert False
    return testcases
def get_available_languages(self, session: Optional[requests.Session] = None) -> List[Language]:
    """List the languages accepted for submission, scraped from the beta submit form.

    :raises NotLoggedInError: when the submit form is absent from the page
    """
    session = session or utils.new_default_session()
    # fetch the problem page on the beta interface
    resp = _request('GET', self.get_url(type='beta'), session=session)
    # the submit form is only rendered for logged-in users
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', action='/contests/{}/submit'.format(self.contest_id))
    if form is None:
        log.error('not logged in')
        raise NotLoggedInError
    # NOTE: AtCoder can vary languages depending on tasks, even in one contest. here, ignores this fact.
    select = form.find('div', id='select-lang').find('select', attrs={'name': 'data.LanguageId'})
    return [Language(option.attrs['value'], option.string) for option in select.find_all('option')]
def get_available_languages(self, session: Optional[requests.Session] = None) -> List[Language]:
    """List the languages accepted for submission, scraped from the legacy submit page.

    :raises NotLoggedInError: when the server redirects to the login page
    """
    session = session or utils.new_default_session()
    # fetch the legacy submit page
    url = 'http://{}.contest.atcoder.jp/submit'.format(self.contest_id)
    resp = _request('GET', url, session=session)
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    if AtCoderService._report_messages(msgs, unexpected=True):
        return []
    # check whether logged in: a redirect to /login means we are not
    path = utils.normpath(urllib.parse.urlparse(resp.url).path)
    if path.startswith('/login'):
        log.error('not logged in')
        raise NotLoggedInError
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    # NOTE: AtCoder can vary languages depending on tasks, even in one contest. here, ignores this fact.
    select = soup.find('select', class_='submit-language-selector')
    return [Language(option.attrs['value'], option.string) for option in select.find_all('option')]
def _update_cloned_repository(cls) -> None:
    """Clone or pull the library-checker-problems repository, at most once per process."""
    if cls.is_repository_updated:
        return  # already synced during this run
    # verify git is installed before shelling out for real
    try:
        subprocess.check_call(['git', '--version'], stdout=sys.stdout, stderr=sys.stderr)
    except FileNotFoundError:
        log.error('git command not found')
        raise
    path = LibraryCheckerService._get_cloned_repository_path()
    if path.exists():
        # sync the problem repository
        log.status('$ git -C %s pull', str(path))
        git_command = ['git', '-C', str(path), 'pull']
    else:
        # init the problem repository
        url = 'https://github.com/yosupo06/library-checker-problems'
        log.status('$ git clone %s %s', url, path)
        git_command = ['git', 'clone', url, str(path)]
    subprocess.check_call(git_command, stdout=sys.stdout, stderr=sys.stderr)
    cls.is_repository_updated = True
def get_latest_version_from_pypi() -> str:
    """Return the latest released version of this package, using an on-disk cache.

    Falls back to '0.0.0' when PyPI is unreachable; that failure value is also
    cached so we don't retry on every invocation within the update interval.
    """
    pypi_url = 'https://pypi.org/pypi/{}/json'.format(version.__package_name__)
    version_cache_path = cache_dir / "pypi.json"
    update_interval = 60 * 60 * 8  # 8 hours

    # load cache, tolerating a corrupted or incomplete file instead of crashing
    if version_cache_path.exists():
        try:
            with version_cache_path.open() as fh:
                cache = json.load(fh)
            if time.time() < cache['time'] + update_interval:
                return cache['version']
        except (ValueError, KeyError, TypeError):
            # a broken cache is not fatal; fall through and refresh it below
            log.debug('ignoring broken version cache: %s', str(version_cache_path))

    # get
    try:
        resp = request('GET', pypi_url, session=requests.Session())
        data = json.loads(resp.content.decode())
        value = data['info']['version']
    except requests.RequestException as e:
        log.error(str(e))
        value = '0.0.0'  # ignore since this failure is not important
    cache = {
        'time': int(time.time()),  # use timestamp because Python's standard datetime library is too weak to parse strings
        'version': value,
    }

    # store cache
    version_cache_path.parent.mkdir(parents=True, exist_ok=True)
    with version_cache_path.open('w') as fh:
        json.dump(cache, fh)

    return value
def extract_from_zip(zip_data: bytes, format: str, out: str = 'out') -> List[TestCase]:
    """Extract input/output test cases from an in-memory zip archive.

    :param out: is the extension for output files. This is used when the zip-file contains files like `sample-1.ans` instead of `sample-1.out`.
    """
    # regexes substituted for the %-placeholders: %s = case name, %e = side ("in" or the output extension)
    table = {
        's': r'[^/]+',
        'e': r'(in|{})'.format(out),
    }
    grouped = collections.defaultdict(dict)  # type: Dict[str, Dict[str, Tuple[str, bytes]]]
    with zipfile.ZipFile(io.BytesIO(zip_data)) as archive:
        for filename in archive.namelist():
            # TODO: use `fh.getinfo(filename).is_dir()` after we stop supporting Python 3.5
            if filename.endswith('/'):
                continue
            m = onlinejudge._implementation.format_utils.percentparse(filename, format, table)
            assert m
            assert m['e'] not in grouped[m['s']]
            grouped[m['s']][m['e']] = (filename, archive.read(filename))
    testcases = []  # type: List[TestCase]
    for name in sorted(grouped.keys()):
        data = grouped[name]
        if 'in' in data and out in data:
            testcases.append(TestCase(name, *data['in'], *data[out]))
        else:
            # a case with only one side present is a hard error
            log.error('dangling sample found: %s', str(data))
            assert False
    return testcases
def call_generate_scanner(self, url, expected, options=None):
    """Run `oj generate-scanner` against *url* and assert its stdout equals *expected*.

    :param options: extra command-line flags.
        NOTE: this used to default to a mutable `[]`, the classic shared-default
        pitfall; it now defaults to None and builds a fresh list per call.
    """
    options = list(options) if options is not None else []
    cmd = [self.ojtools, '-v', 'generate-scanner', url] + options
    output = subprocess.check_output(cmd, stderr=sys.stderr).decode()
    log.status('result:\n%s', output)
    if expected != output:
        log.error('expected:\n%s' % expected)
    self.assertEqual(expected, output)
    time.sleep(1)  # be gentle to the judge server between consecutive tests
def get_available_languages(self, *, session: Optional[requests.Session] = None) -> List[Language]:
    """Return the languages this problem accepts for submission.

    :raises NotLoggedInError: when the language list is unavailable (requires sign-in)
    """
    languages = self.download_data(session=session).available_languages
    if languages is None:
        log.error('not logged in')
        raise NotLoggedInError
    return languages
def submit_code(self, code: bytes, language_id: LanguageId, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> Submission:
    """Submit *code* to this problem through the legacy contest.atcoder.jp interface.

    :raises NotLoggedInError:
    :raises SubmissionError:
    """
    # reject a language id the contest does not offer before any submission round-trip
    assert language_id in [language.id for language in self.get_available_languages(session=session)]
    session = session or utils.new_default_session()
    # get
    url = 'http://{}.contest.atcoder.jp/submit'.format(self.contest_id)  # TODO: use beta.atcoder.jp
    resp = _request('GET', url, session=session)
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    if AtCoderService._report_messages(msgs, unexpected=True):
        raise SubmissionError
    # check whether logged in: a redirect to /login means we are not
    path = utils.normpath(urllib.parse.urlparse(resp.url).path)
    if path.startswith('/login'):
        log.error('not logged in')
        raise NotLoggedInError
    # parse
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', action=re.compile(r'^/submit\?task_id='))
    if not form:
        log.error('form not found')
        raise SubmissionError
    log.debug('form: %s', str(form))
    # post: the language field name embeds the task id, hence the extra lookup
    task_id = self._get_task_id(session=session)
    form = utils.FormSender(form, url=resp.url)
    form.set('task_id', str(task_id))
    form.set('source_code', code)
    form.set('language_id_{}'.format(task_id), str(language_id))
    resp = form.request(session=session)
    resp.raise_for_status()
    # result
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    AtCoderService._report_messages(msgs)
    if '/submissions/me' in resp.url:
        # example: https://practice.contest.atcoder.jp/submissions/me#32174
        # CAUTION: this URL is not a URL of the submission
        log.success('success: result: %s', resp.url)
        # NOTE: ignore the returned legacy URL and use beta.atcoder.jp's one
        url = 'https://beta.atcoder.jp/contests/{}/submissions/me'.format(self.contest_id)
        return utils.DummySubmission(url, problem=self)
    else:
        log.failure('failure')
        log.debug('redirected to %s', resp.url)
        raise SubmissionError('it may be a rate limit')
def exec_command(command_str: str, *, stdin: Optional[IO[Any]] = None, input: Optional[bytes] = None, timeout: Optional[float] = None, gnu_time: Optional[str] = None) -> Tuple[Dict[str, Any], subprocess.Popen]:
    """Run *command_str* and measure its wall-clock time and (optionally) memory.

    :param stdin: a file object to use as the child's stdin (mutually exclusive with *input*)
    :param input: bytes to feed to the child's stdin via a pipe
    :param timeout: seconds before the child is terminated; on timeout `answer` is None
    :param gnu_time: path to a GNU time binary; when given, peak memory is measured
    :returns: an info dict (`answer` bytes-or-None, `elapsed` seconds, `memory` MB-or-None) and the Popen object
    """
    if input is not None:
        assert stdin is None
        stdin = subprocess.PIPE  # type: ignore
    # GNU time writes its report to a file, so allocate a temp file only when needed
    if gnu_time is not None:
        context = tempfile.NamedTemporaryFile(delete=True)  # type: Any
    else:
        context = contextlib.ExitStack()  # TODO: we should use contextlib.nullcontext() if possible
    with context as fh:
        command = shlex.split(command_str)
        if gnu_time is not None:
            # wrap the command: -f '%M' reports max RSS in KB into fh.name
            command = [gnu_time, '-f', '%M', '-o', fh.name, '--'] + command
        if os.name == 'nt':
            # HACK: without this encoding and decoding, something randomly fails with multithreading; see https://github.com/kmyk/online-judge-tools/issues/468
            command = command_str.encode().decode()  # type: ignore
        begin = time.perf_counter()

        # We need to kill processes called from the "time" command using process groups. Without this, orphans spawn. see https://github.com/kmyk/online-judge-tools/issues/640
        try:
            proc = subprocess.Popen(command, stdin=stdin, stdout=subprocess.PIPE, stderr=sys.stderr)
        except FileNotFoundError:
            log.error('No such file or directory: %s', command)
            sys.exit(1)
        except PermissionError:
            log.error('Permission denied: %s', command)
            sys.exit(1)
        try:
            answer, _ = proc.communicate(input=input, timeout=timeout)
        except subprocess.TimeoutExpired:
            # NOTE(review): terminate() is asynchronous; the child may still be
            # running briefly after this point — confirm whether a wait() is wanted
            proc.terminate()
            answer = None
        end = time.perf_counter()
        memory = None  # type: Optional[float]
        if gnu_time is not None:
            with open(fh.name) as fh1:
                reported = fh1.read()
            log.debug('GNU time says:\n%s', reported)
            # the last line is max RSS in KB (earlier lines may hold error text)
            if reported.strip() and reported.splitlines()[-1].isdigit():
                memory = int(reported.splitlines()[-1]) / 1000
    info = {
        'answer': answer,  # Optional[byte]
        'elapsed': end - begin,  # float, in second
        'memory': memory,  # Optional[float], in megabyte
    }
    return info, proc
def get_available_languages(self, session: Optional[requests.Session] = None) -> List[Language]:
    """Return the languages this problem accepts for submission.

    :raises NotLoggedInError: when the language list is unavailable (requires sign-in)
    """
    languages = self.download_content(session=session).available_languages
    if languages is None:
        log.error('not logged in')
        raise NotLoggedInError
    return languages
def main(args: Optional[List[str]] = None) -> None:
    """CLI entry point: configure logging, parse arguments, and dispatch to the subcommand.

    :param args: argv-style list for testing; defaults to sys.argv when None
    """
    log.addHandler(log.logging.StreamHandler(sys.stderr))
    log.setLevel(log.logging.INFO)
    version_check()
    parser = get_parser()
    namespace = parser.parse_args(args=args)
    try:
        run_program(namespace, parser=parser)
    except NotImplementedError:  # NOTE: the bound name `e` was unused, so it is no longer captured
        log.debug('\n' + traceback.format_exc())
        log.error('NotImplementedError')
        log.info('The operation you specified is not supported yet. Pull requests are welcome.')
        log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/CONTRIBUTING.md')
def exec_command(command: List[str], timeout: Optional[float] = None, **kwargs) -> Tuple[bytes, subprocess.Popen]:
    """Run *command*, capture its stdout, and return ``(stdout, process)``.

    Exits the whole program with status 1 when the executable is missing or
    not executable. On timeout the captured output is empty bytes.

    :param timeout: seconds to wait; None waits forever.
        NOTE: the annotation was `float = None`, which is wrong — fixed to Optional[float].
    :param kwargs: forwarded to subprocess.Popen (e.g. cwd, env, stdin)
    """
    try:
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=sys.stderr, **kwargs)
    except FileNotFoundError:
        log.error('No such file or directory: %s', command)
        sys.exit(1)
    except PermissionError:
        log.error('Permission denied: %s', command)
        sys.exit(1)
    try:
        answer, _ = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        answer = b''
    return answer, proc
def add(self, content: bytes, name: str) -> None:
    """Feed one labeled string; consecutive calls are paired into (input, output) cases.

    :raises SampleParseError: when the name suggests the string is on the wrong side
    """
    def smells_like_output(s: str) -> bool:
        return bool(re.search('output', s, re.IGNORECASE) or re.search('出力', s))

    def smells_like_input(s: str) -> bool:
        return bool(re.search('input', s, re.IGNORECASE) or re.search('入力', s))

    if self._dangling is None:
        # expecting the input half of a pair
        if smells_like_output(name):
            log.error('strange name for input string: %s', name)
            raise SampleParseError()
        self._dangling = (name, content)
    else:
        # expecting the output half of a pair
        # titles like "Output for Sample Input 1" are legitimate, so only reject
        # names that look like input and NOT like output
        if smells_like_input(name) and not smells_like_output(name):
            log.error('strange name for output string: %s', name)
            raise SampleParseError()
        input_name, input_content = self._dangling
        label = 'sample-{}'.format(len(self._testcases) + 1)
        self._testcases += [TestCase(label, input_name, input_content, name, content)]
        self._dangling = None
def download_system_cases(self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download the full system test data via HackerRank's REST API as a zip, and unpack it."""
    session = session or utils.get_default_session()
    # example: https://www.hackerrank.com/rest/contests/hourrank-1/challenges/beautiful-array/download_testcases
    endpoint = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}/download_testcases'.format(self.contest_slug, self.challenge_slug)
    resp = utils.request('GET', endpoint, session=session, raise_for_status=False)
    if resp.status_code == 200:
        # the archive layout is input/inputN.txt and output/outputN.txt
        return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, '%eput/%eput%s.txt')
    log.error('response: %s', resp.content.decode())
    return []
def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> onlinejudge.type.Submission:
    """Submit *code* to this Codeforces problem.

    :raises NotLoggedInError:
    :raises SubmissionError:
    """
    session = session or utils.get_default_session()
    # get
    resp = utils.request('GET', self.get_url(), session=session)
    # parse: the submit form only exists for logged-in users
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', class_='submitForm')
    if form is None:
        log.error('not logged in')
        raise NotLoggedInError
    log.debug('form: %s', str(form))
    # make data
    form = utils.FormSender(form, url=resp.url)
    form.set('programTypeId', language_id)
    form.set_file('sourceFile', filename or 'code', code)
    resp = form.request(session=session)
    resp.raise_for_status()
    # result: a successful submission redirects to the "my submissions" page
    if resp.url.endswith('/my'):
        # example: https://codeforces.com/contest/598/my
        log.success('success: result: %s', resp.url)
        return utils.DummySubmission(resp.url, problem=self)
    else:
        log.failure('failure')
        # parse error messages from the re-rendered form page
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        msgs = []  # type: List[str]
        for span in soup.findAll('span', class_='error'):
            msgs += [span.string]
            log.warning('Codeforces says: "%s"', span.string)
        raise SubmissionError('it may be the "You have submitted exactly the same code before" error: ' + str(msgs))
def _generate_test_cases_in_cloned_repository(self):
    """Clone/update the library-checker-problems repository and run its generate.py for this problem."""
    path = self._get_cloned_repository_path()
    # verify git is installed before shelling out for real
    try:
        subprocess.check_call(['git', '--version'], stdout=sys.stdout, stderr=sys.stderr)
    except FileNotFoundError:
        log.error('git command not found')
        raise

    # init the problem repository
    if not path.exists():
        url = 'https://github.com/yosupo06/library-checker-problems'
        log.status('$ git clone %s %s', url, path)
        subprocess.check_call(['git', 'clone', url, str(path)], stdout=sys.stdout, stderr=sys.stderr)

    log.status('$ cd %s', path)
    with utils.chdir(path):
        # sync the problem repository
        log.status('$ git pull')
        subprocess.check_call(['git', 'pull'], stdout=sys.stdout, stderr=sys.stderr)

        # generate test cases (generate.py has known portability limitations)
        if sys.version_info < (3, 6):
            log.warning("generate.py may not work on Python 3.5 or older")
        if os.name == 'nt':
            log.warning("generate.py may not work on Windows")
        log.status('$ ./generate.py problems.toml -p %s', self.problem_id)
        try:
            # run with the current interpreter; cwd is the repository root (see chdir above)
            subprocess.check_call([sys.executable, 'generate.py', 'problems.toml', '-p', self.problem_id], stdout=sys.stdout, stderr=sys.stderr)
        except subprocess.CalledProcessError:
            log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
            raise
def _get_model(self, session: Optional[requests.Session] = None) -> Dict[str, Any]:
    """Fetch this challenge's JSON model from HackerRank's REST API.

    :raises SubmissionError: when the API response reports a failed status
    """
    session = session or utils.new_default_session()
    # query the REST endpoint for the challenge
    endpoint = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}'.format(self.contest_slug, self.challenge_slug)
    resp = utils.request('GET', endpoint, session=session)
    # parse the JSON envelope: {"status": bool, "model": {...}}
    payload = json.loads(resp.content.decode())
    log.debug('json: %s', payload)
    if not payload['status']:
        log.error('get model: failed')
        raise SubmissionError
    return payload['model']
def _generate_test_cases_in_cloned_repository(self, compile_checker: bool = False) -> None:
    """Run the repository's generate.py to build this problem's test cases.

    :param compile_checker: also pass --compile-checker to generate.py
    :raises subprocess.CalledProcessError: when generate.py fails
    """
    LibraryCheckerService._update_cloned_repository()
    path = LibraryCheckerService._get_cloned_repository_path()
    # generate.py has known portability limitations
    if sys.version_info < (3, 6):
        log.warning("generate.py may not work on Python 3.5 or older")
    if os.name == 'nt':
        log.warning("generate.py may not work on Windows")
    problem_spec = str(self._get_problem_directory_path() / 'info.toml')
    extra_flags = ['--compile-checker'] if compile_checker else []
    command = [sys.executable, str(path / 'generate.py'), problem_spec] + extra_flags
    log.status('$ %s', ' '.join(command))
    try:
        # send the script's stdout to our stderr so it doesn't pollute stdout
        subprocess.check_call(command, stdout=sys.stderr, stderr=sys.stderr)
    except subprocess.CalledProcessError:
        log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
        raise
def _get_task_id(self, session: Optional[requests.Session] = None) -> int:
    """Scrape and memoize the numeric task id used by the legacy submit form.

    :raises SubmissionError: when the page cannot be parsed
    """
    if self._task_id is not None:
        return self._task_id  # cached from a previous call
    session = session or utils.new_default_session()
    # fetch the legacy problem page
    resp = _request('GET', self.get_url(type='old'), session=session)
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    if AtCoderService._report_messages(msgs, unexpected=True):
        raise SubmissionError
    # the task id is embedded in the href of the submit link
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    submit = soup.find('a', href=re.compile(r'^/submit\?task_id='))
    if not submit:
        log.error('link to submit not found')
        raise SubmissionError
    m = re.match(r'^/submit\?task_id=([0-9]+)$', submit.attrs['href'])
    assert m
    self._task_id = int(m.group(1))
    return self._task_id
def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> onlinejudge.type.Submission:
    """Submit *code* to this yukicoder problem.

    :raises NotLoggedInError:
    :raises SubmissionError:
    """
    session = session or utils.get_default_session()
    # get
    url = self.get_url() + '/submit'
    resp = utils.request('GET', url, session=session)
    # parse: the submit form only exists for logged-in users
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form', id='submit_form')
    if not form:
        log.error('form not found')
        raise NotLoggedInError
    # post
    form = utils.FormSender(form, url=resp.url)
    form.set('lang', language_id)
    form.set_file('file', filename or 'code', code)
    form.unset('custom_test')  # don't run as a custom test; submit for judging
    resp = form.request(session=session)
    resp.raise_for_status()
    # result: success redirects to the submission page
    if 'submissions' in resp.url:
        # example: https://yukicoder.me/submissions/314087
        log.success('success: result: %s', resp.url)
        return utils.DummySubmission(resp.url, problem=self)
    else:
        log.failure('failure')
        # surface the site's alert boxes as warnings before failing
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        for div in soup.findAll('div', attrs={'role': 'alert'}):
            log.warning('yukicoder says: "%s"', div.string)
        raise SubmissionError
def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> Submission:
    """Submit *code* to this Toph problem.

    :raises LoginError: when no form is found on the page
        (NOTE(review): the old docstring claimed NotImplementedError, but the body raises LoginError)
    :raises SubmissionError:
    """
    session = session or utils.get_default_session()
    # get
    resp = utils.request('GET', self.get_url(), session=session)
    # parse: the page's form only exists for logged-in users
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form')
    if form is None:
        log.error('not logged in')
        raise LoginError
    log.debug('form: %s', str(form))
    # sanity-check we grabbed the submission form, not some other form on the page
    if form.find('select') and form.find('select').attrs['name'] != 'languageId':
        log.error("Wrong submission URL")
        raise SubmissionError
    # make data
    form = utils.FormSender(form, url=resp.url)
    form.set('languageId', language_id)
    form.set_file('source', 'code', code)
    resp = form.request(session=session)
    resp.raise_for_status()
    # result: success redirects to a /s/<id> submission page
    if '/s/' in resp.url:
        # example: https://toph.co/s/201410
        log.success('success: result: %s', resp.url)
        return utils.DummySubmission(resp.url, problem=self)
    else:
        log.failure('failure')
        log.debug('redirected to %s', resp.url)
        raise SubmissionError
def _get_messages_from_cookie(cls, cookies) -> List[str]:
    """Extract AtCoder's flash messages, which are embedded as `__message_*` cookies."""
    # collect the HTML payload ('c' field) of every message cookie
    raw_tags = []  # type: List[str]
    for cookie in cookies:
        log.debug('cookie: %s', str(cookie))
        if not cookie.name.startswith('__message_'):
            continue
        msg = json.loads(urllib.parse.unquote_plus(cookie.value))
        raw_tags.append(msg['c'])
        log.debug('message: %s: %s', cookie.name, str(msg))
    # each payload is an HTML fragment; take the first tag with non-blank text
    messages = []  # type: List[str]
    for raw in raw_tags:
        soup = bs4.BeautifulSoup(raw, utils.html_parser)
        text = next((tag.string for tag in soup.find_all() if tag.string and tag.string.strip()), None)
        if text is None:
            log.error('failed to parse message')
        else:
            messages.append(text)
    return messages
def download_system_cases( self, session: Optional[requests.Session] = None) -> List[TestCase]: session = session or utils.new_default_session() # get # example: https://www.hackerrank.com/rest/contests/hourrank-1/challenges/beautiful-array/download_testcases url = 'https://www.hackerrank.com/rest/contests/{}/challenges/{}/download_testcases'.format( self.contest_slug, self.challenge_slug) resp = utils.request('GET', url, session=session, raise_for_status=False) if resp.status_code != 200: log.error('response: %s', resp.content.decode()) return [] # parse with zipfile.ZipFile(io.BytesIO(resp.content)) as fh: # list names names = [] # type: List[str] pattern = re.compile(r'(in|out)put/\1put(\d+).txt') for filename in sorted(fh.namelist()): # "input" < "output" if filename.endswith('/'): continue log.debug('filename: %s', filename) m = pattern.match(filename) assert m if m.group(1) == 'in': names += [m.group(2)] # zip samples samples = [] # type: List[TestCase] for name in names: inpath = 'input/input{}.txt'.format(name) outpath = 'output/output{}.txt'.format(name) indata = fh.read(inpath).decode() outdata = fh.read(outpath).decode() samples += [ TestCase(LabeledString(inpath, indata), LabeledString(outpath, outdata)) ] return samples
def download_system_cases(self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download system test cases via AOJ's official testcase API.

    Testcases truncated by the API's size limitation are skipped with a warning.
    """
    session = session or utils.new_default_session()

    # get the list of testcase headers
    # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2Fheader_GET
    url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/header'.format(self.problem_id)
    resp = utils.request('GET', url, session=session)
    # NOTE: this dict was named `header` before, which was then shadowed by the loop variable below
    headers = json.loads(resp.content.decode(resp.encoding))

    # get testcases via the official API
    testcases = []  # type: List[TestCase]
    for header in headers['headers']:
        # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2F%7BproblemId%7D%2F%7Bserial%7D_GET
        url = 'https://judgedat.u-aizu.ac.jp/testcases/{}/{}'.format(self.problem_id, header['serial'])
        resp = utils.request('GET', url, session=session)
        testcase = json.loads(resp.content.decode(resp.encoding))
        # the API truncates very large testcases; detect the truncation marker on either side
        skipped = False
        for kind in ('in', 'out'):  # NOTE: renamed from `type`, which shadowed the builtin
            if testcase[kind].endswith('..... (terminated because of the limitation)\n'):
                log.error('AOJ API says: terminated because of the limitation')
                skipped = True
        if skipped:
            log.warning("skipped due to the limitation of AOJ API")
            continue
        testcases += [TestCase(
            header['name'],
            header['name'],
            testcase['in'].encode(),
            header['name'],
            testcase['out'].encode(),
        )]
    return testcases
def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Fetch this task's example tests from CS Academy's JSON endpoints."""
    session = session or utils.get_default_session()
    base_url = self.get_url()

    # get csrftoken: the API requires the csrftoken cookie echoed in a header
    resp = utils.request('GET', base_url, session=session)
    csrftoken = None
    for cookie in session.cookies:
        if cookie.name == 'csrftoken' and cookie.domain == 'csacademy.com':  # type: ignore
            csrftoken = cookie.value  # type: ignore
    if csrftoken is None:
        log.error('csrftoken is not found')
        return []

    # get config: the contest page returns JSON when requested via XHR headers
    headers = {
        'x-csrftoken': csrftoken,
        'x-requested-with': 'XMLHttpRequest',
    }
    contest_url = 'https://csacademy.com/contest/{}/'.format(self.contest_name)
    resp = utils.request('GET', contest_url, session=session, headers=headers)

    # parse config to find the task whose name matches ours
    assert resp.encoding is None
    config = json.loads(resp.content.decode())  # NOTE: Should I memoize this? Is the CSAcademyRound class required?
    task_config = None
    for it in config['state']['contesttask']:
        if it['name'] == self.task_name:
            task_config = it
    if task_config is None:
        log.error('no such task: %s', self.task_name)
        return []

    # get the task details (multipart POST with the numeric task id)
    get_contest_task_url = 'https://csacademy.com/contest/get_contest_task/'
    payload = {'contestTaskId': (None, str(task_config['id']))}
    headers = {
        'x-csrftoken': csrftoken,
        'x-requested-with': 'XMLHttpRequest',
        'Referer': base_url,
    }
    resp = utils.request('POST', get_contest_task_url, session=session, files=payload, headers=headers)

    # parse
    assert resp.encoding is None
    contest_task = json.loads(resp.content.decode())  # NOTE: Should I memoize this?
    if contest_task.get('title') == 'Page not found':
        log.error('something wrong')
        return []
    samples = []
    for test_number, example_test in enumerate(contest_task['state']['EvalTask'][0]['exampleTests']):
        inname = 'Input {}'.format(test_number)
        outname = 'Output {}'.format(test_number)
        samples += [TestCase(
            'sample-{}'.format(test_number + 1),
            inname,
            example_test['input'].encode(),
            outname,
            example_test['output'].encode(),
        )]
    return samples
def download_system_cases(self, session: Optional[requests.Session] = None) -> List[TestCase]:
    """Download this problem's system test cases as a zip and pair inputs with outputs.

    :raises NotLoggedInError: the testcase archive is only served to logged-in users
    """
    session = session or utils.new_default_session()
    if not self.get_service().is_logged_in(session=session):
        raise NotLoggedInError

    # get
    url = 'https://yukicoder.me/problems/no/{}/testcase.zip'.format(self.problem_no)
    resp = utils.request('GET', url, session=session)

    # parse: archive layout is test_in/<name> and test_out/<name>; pair by basename
    basenames = collections.defaultdict(dict)  # type: Dict[str, Dict[str, LabeledString]]
    with zipfile.ZipFile(io.BytesIO(resp.content)) as fh:
        for filename in sorted(fh.namelist()):  # "test_in" < "test_out"
            dirname = os.path.dirname(filename)
            basename = os.path.basename(filename)
            kind = {'test_in': 'input', 'test_out': 'output'}[dirname]
            content = fh.read(filename).decode()
            name = basename
            if os.path.splitext(name)[1] == '.in':  # ".in" extension is confusing
                name = os.path.splitext(name)[0]
            basenames[basename][kind] = LabeledString(name, content)
    samples = []  # type: List[TestCase]
    for basename in sorted(basenames.keys()):
        data = basenames[basename]
        # keep only complete pairs; an unpaired file is reported but not fatal
        if 'input' not in data or 'output' not in data or len(data) != 2:
            log.error('dangling sample found: %s', str(data))
        else:
            samples += [TestCase(data['input'], data['output'])]
    return samples
def login_with_github(self, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> None:
    """Sign in to yukicoder by completing its GitHub OAuth flow with username/password.

    :param get_credentials: callable returning a (username, password) pair
    :raise LoginError:
    """
    session = session or utils.get_default_session()
    url = 'https://yukicoder.me/auth/github'

    # get: if we end up back on yukicoder.me immediately, we are already signed in
    resp = utils.request('GET', url, session=session)
    if urllib.parse.urlparse(resp.url).hostname == 'yukicoder.me':
        log.info('You have already signed in.')
        return

    # redirect to github.com
    # parse GitHub's login form
    soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
    form = soup.find('form')
    if not form:
        log.error('form not found')
        raise LoginError('something wrong')
    log.debug('form: %s', str(form))

    # post the credentials to GitHub
    username, password = get_credentials()
    form = utils.FormSender(form, url=resp.url)
    form.set('login', username)
    form.set('password', password)
    resp = form.request(session)
    resp.raise_for_status()

    # success is detected by being redirected back to yukicoder.me
    if urllib.parse.urlparse(resp.url).hostname == 'yukicoder.me':
        log.success('You signed in.')
    else:
        log.failure('You failed to sign in. Wrong user ID or password.')
        raise LoginError