def _update_cloned_repository(cls) -> None:
    """Clone or pull the library-checker-problems repository.

    Idempotent per process: guarded by ``cls.is_repository_updated``.

    :raises FileNotFoundError: if the ``git`` command is not installed
    :raises subprocess.CalledProcessError: if a git invocation fails
    """
    if cls.is_repository_updated:
        return  # already cloned/pulled earlier in this process

    # fail fast with a clear message when git is not available at all
    try:
        subprocess.check_call(['git', '--version'], stdout=sys.stdout, stderr=sys.stderr)
    except FileNotFoundError:
        log.error('git command not found')
        raise

    path = LibraryCheckerService._get_cloned_repository_path()
    if not path.exists():
        # init the problem repository
        url = 'https://github.com/yosupo06/library-checker-problems'
        log.status('$ git clone %s %s', url, path)
        subprocess.check_call(['git', 'clone', url, str(path)], stdout=sys.stdout, stderr=sys.stderr)
    else:
        # sync the problem repository
        log.status('$ git -C %s pull', str(path))
        subprocess.check_call(['git', '-C', str(path), 'pull'], stdout=sys.stdout, stderr=sys.stderr)

    cls.is_repository_updated = True
def generate_output(args: 'argparse.Namespace') -> None:
    """Generate expected-output files by running ``args.command`` on each input file.

    Test cases that already have an output file, or whose command exits
    non-zero, are skipped.  The command's stdout is written to the matching
    ``*.out`` path derived from ``args.format``.
    """
    if not args.test:
        args.test = cutils.glob_with_format(args.directory, args.format)  # by default
    if args.ignore_backup:
        args.test = cutils.drop_backup_or_hidden_files(args.test)
    tests = cutils.construct_relationship_of_files(args.test, args.directory, args.format)
    for name, it in sorted(tests.items()):
        log.emit('')
        log.info('%s', name)
        if 'out' in it:
            # never clobber an existing expected-output file
            log.info('output file already exists.')
            log.info('skipped.')
            continue
        with it['in'].open() as inf:
            begin = time.perf_counter()
            answer, proc = utils.exec_command(args.command, shell=True, stdin=inf)
            end = time.perf_counter()
            log.status('time: %f sec', end - begin)
        if proc.returncode != 0:
            log.failure(log.red('RE') + ': return code %d', proc.returncode)
            log.info('skipped.')
            continue
        log.emit(utils.snip_large_file_content(answer, limit=40, head=20, tail=10, bold=True))
        match_result = cutils.match_with_format(args.directory, args.format, it['in'])  # type: Optional[Match[Any]]
        if match_result is not None:
            matched_name = match_result.groupdict()['name']  # type: str
        else:
            assert False  # inputs come from construct_relationship_of_files, so they must match the format
        path = cutils.path_from_format(args.directory, args.format, name=matched_name, ext='out')
        if not path.parent.is_dir():
            os.makedirs(str(path.parent), exist_ok=True)
        with path.open('wb') as fh:
            fh.write(answer)
        log.success('saved to: %s', path)
def match(a, b):
    """Special-judge callback: run the external judge against the user output *a*.

    NOTE(review): this is a closure — ``judge``, ``test_input_path``,
    ``test_output_path``, ``rstrip``, ``rstrip_targets`` and ``silent`` are
    free variables captured from the enclosing scope; *b* is unused (the
    signature is presumably fixed by the caller — confirm).

    :return: True iff the judge process exits with status 0
    """
    # On Windows, a temp file is not created if we use "with" statement,
    user_output = tempfile.NamedTemporaryFile(delete=False)
    judge_result = False
    try:
        if rstrip:
            user_output.write(a.rstrip(rstrip_targets).encode())
        else:
            user_output.write(a.encode())
        user_output.close()  # must be closed before the judge process reads it

        arg0 = judge
        arg1 = str(test_input_path.resolve())
        arg2 = user_output.name
        arg3 = str((str(test_output_path.resolve()) if test_output_path is not None else ''))
        actual_command = '{} {} {} {}'.format(arg0, arg1, arg2, arg3)  # TODO: quote arguments for paths including spaces; see https://github.com/kmyk/online-judge-tools/pull/584
        log.status('$ %s', actual_command)
        info, proc = utils.exec_command(actual_command)
        if not silent:
            log.emit('judge\'s output:\n%s', utils.snip_large_file_content(info['answer'] or b'', limit=40, head=20, tail=10, bold=True))
        judge_result = (proc.returncode == 0)
    finally:
        os.unlink(user_output.name)  # always clean up the temp file
    return judge_result
def login(self, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> None:
    """Log in to AtCoder (via the practice contest login form).

    :param get_credentials: callable returning a ``(username, password)`` pair
    :raises LoginError:
    """
    session = session or utils.new_default_session()
    url = 'https://practice.contest.atcoder.jp/login'

    # get: probe the login page first to detect an existing session
    resp = _request('GET', url, session=session, allow_redirects=False)
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    for msg in msgs:
        log.status('message: %s', msg)
    if msgs:
        if 'login' not in resp.url:
            return  # redirect means that you are already logged in
        else:
            raise LoginError('something wrong: ' + str(msgs))

    # post the credentials
    username, password = get_credentials()
    resp = _request('POST', url, session=session, data={'name': username, 'password': password}, allow_redirects=False)
    msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
    AtCoderService._report_messages(msgs)
    if 'login' not in resp.url:
        pass  # AtCoder redirects to the top page if success
    else:
        raise LoginError('your password may be not correct: ' + str(msgs))
def download(args: 'argparse.Namespace') -> None:
    """Download sample (or system) test cases of ``args.url`` and save them under ``args.directory``.

    :raises requests.exceptions.InvalidURL: if no service recognizes the URL
    :raises onlinejudge.type.SampleParseError: if no samples are found
    :raises FileExistsError: if a destination file already exists
    """
    # prepare values
    problem = onlinejudge.dispatch.problem_from_url(args.url)
    if problem is None:
        raise requests.exceptions.InvalidURL('The contest "%s" is not supported' % args.url)
    is_default_format = args.format is None and args.directory is None  # must be here since args.directory and args.format are overwritten
    if args.directory is None:
        args.directory = pathlib.Path('test')
    if args.format is None:
        args.format = '%b.%e'

    # get samples from the server
    with utils.with_cookiejar(utils.new_session_with_our_user_agent(), path=args.cookie) as sess:
        if args.yukicoder_token and isinstance(problem, YukicoderProblem):
            sess.headers['Authorization'] = 'Bearer {}'.format(args.yukicoder_token)
        if args.system:
            samples = problem.download_system_cases(session=sess)
        else:
            samples = problem.download_sample_cases(session=sess)

    if not samples:
        raise onlinejudge.type.SampleParseError("Sample not found")

    # append the history for submit command
    if not args.dry_run and is_default_format:
        history = onlinejudge._implementation.download_history.DownloadHistory()
        history.add(problem)

    # write samples to files
    for i, sample in enumerate(samples):
        log.emit('')
        log.info('sample %d', i)
        for ext in ['in', 'out']:
            data = getattr(sample, ext + 'put_data')
            if data is None:
                continue
            name = sample.name
            # %-format substitution table for the destination path
            table = {}
            table['i'] = str(i + 1)
            table['e'] = ext
            table['n'] = name
            table['b'] = os.path.basename(name)
            table['d'] = os.path.dirname(name)
            path = args.directory / format_utils.percentformat(args.format, table)  # type: pathlib.Path
            log.status('%sput: %s', ext, name)
            if not args.silent:
                log.emit(utils.snip_large_file_content(data, limit=40, head=20, tail=10, bold=True))
            if args.dry_run:
                continue
            if path.exists():
                raise FileExistsError('Failed to download since file already exists: ' + str(path))
            path.parent.mkdir(parents=True, exist_ok=True)
            with path.open('wb') as fh:
                fh.write(data)
            log.success('saved to: %s', path)

    # print json
    if args.json:
        print(json.dumps(list(map(convert_sample_to_dict, samples))))
def _report_messages(cls, msgs: List[str], unexpected: bool = False) -> bool:
    """Log every message and report whether any were present.

    :param msgs: messages extracted from the server response
    :param unexpected: when True and messages exist, also log a failure line
    :return: True iff *msgs* is non-empty
    """
    for text in msgs:
        log.status('message: %s', text)
    has_messages = bool(msgs)
    if has_messages and unexpected:
        log.failure('unexpected messages found')
    return has_messages
def call_generate_scanner(self, url, expected, options=None):
    """Run ``oj -v generate-scanner URL`` and assert its output equals *expected*.

    :param url: problem URL passed to generate-scanner
    :param expected: the exact expected stdout of the command
    :param options: extra command-line flags (defaults to none)

    Fix: the previous default ``options=[]`` was a mutable default argument
    shared across calls; use ``None`` as the sentinel instead.
    """
    if options is None:
        options = []
    cmd = [self.ojtools, '-v', 'generate-scanner', url] + options
    output = subprocess.check_output(cmd, stderr=sys.stderr).decode()
    log.status('result:\n%s', output)
    if expected != output:
        log.error('expected:\n%s' % expected)
    self.assertEqual(expected, output)
    time.sleep(1)  # be polite: avoid hammering the judge server between tests
def _flush(self) -> None:
    """Truncate the history file to its most recent half once it exceeds 1 MiB.

    Bug fix: the slice used to be ``history_lines[:-len(history_lines) // 2]``,
    which kept the *oldest* half and silently discarded the most recent
    entries — exactly the ones ``get()`` looks up.  Keep the newest half.
    """
    # halve the size if it is more than 1MiB
    if self.path.stat().st_size >= 1024 * 1024:
        with open(str(self.path)) as fh:
            history_lines = fh.readlines()
        with open(str(self.path), 'w') as fh:
            # keep the last (newest) half of the lines
            fh.write(''.join(history_lines[-len(history_lines) // 2:]))
        log.status('halve history at: %s', self.path)
def contest_from_url(url: str) -> Optional[Contest]:
    """Resolve *url* to a :class:`Contest` instance, or None when nothing matches."""
    for contest_class in contests:
        candidate = contest_class.from_url(url)
        if candidate is None:
            continue
        log.status('contest recognized: %s: %s', str(candidate), url)
        return candidate
    log.failure('unknown contest: %s', url)
    return None
def format_code(code: bytes, dos2unix: bool = False, rstrip: bool = False) -> bytes:
    """Normalize submission source code.

    :param code: raw source bytes
    :param dos2unix: convert CRLF line endings to LF
    :param rstrip: strip trailing whitespace from the end of the code
    :return: the transformed byte string (unchanged when both flags are off)
    """
    transformed = code
    if dos2unix:
        log.status('dos2unix...')
        transformed = transformed.replace(b'\r\n', b'\n')
    if rstrip:
        log.status('rstrip...')
        transformed = transformed.rstrip()
    return transformed
def submission_from_url(url: str) -> Optional[Submission]:
    """Resolve *url* to a :class:`Submission` instance, or None when nothing matches."""
    for submission_class in submissions:
        candidate = submission_class.from_url(url)
        if candidate is None:
            continue
        log.status('submission recognized: %s: %s', str(candidate), url)
        return candidate
    log.failure('unknown submission: %s', url)
    return None
def download(args: 'argparse.Namespace') -> None:
    """Download sample (or system) cases of ``args.url`` and write them under ``args.directory``.

    Older variant: exits the process when the URL is not recognized and
    supports an ``--overwrite`` flag instead of raising on existing files.
    """
    # prepare values
    problem = onlinejudge.dispatch.problem_from_url(args.url)
    if problem is None:
        sys.exit(1)
    is_default_format = args.format is None and args.directory is None  # must be here since args.directory and args.format are overwritten
    if args.directory is None:
        args.directory = pathlib.Path('test')
    if args.format is None:
        args.format = '%b.%e'

    # get samples from the server
    with utils.with_cookiejar(utils.new_session_with_our_user_agent(), path=args.cookie) as sess:
        if args.system:
            samples = problem.download_system_cases(session=sess)  # type: ignore
        else:
            samples = problem.download_sample_cases(session=sess)  # type: ignore

    # append the history for submit command
    if not args.dry_run and is_default_format:
        history = onlinejudge._implementation.download_history.DownloadHistory()
        history.add(problem)

    # write samples to files
    for i, sample in enumerate(samples):
        log.emit('')
        log.info('sample %d', i)
        for ext in ['in', 'out']:
            data = getattr(sample, ext + 'put_data')
            if data is None:
                continue
            name = sample.name
            # %-format substitution table for the destination path
            table = {}
            table['i'] = str(i + 1)
            table['e'] = ext
            table['n'] = name
            table['b'] = os.path.basename(name)
            table['d'] = os.path.dirname(name)
            path = args.directory / format_utils.percentformat(args.format, table)  # type: pathlib.Path
            log.status('%sput: %s', ext, name)
            if not args.silent:
                log.emit(utils.snip_large_file_content(data, limit=40, head=20, tail=10, bold=True))
            if args.dry_run:
                continue
            if path.exists():
                log.warning('file already exists: %s', path)
                if not args.overwrite:
                    log.warning('skipped')
                    continue
            path.parent.mkdir(parents=True, exist_ok=True)
            with path.open('wb') as fh:
                fh.write(data)
            log.success('saved to: %s', path)

    # print json
    if args.json:
        print(json.dumps(list(map(convert_sample_to_dict, samples))))
def with_cookiejar(session: requests.Session, path: pathlib.Path = default_cookie_path) -> Generator[requests.Session, None, None]: session.cookies = http.cookiejar.LWPCookieJar(str(path)) # type: ignore if path.exists(): log.status('load cookie from: %s', path) session.cookies.load() # type: ignore yield session log.status('save cookie to: %s', path) path.parent.mkdir(parents=True, exist_ok=True) session.cookies.save() # type: ignore path.chmod(0o600) # NOTE: to make secure a little bit
def add(self, problem: onlinejudge.type.Problem, directory: Optional[pathlib.Path] = None) -> None:
    """Append one download-history record (timestamp, directory, problem URL) as a JSON line.

    :param problem: the downloaded problem
    :param directory: directory the samples were saved in; defaults to the
        current working directory *at call time*

    Bug fix: the default used to be ``directory=pathlib.Path.cwd()``, which
    Python evaluates once at import time — every later call with the default
    silently recorded the importer's startup directory instead of the
    caller's current one.
    """
    if directory is None:
        directory = pathlib.Path.cwd()
    self.path.parent.mkdir(parents=True, exist_ok=True)
    with open(str(self.path), 'a') as fh:
        fh.write(json.dumps({
            'timestamp': int(time.time()),  # this should not be int, but Python's strptime is too weak and datetime.fromisoformat is from 3.7
            'directory': str(directory),
            'url': problem.get_url(),
        }) + '\n')
    log.status('append history to: %s', self.path)
    self._flush()
def service_from_url(url: str) -> Optional[Service]:
    """Resolve *url* to a :class:`Service`, trying services directly, then via submission and problem URLs."""
    for service_class in services:
        candidate = service_class.from_url(url)
        if candidate is not None:
            log.status('service recognized: %s: %s', str(candidate), url)
            return candidate
    # a submission URL also identifies its service
    recognized_submission = submission_from_url(url)
    if recognized_submission is not None:
        return recognized_submission.get_service()
    # likewise for a problem URL
    recognized_problem = problem_from_url(url)
    if recognized_problem is not None:
        return recognized_problem.get_service()
    log.failure('unknown service: %s', url)
    return None
def download_sample_cases( self, *, session: Optional[requests.Session] = None) -> List[TestCase]: session = session or utils.get_default_session() # get samples via the official API # reference: http://developers.u-aizu.ac.jp/api?key=judgedat%2Ftestcases%2Fsamples%2F%7BproblemId%7D_GET url = 'https://judgedat.u-aizu.ac.jp/testcases/samples/{}'.format( self.problem_id) resp = utils.request('GET', url, session=session) samples = [] # type: List[TestCase] for i, sample in enumerate( json.loads(resp.content.decode(resp.encoding))): samples += [ TestCase( 'sample-{}'.format(i + 1), str(sample['serial']), sample['in'].encode(), str(sample['serial']), sample['out'].encode(), ) ] # parse HTML if no samples are registered # see: https://github.com/kmyk/online-judge-tools/issues/207 if not samples: log.warning("sample cases are not registered in the official API") log.status("fallback: parsing HTML") # reference: http://developers.u-aizu.ac.jp/api?key=judgeapi%2Fresources%2Fdescriptions%2F%7Blang%7D%2F%7Bproblem_id%7D_GET url = 'https://judgeapi.u-aizu.ac.jp/resources/descriptions/ja/{}'.format( self.problem_id) resp = utils.request('GET', url, session=session) html = json.loads(resp.content.decode(resp.encoding))['html'] # list h3+pre zipper = onlinejudge._implementation.testcase_zipper.SampleZipper() expected_strings = ('入力例', '出力例', 'Sample Input', 'Sample Output') soup = bs4.BeautifulSoup(html, utils.html_parser) for pre in soup.find_all('pre'): tag = pre.find_previous_sibling() if tag and tag.name == 'h3' and tag.string and any( s in tag.string for s in expected_strings): s = utils.textfile(utils.parse_content(pre).lstrip()) zipper.add(s.encode(), tag.string) samples = zipper.get() return samples
def problem_from_url(url: str) -> Optional[Problem]:
    """Resolve *url* to a :class:`Problem` instance, or None when no known judge matches.

    Examples (object addresses vary by run):

    >>> onlinejudge.dispatch.problem_from_url("https://atcoder.jp/contests/abc077/tasks/arc084_b")
    <onlinejudge.service.atcoder.AtCoderProblem object at 0x7fa0538ead68>
    >>> onlinejudge.dispatch.problem_from_url("https://codeforces.com/contest/1012/problem/D")
    <onlinejudge.service.codeforces.CodeforcesProblem object at 0x7fa05a916710>
    """
    for problem_class in problems:
        candidate = problem_class.from_url(url)
        if candidate is None:
            continue
        log.status('problem recognized: %s: %s', str(candidate), url)
        return candidate
    log.failure('unknown problem: %s', url)
    return None
def generate_output_single_case(test_name: str, test_input_path: pathlib.Path, *, lock: Optional[threading.Lock] = None, args: 'argparse.Namespace') -> None:
    """Run ``args.command`` on one input file and save its stdout as the expected output.

    :param lock: present only when running in parallel; in that case all
        logging is deferred until the lock is held so worker output does not
        interleave
    """
    # print the header
    if lock is None:
        log.emit('')
        log.info('%s', test_name)

    # run the command
    with test_input_path.open() as inf:
        info, proc = utils.exec_command(args.command, stdin=inf, timeout=args.tle)
        answer = info['answer']  # type: Optional[bytes]
        elapsed = info['elapsed']  # type: float

    # acquire lock to print logs properly, if in parallel
    nullcontext = contextlib.ExitStack()
    with lock or nullcontext:
        if lock is not None:
            log.emit('')
            log.info('%s', test_name)

        # check the result
        log.status('time: %f sec', elapsed)
        if proc.returncode is None:
            # a None return code here marks a timeout kill
            log.failure(log.red('TLE'))
            log.info('skipped.')
            return
        elif proc.returncode != 0:
            log.failure(log.red('RE') + ': return code %d', proc.returncode)
            log.info('skipped.')
            return
        assert answer is not None
        log.emit(utils.snip_large_file_content(answer, limit=40, head=20, tail=10, bold=True))

        # find the destination path
        match_result = fmtutils.match_with_format(args.directory, args.format, test_input_path)  # type: Optional[Match[Any]]
        if match_result is not None:
            matched_name = match_result.groupdict()['name']  # type: str
        else:
            assert False  # the input path was produced from the same format, so it must match
        test_output_path = fmtutils.path_from_format(args.directory, args.format, name=matched_name, ext='out')

        # write the result to the file
        if not test_output_path.parent.is_dir():
            os.makedirs(str(test_output_path.parent), exist_ok=True)
        with test_output_path.open('wb') as fh:
            fh.write(answer)
        log.success('saved to: %s', test_output_path)
def login_with_browser(service: onlinejudge.type.Service, *, session: requests.Session) -> None:
    """Open the service's login page in Selenium/Firefox and copy the resulting cookies into *session*.

    Blocks, polling the browser, until the user closes the window.

    :raises WebDriverException: if the Firefox WebDriver cannot be started
    """
    try:
        import selenium.webdriver
    except ImportError:
        # NOTE(review): re-raising the bare ImportError gives the user no hint
        # to install Selenium — confirm whether a friendlier error is wanted.
        raise

    try:
        profile = selenium.webdriver.FirefoxProfile()
        # make the browser present the same User-Agent as our requests session
        profile.set_preference("general.useragent.override", session.headers['User-Agent'])
        driver = selenium.webdriver.Firefox(firefox_profile=profile)
    except selenium.common.exceptions.WebDriverException as e:
        raise WebDriverException(e)

    # get cookies via Selenium
    url = service.get_url_of_login_page()
    log.info('open with WebDriver: %s', url)
    driver.get(url)
    cookies = []  # type: List[Dict[str, str]]
    try:
        # poll until the user closes the window, keeping the latest snapshot
        while driver.current_url:
            cookies = driver.get_cookies()
            time.sleep(0.1)
    except selenium.common.exceptions.WebDriverException:
        pass  # the window is closed

    # set cookies to the requests.Session
    log.info('copy cookies from WebDriver')
    for c in cookies:
        log.status('set cookie: %s', c['name'])
        morsel = http.cookies.Morsel()  # type: http.cookies.Morsel
        morsel.set(c['name'], c['value'], c['value'])
        morsel.update({key: value for key, value in c.items() if morsel.isReservedKey(key)})
        if not morsel['expires']:
            # session cookies would be dropped when the jar is saved; give them a far-future expiry
            expires = datetime.datetime.now(datetime.timezone.utc).astimezone() + datetime.timedelta(days=180)
            morsel.update({'expires': expires.strftime('%a, %d-%b-%Y %H:%M:%S GMT')})  # RFC2109 format
        cookie = requests.cookies.morsel_to_cookie(morsel)
        session.cookies.set_cookie(cookie)  # type: ignore
def get(self, directory: Optional[pathlib.Path] = None) -> List[str]:
    """Return the problem URLs recorded in the history for *directory*.

    :param directory: defaults to the current working directory *at call time*
    :return: the distinct URLs found, or [] when there is no history file

    Bug fix: the default used to be ``directory=pathlib.Path.cwd()``, which is
    evaluated once at import time and therefore ignored later ``chdir`` calls.
    Also drops the unused exception variable in the JSON-decode handler.
    """
    if directory is None:
        directory = pathlib.Path.cwd()
    if not self.path.exists():
        return []
    log.status('read history from: %s', self.path)
    found = set()
    with open(str(self.path)) as fh:
        for line in fh:
            try:
                data = json.loads(line)
            except json.decoder.JSONDecodeError:
                # a corrupted line is skipped, not fatal
                log.warning('corrupted line found in: %s', self.path)
                log.debug('%s', traceback.format_exc())
                continue
            if pathlib.Path(data['directory']) == directory:
                found.add(data['url'])
    log.status('found urls in history:\n%s', '\n'.join(found))
    return list(found)
def _generate_test_cases_in_cloned_repository(self, compile_checker: bool = False) -> None:
    """Run generate.py in the cloned library-checker-problems repo for this problem.

    :param compile_checker: also pass ``--compile-checker`` to generate.py
    :raises subprocess.CalledProcessError: if generate.py fails
    """
    LibraryCheckerService._update_cloned_repository()
    path = LibraryCheckerService._get_cloned_repository_path()

    if sys.version_info < (3, 6):
        log.warning("generate.py may not work on Python 3.5 or older")
    if os.name == 'nt':
        log.warning("generate.py may not work on Windows")

    problem_spec = str(self._get_problem_directory_path() / 'info.toml')
    command = [sys.executable, str(path / 'generate.py'), problem_spec]
    if compile_checker:
        command.append('--compile-checker')
    log.status('$ %s', ' '.join(command))
    try:
        # NOTE(review): stdout is routed to sys.stderr here, unlike the sibling
        # implementations which use sys.stdout — confirm this is intentional.
        subprocess.check_call(command, stdout=sys.stderr, stderr=sys.stderr)
    except subprocess.CalledProcessError:
        log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
        raise
def with_cookiejar(session: requests.Session, *, path: pathlib.Path = default_cookie_path) -> Iterator[requests.Session]:
    """
    :param session: the session to set a cookiejar
    :param path: a path to the file to store cookies. the default cookiejar is used if :py:class:`None`

    Yields the session with a persistent LWP cookiejar attached; on normal
    exit the cookies are written back to *path* with owner-only permissions.
    NOTE(review): presumably wrapped with ``@contextlib.contextmanager`` at a
    decoration site not visible here — confirm against the full file.
    """
    session.cookies = http.cookiejar.LWPCookieJar(str(path))  # type: ignore
    if path.exists():
        log.status('load cookie from: %s', path)
        session.cookies.load()  # type: ignore
    yield session
    # runs after the caller's with-block finishes normally
    log.status('save cookie to: %s', path)
    path.parent.mkdir(parents=True, exist_ok=True)
    session.cookies.save()  # type: ignore
    path.chmod(0o600)  # NOTE: to make secure a little bit
def _generate_test_cases_in_cloned_repository(self) -> None:
    """Run generate.py of library-checker-problems to build this problem's test cases.

    Variant that passes ``problems.toml`` plus ``-p problem_id`` on the command line.

    :raises subprocess.CalledProcessError: if generate.py fails
    """
    LibraryCheckerService._update_cloned_repository()
    path = LibraryCheckerService._get_cloned_repository_path()

    if sys.version_info < (3, 6):
        log.warning("generate.py may not work on Python 3.5 or older")
    if os.name == 'nt':
        log.warning("generate.py may not work on Windows")

    command = [sys.executable, str(path / 'generate.py'), str(path / 'problems.toml'), '-p', self.problem_id]
    log.status('$ %s', ' '.join(command))
    try:
        subprocess.check_call(command, stdout=sys.stdout, stderr=sys.stderr)
    except subprocess.CalledProcessError:
        log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
        raise
def _generate_test_cases_in_cloned_repository(self):
    """Clone/update the library-checker-problems repository and run generate.py for this problem.

    Older variant that chdirs into the repository instead of passing paths.

    :raises FileNotFoundError: if the ``git`` command is not installed
    :raises subprocess.CalledProcessError: if git or generate.py fails
    """
    path = self._get_cloned_repository_path()

    # fail fast with a clear message when git is missing
    try:
        subprocess.check_call(['git', '--version'], stdout=sys.stdout, stderr=sys.stderr)
    except FileNotFoundError:
        log.error('git command not found')
        raise

    # init the problem repository
    if not path.exists():
        url = 'https://github.com/yosupo06/library-checker-problems'
        log.status('$ git clone %s %s', url, path)
        subprocess.check_call(['git', 'clone', url, str(path)], stdout=sys.stdout, stderr=sys.stderr)

    log.status('$ cd %s', path)
    with utils.chdir(path):
        # sync the problem repository
        log.status('$ git pull')
        subprocess.check_call(['git', 'pull'], stdout=sys.stdout, stderr=sys.stderr)

        # generate test cases
        if sys.version_info < (3, 6):
            log.warning("generate.py may not work on Python 3.5 or older")
        if os.name == 'nt':
            log.warning("generate.py may not work on Windows")
        log.status('$ ./generate.py problems.toml -p %s', self.problem_id)
        try:
            subprocess.check_call([sys.executable, 'generate.py', 'problems.toml', '-p', self.problem_id], stdout=sys.stdout, stderr=sys.stderr)
        except subprocess.CalledProcessError:
            log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
            raise
def request(method: str, url: str, session: requests.Session, raise_for_status: bool = True, **kwargs) -> requests.Response:
    """Send an HTTP request through *session* with status logging.

    Redirects are followed by default (overridable via ``allow_redirects``).

    :param raise_for_status: when True, raise for 4xx/5xx responses
    :return: the response object
    """
    assert method in ['GET', 'POST']
    kwargs.setdefault('allow_redirects', True)
    log.status('%s: %s', method, url)
    response = session.request(method, url, **kwargs)
    was_redirected = response.url != url
    if was_redirected:
        log.status('redirected: %s', response.url)
    log.status(describe_status_code(response.status_code))
    if raise_for_status:
        response.raise_for_status()
    return response
def test_single_case(test_name: str, test_input_path: pathlib.Path, test_output_path: Optional[pathlib.Path], *, lock: Optional[threading.Lock] = None, args: 'argparse.Namespace') -> Dict[str, Any]:
    """Run the command on one test case, judge the result, and return a result dict.

    :param lock: present only in parallel mode; logging is deferred until held
    :return: dict with keys ``status``/``testcase``/``output``/``exitcode``/``elapsed``/``memory``
    """
    # print the header earlier if not in parallel
    if lock is None:
        log.emit('')
        log.info('%s', test_name)

    # run the binary
    with test_input_path.open() as inf:
        info, proc = utils.exec_command(args.command, stdin=inf, timeout=args.tle, gnu_time=args.gnu_time)
        # TODO: the `answer` should be bytes, not str
        answer = (info['answer'] or b'').decode(errors='replace')  # type: str
        elapsed = info['elapsed']  # type: float
        memory = info['memory']  # type: Optional[float]

    # lock is require to avoid mixing logs if in parallel
    nullcontext = contextlib.ExitStack()  # TODO: use contextlib.nullcontext() after updating Python to 3.7
    with lock or nullcontext:
        if lock is not None:
            log.emit('')
            log.info('%s', test_name)
        log.status('time: %f sec', elapsed)
        # memory is reported only when measured; escalate from silent to warning by thresholds
        if memory:
            if memory < MEMORY_PRINT:
                if args.print_memory:
                    log.status('memory: %f MB', memory)
            elif memory < MEMORY_WARNING:
                log.status('memory: %f MB', memory)
            else:
                log.warning('memory: %f MB', memory)
        status = compare_and_report(proc, answer, elapsed, memory, test_input_path, test_output_path, mle=args.mle, mode=args.mode, error=args.error, does_print_input=args.print_input, silent=args.silent, rstrip=args.rstrip, judge=args.judge)

    # return the result
    testcase = {
        'name': test_name,
        'input': str(test_input_path.resolve()),
    }
    if test_output_path:
        testcase['output'] = str(test_output_path.resolve())
    return {
        'status': status,
        'testcase': testcase,
        'output': answer,
        'exitcode': proc.returncode,
        'elapsed': elapsed,
        'memory': memory,
    }
def guess_lang_ids_of_file(filename: pathlib.Path, code: bytes, language_dict, cxx_latest: bool = False, cxx_compiler: str = 'all', python_version: str = 'all', python_interpreter: str = 'all') -> List[str]:
    """Guess the applicable language ids for *filename* among ``language_dict``.

    :param filename: used for its extension
    :param code: the source, inspected for shebang/modelines for Python version
    :param language_dict: maps language id -> {'description': ...}
    :param cxx_latest: keep only the newest C++ standard per compiler
    :param cxx_compiler: 'gcc', 'clang', or 'all'
    :param python_version: '2', '3', 'auto', or 'all'
    :param python_interpreter: 'cpython', 'pypy', or 'all'
    :return: matching language ids (possibly empty, except asserted non-empty for C++)

    Bug fix: in the Python branch, the PyPy selection was an ``elif`` after the
    CPython selection, so with the default ``python_interpreter='all'`` the
    first branch always matched and PyPy ids were never collected.  It is now
    an independent ``if``, so 'all' keeps both interpreters.
    """
    assert cxx_compiler.lower() in ('gcc', 'clang', 'all')
    assert python_version.lower() in ('2', '3', 'auto', 'all')
    assert python_interpreter.lower() in ('cpython', 'pypy', 'all')

    # small helpers over the project-level matcher
    select_words = (lambda words, lang_ids, **kwargs: select_ids_of_matched_languages(words, lang_ids, language_dict=language_dict, **kwargs))
    select = (lambda word, lang_ids, **kwargs: select_words([word], lang_ids, **kwargs))

    ext = filename.suffix
    lang_ids = language_dict.keys()

    log.debug('file extension: %s', ext)
    ext = ext.lstrip('.')

    if ext in ('cpp', 'cxx', 'cc', 'C'):
        log.debug('language guessing: C++')
        # memo: https://stackoverflow.com/questions/1545080/c-code-file-extension-cc-vs-cpp
        lang_ids = list(set(select('c++', lang_ids) + select('g++', lang_ids)))
        if not lang_ids:
            return []
        log.debug('all lang ids for C++: %s', lang_ids)

        # compiler: "gcc" ids are those labeled gcc, plus g++ ids not labeled clang
        select_gcc = lambda ids: list(set(select('gcc', ids) + select('clang', select('g++', ids), remove=True)))
        if select_gcc(lang_ids) and select('clang', lang_ids):
            log.status('both GCC and Clang are available for C++ compiler')
            if cxx_compiler.lower() == 'gcc':
                log.status('use: GCC')
                lang_ids = select_gcc(lang_ids)
            elif cxx_compiler.lower() == 'clang':
                log.status('use: Clang')
                lang_ids = select('clang', lang_ids)
            else:
                assert cxx_compiler.lower() == 'all'
        log.debug('lang ids after compiler filter: %s', lang_ids)

        # version: optionally keep only the latest standard per compiler
        if cxx_latest:
            saved_ids = lang_ids
            lang_ids = []
            for compiler in (None, 'gcc', 'clang'):  # use the latest for each compiler
                version_of = {}
                if compiler == 'gcc':
                    ids = select_gcc(saved_ids)
                elif compiler == 'clang':
                    ids = select('clang', saved_ids)
                else:
                    ids = saved_ids
                if not ids:
                    continue
                for lang_id in ids:
                    m = re.search(r'[cg]\+\+\w\w', language_dict[lang_id]['description'].lower())
                    if m:
                        version_of[lang_id] = m.group(0)
                ids.sort(key=lambda lang_id: version_of.get(lang_id, ''))
                lang_ids += [ids[-1]]  # since C++11 < C++1y < ... as strings
            lang_ids = list(set(lang_ids))
        log.debug('lang ids after version filter: %s', lang_ids)

        assert lang_ids
        return lang_ids

    elif ext == 'py':
        log.debug('language guessing: Python')
        if select('pypy', language_dict.keys()):
            log.status('PyPy is available for Python interpreter')

        # interpreter (both branches must run for 'all'; see docstring)
        lang_ids = []
        if python_interpreter.lower() in ('cpython', 'all'):
            lang_ids += select('python', language_dict.keys())
        if python_interpreter.lower() in ('pypy', 'all') or not lang_ids:
            lang_ids += select('pypy', language_dict.keys())

        # version
        if select_words(['python', '2'], lang_ids) and select_words(['python', '3'], lang_ids):
            log.status('both Python2 and Python3 are available for version of Python')
            if python_version in ('2', '3'):
                versions = [int(python_version)]
            elif python_version == 'all':
                versions = [2, 3]
            else:
                assert python_version == 'auto'
                lines = code.splitlines()
                if code.startswith(b'#!'):
                    s = lines[0]  # use shebang
                else:
                    s = b'\n'.join(lines[:10] + lines[-5:])  # use modelines
                versions = []
                for version in (2, 3):
                    if re.search(r'python *(version:? *)?%d'.encode() % version, s.lower()):
                        versions += [version]
                if not versions:
                    log.status('no version info in code')
                    versions = [2, 3]
            log.status('use: %s', ', '.join(map(str, versions)))
            saved_ids = lang_ids
            lang_ids = []
            for version in versions:
                lang_ids += select('python%d' % version, saved_ids)
                lang_ids += select('python %d' % version, saved_ids)

        lang_ids = list(set(lang_ids))
        return lang_ids

    else:
        log.debug('language guessing: othres')
        # extension -> language-name lookup table
        table = [
            {'names': ['awk'], 'exts': ['awk']},
            {'names': ['bash'], 'exts': ['sh']},
            {'names': ['brainfuck'], 'exts': ['bf']},
            {'names': ['c#'], 'exts': ['cs']},
            {'names': ['c'], 'exts': ['c'], 'split': True},
            {'names': ['d'], 'exts': ['d'], 'split': True},
            {'names': ['f#'], 'exts': ['fs']},
            {'names': ['fortran'], 'exts': ['for', 'f', 'f90', 'f95', 'f03']},
            {'names': ['go'], 'exts': ['go'], 'split': True},
            {'names': ['haskell'], 'exts': ['hs']},
            {'names': ['java'], 'exts': ['java']},
            {'names': ['javascript'], 'exts': ['js']},
            {'names': ['lua'], 'exts': ['lua']},
            {'names': ['objective-c'], 'exts': ['m']},
            {'names': ['ocaml'], 'exts': ['ml']},
            {'names': ['octave'], 'exts': ['m']},
            {'names': ['pascal'], 'exts': ['pas']},
            {'names': ['perl6'], 'exts': ['p6', 'pl6', 'pm6']},
            {'names': ['perl'], 'exts': ['pl', 'pm'], 'split': True},
            {'names': ['php'], 'exts': ['php']},
            {'names': ['ruby'], 'exts': ['rb']},
            {'names': ['rust'], 'exts': ['rs']},
            {'names': ['scala'], 'exts': ['scala']},
            {'names': ['scheme'], 'exts': ['scm']},
            {'names': ['sed'], 'exts': ['sed']},
            {'names': ['standard ml'], 'exts': ['sml']},
            {'names': ['swift'], 'exts': ['swift']},
            {'names': ['text'], 'exts': ['txt']},
            {'names': ['typescript'], 'exts': ['ts']},
            {'names': ['vim script'], 'exts': ['vim']},
        ]  # type: List[Dict[str, Any]]
        lang_ids = []
        for data in table:
            if ext in data['exts']:
                for name in data['names']:
                    lang_ids += select(name, language_dict.keys(), split=data.get('split', False))
        return list(set(lang_ids))
def submit(args: 'argparse.Namespace') -> None:
    """Submit ``args.file`` to the judge, guessing the URL and language when not given.

    Exits the process (status 1) on unrecoverable errors: unknown URL,
    ambiguous language, login required, or submission failure.
    """
    # guess url
    history = onlinejudge._implementation.download_history.DownloadHistory()
    if args.file.parent.resolve() == pathlib.Path.cwd():
        guessed_urls = history.get()
    else:
        log.warning('cannot guess URL since the given file is not in the current directory')
        guessed_urls = []
    if args.url is None:
        if len(guessed_urls) == 1:
            args.url = guessed_urls[0]
            log.info('guessed problem: %s', args.url)
        else:
            log.error('failed to guess the URL to submit')
            log.info('please manually specify URL as: $ oj submit URL FILE')
            sys.exit(1)

    # parse url
    problem = onlinejudge.dispatch.problem_from_url(args.url)
    if problem is None:
        sys.exit(1)

    # read code
    with args.file.open('rb') as fh:
        code = fh.read()  # type: bytes
    # NOTE(review): 'rstrip' is also driven by args.format_dos2unix here (not a
    # separate rstrip flag) — confirm this coupling is intended.
    format_config = {
        'dos2unix': args.format_dos2unix or args.golf,
        'rstrip': args.format_dos2unix or args.golf,
    }
    code = format_code(code, **format_config)

    # report code
    log.info('code (%d byte):', len(code))
    log.emit(utils.snip_large_file_content(code, limit=30, head=10, tail=10, bold=True))

    with utils.with_cookiejar(utils.new_session_with_our_user_agent(), path=args.cookie) as sess:
        # guess or select language ids
        langs = {language.id: {'description': language.name} for language in problem.get_available_languages(session=sess)}  # type: Dict[LanguageId, Dict[str, str]]
        matched_lang_ids = None  # type: Optional[List[str]]
        if args.language in langs:
            # an exact language id was given; use it as-is
            matched_lang_ids = [args.language]
        else:
            if args.guess:
                kwargs = {
                    'language_dict': langs,
                    'cxx_latest': args.guess_cxx_latest,
                    'cxx_compiler': args.guess_cxx_compiler,
                    'python_version': args.guess_python_version,
                    'python_interpreter': args.guess_python_interpreter,
                }
                matched_lang_ids = guess_lang_ids_of_file(args.file, code, **kwargs)
                if not matched_lang_ids:
                    log.info('failed to guess languages from the file name')
                    matched_lang_ids = list(langs.keys())
                if args.language is not None:
                    # narrow the guessed candidates with the user-provided words
                    log.info('you can use `--no-guess` option if you want to do an unusual submission')
                    matched_lang_ids = select_ids_of_matched_languages(args.language.split(), matched_lang_ids, language_dict=langs)
            else:
                if args.language is None:
                    matched_lang_ids = None
                else:
                    matched_lang_ids = select_ids_of_matched_languages(args.language.split(), list(langs.keys()), language_dict=langs)

        # report selected language ids
        if matched_lang_ids is not None and len(matched_lang_ids) == 1:
            args.language = matched_lang_ids[0]
            log.info('chosen language: %s (%s)', args.language, langs[LanguageId(args.language)]['description'])
        else:
            if matched_lang_ids is None:
                log.error('language is unknown')
                log.info('supported languages are:')
            elif len(matched_lang_ids) == 0:
                log.error('no languages are matched')
                log.info('supported languages are:')
            else:
                log.error('Matched languages were not narrowed down to one.')
                log.info('You have to choose:')
            for lang_id in sorted(matched_lang_ids or langs.keys()):
                log.emit('%s (%s)', lang_id, langs[LanguageId(lang_id)]['description'])
            sys.exit(1)

        # confirm: warn when the target problem differs from the downloaded one
        guessed_unmatch = ([problem.get_url()] != guessed_urls)
        if guessed_unmatch:
            samples_text = ('samples of "{}'.format('", "'.join(guessed_urls)) if guessed_urls else 'no samples')
            log.warning('the problem "%s" is specified to submit, but %s were downloaded in this directory. this may be mis-operation', problem.get_url(), samples_text)
        if args.wait:
            log.status('sleep(%.2f)', args.wait)
            time.sleep(args.wait)
        if not args.yes:
            if guessed_unmatch:
                # require typing a key derived from the problem id to avoid mis-submission
                problem_id = problem.get_url().rstrip('/').split('/')[-1].split('?')[-1]  # this is too ad-hoc
                key = problem_id[:3] + (problem_id[-1] if len(problem_id) >= 4 else '')
                sys.stdout.write('Are you sure? Please type "{}" '.format(key))
                sys.stdout.flush()
                c = sys.stdin.readline().rstrip()
                if c != key:
                    log.info('terminated.')
                    return
            else:
                sys.stdout.write('Are you sure? [y/N] ')
                sys.stdout.flush()
                c = sys.stdin.read(1)
                if c.lower() != 'y':
                    log.info('terminated.')
                    return

        # submit
        kwargs = {}
        if isinstance(problem, onlinejudge.service.topcoder.TopcoderLongContestProblem):
            if args.full_submission:
                kwargs['kind'] = 'full'
            else:
                kwargs['kind'] = 'example'
        try:
            submission = problem.submit_code(code, language_id=LanguageId(args.language), session=sess, **kwargs)
        except NotLoggedInError:
            log.failure('login required')
            sys.exit(1)
        except SubmissionError:
            log.failure('submission failed')
            sys.exit(1)

        # show result
        if args.open:
            browser = webbrowser.get()
            log.status('open the submission page with: %s', browser.name)
            opened = browser.open_new_tab(submission.get_url())
            if not opened:
                log.failure('failed to open the url. please set the $BROWSER envvar')
def test(args: 'argparse.Namespace') -> None:
    """Run all testcases against the user's command and summarize the results.

    Collects testcases from ``args.test`` (or globs them from
    ``args.directory``/``args.format``), runs each via ``test_single_case``
    either sequentially or in a thread pool (``args.jobs``), then prints the
    slowest case, the heaviest memory usage, and the AC count.

    :param args: parsed command-line options; reads ``test``, ``directory``,
        ``format``, ``ignore_backup``, ``gnu_time``, ``mle``, ``jobs`` and
        ``json`` among others.
    :raises RuntimeError: if ``--mle`` is given but GNU time is unavailable.

    Side effects: logs progress, prints a JSON history when ``args.json`` is
    set, and calls ``sys.exit(1)`` when any testcase is not AC.
    """

    # list tests
    if not args.test:
        args.test = fmtutils.glob_with_format(args.directory, args.format)  # by default
    if args.ignore_backup:
        args.test = fmtutils.drop_backup_or_hidden_files(args.test)
    tests = fmtutils.construct_relationship_of_files(args.test, args.directory, args.format)

    # check whether GNU time is available; memory measurement depends on it
    if not check_gnu_time(args.gnu_time):
        log.warning('GNU time is not available: %s', args.gnu_time)
        args.gnu_time = None
    if args.mle is not None and args.gnu_time is None:
        raise RuntimeError('--mle is used but GNU time does not exist')

    # run tests
    history = []  # type: List[Dict[str, Any]]
    if args.jobs is None:
        # sequential execution
        for name, paths in sorted(tests.items()):
            history += [test_single_case(name, paths['in'], paths.get('out'), args=args)]
    else:
        # parallel execution; a shared lock keeps the log output of each case atomic
        if os.name == 'nt':
            log.warning("-j/--jobs option is unstable on Windows environment")
        with concurrent.futures.ThreadPoolExecutor(max_workers=args.jobs) as executor:
            lock = threading.Lock()
            futures = []  # type: List[concurrent.futures.Future]
            for name, paths in sorted(tests.items()):
                futures += [executor.submit(test_single_case, name, paths['in'], paths.get('out'), lock=lock, args=args)]
            for future in futures:
                history += [future.result()]

    # summarize
    slowest = -1.0  # type: float
    slowest_name = ''
    heaviest = -1.0  # type: float
    heaviest_name = ''
    ac_count = 0
    for result in history:
        if result['status'] == 'AC':
            ac_count += 1
        if slowest < result['elapsed']:
            slowest = result['elapsed']
            slowest_name = result['testcase']['name']
        # result['memory'] is None when GNU time was unavailable
        if result['memory'] is not None and heaviest < result['memory']:
            heaviest = result['memory']
            heaviest_name = result['testcase']['name']

    # print the summary
    log.emit('')
    log.status('slowest: %f sec (for %s)', slowest, slowest_name)
    if heaviest >= 0:
        if heaviest < MEMORY_WARNING:
            log.status('max memory: %f MB (for %s)', heaviest, heaviest_name)
        else:
            log.warning('max memory: %f MB (for %s)', heaviest, heaviest_name)
    if ac_count == len(tests):
        log.success('test ' + log.green('success') + ': %d cases', len(tests))
    else:
        log.failure('test ' + log.red('failed') + ': %d AC / %d cases', ac_count, len(tests))

    if args.json:
        print(json.dumps(history))

    if ac_count != len(tests):
        sys.exit(1)
def test(args: 'argparse.Namespace') -> None:
    """Run testcases against ``args.command`` and diff outputs inline.

    For each testcase found under ``args.directory``, runs the command with
    the ``*.in`` file as stdin, classifies the result (AC / WA / RE / TLE),
    and prints a summary.  Exits with status 1 when any case is not AC.

    :param args: parsed options; reads ``test``, ``directory``, ``format``,
        ``ignore_backup``, ``error``, ``rstrip``, ``command``, ``tle``,
        ``mode``, ``silent``, ``print_input`` and ``json`` among others.
    """
    # prepare
    if not args.test:
        args.test = cutils.glob_with_format(args.directory, args.format)  # by default
    if args.ignore_backup:
        args.test = cutils.drop_backup_or_hidden_files(args.test)
    tests = cutils.construct_relationship_of_files(args.test, args.directory, args.format)
    if args.error:  # float mode
        match = lambda a, b: compare_as_floats(a, b, args.error)
    else:
        def match(a, b):
            # exact match first; optionally tolerate trailing whitespace
            # (rstrip_targets is assigned below -- late binding makes this work)
            if a == b:
                return True
            if args.rstrip and a.rstrip(rstrip_targets) == b.rstrip(rstrip_targets):
                log.warning('WA if no rstrip')
                return True
            return False
    rstrip_targets = ' \t\r\n\f\v\0'  # ruby's one, follow AnarchyGolf
    slowest = -1  # type: Union[int, float]
    slowest_name = ''
    ac_count = 0
    history = []  # type: List[Dict[str, Any]]
    for name, it in sorted(tests.items()):
        is_input_printed = False

        def print_input():
            # print the testcase input at most once per case, on demand
            nonlocal is_input_printed
            if args.print_input and not is_input_printed:
                is_input_printed = True
                with open(it['in'], 'rb') as inf:
                    log.emit('input:\n%s', utils.snip_large_file_content(inf.read(), limit=40, head=20, tail=10, bold=True))

        log.emit('')
        log.info('%s', name)

        # run the binary
        with it['in'].open() as inf:
            begin = time.perf_counter()
            answer_byte, proc = utils.exec_command(args.command, shell=True, stdin=inf, timeout=args.tle)
            end = time.perf_counter()
            elapsed = end - begin
            answer = answer_byte.decode()  # TODO: the `answer` should be bytes, not str
            if slowest < elapsed:
                slowest = elapsed
                slowest_name = name
            log.status('time: %f sec', elapsed)
            proc.terminate()

        # check TLE, RE or not
        result = 'AC'
        if proc.returncode is None:
            # returncode is None when the process was killed by the timeout
            log.failure(log.red('TLE'))
            result = 'TLE'
            print_input()
        elif proc.returncode != 0:
            log.failure(log.red('RE') + ': return code %d', proc.returncode)
            result = 'RE'
            print_input()

        # check WA or not
        if 'out' in it:
            with it['out'].open() as outf:
                correct = outf.read()
            # compare
            if args.mode == 'all':
                # whole-output comparison
                if not match(answer, correct):
                    log.failure(log.red('WA'))
                    print_input()
                    if not args.silent:
                        log.emit('output:\n%s', utils.snip_large_file_content(answer.encode(), limit=40, head=20, tail=10, bold=True))
                        log.emit('expected:\n%s', utils.snip_large_file_content(correct.encode(), limit=40, head=20, tail=10, bold=True))
                    result = 'WA'
            elif args.mode == 'line':
                # line-by-line comparison; the [None] padding makes both
                # sequences the same length so missing/extra lines show up
                answer_words = answer.splitlines()
                correct_words = correct.splitlines()
                for i, (x, y) in enumerate(zip(answer_words + [None] * len(correct_words), correct_words + [None] * len(answer_words))):  # type: ignore
                    if x is None and y is None:
                        break
                    elif x is None:
                        print_input()
                        log.failure(log.red('WA') + ': line %d: line is nothing: expected "%s"', i + 1, log.bold(y))
                        result = 'WA'
                    elif y is None:
                        print_input()
                        log.failure(log.red('WA') + ': line %d: unexpected line: output "%s"', i + 1, log.bold(x))
                        result = 'WA'
                    elif not match(x, y):
                        print_input()
                        log.failure(log.red('WA') + ': line %d: output "%s": expected "%s"', i + 1, log.bold(x), log.bold(y))
                        result = 'WA'
            else:
                assert False
        else:
            # no expected-output file: just show what the command produced
            if not args.silent:
                log.emit(('output:\n%s' if is_input_printed else '%s'), utils.snip_large_file_content(answer.encode(), limit=40, head=20, tail=10, bold=True))
        if result == 'AC':
            log.success(log.green('AC'))
            ac_count += 1

        # push the result
        testcase = {
            'name': name,
            'input': str(it['in'].resolve()),
        }
        if 'out' in it:
            testcase['output'] = str(it['out'].resolve())
        history += [{
            'result': result,
            'testcase': testcase,
            'output': answer,
            'exitcode': proc.returncode,
            'elapsed': elapsed,
        }]

    # summarize
    log.emit('')
    log.status('slowest: %f sec (for %s)', slowest, slowest_name)
    if ac_count == len(tests):
        log.success('test ' + log.green('success') + ': %d cases', len(tests))
    else:
        log.failure('test ' + log.red('failed') + ': %d AC / %d cases', ac_count, len(tests))

    if args.json:
        print(json.dumps(history))

    if ac_count != len(tests):
        sys.exit(1)