def run(judge_class: Callable[[], JudgeDriver], task: JudgeTask) -> JudgeStatus:
    """Execute the full judge pipeline for one submission.

    Decompresses the submitted code and all test payloads, then runs
    prepare -> (optional) compile -> tests inside a judge driver context.

    :param judge_class: factory producing a JudgeDriver context manager.
    :param task: the judge task; ``code`` and each test's ``input``/``output``
                 are zstd-compressed on entry and replaced in place.
    :return: the final JudgeStatus for the submission.
    """
    # BUGFIX: the log message was missing the closing ')' after user_id.
    LOGGER.info('judge start (contest_id: {}, problem_id: {}, '
                'submission_id: {}, user_id: {})'.format(
                    task.contest_id, task.problem_id, task.id, task.user_id))
    zctx = ZstdDecompressor()
    try:
        # Payloads are stored zstd-compressed; inflate them before judging.
        task.code = zctx.decompress(task.code)
        for test in task.tests:
            test.input = zctx.decompress(test.input)
            test.output = zctx.decompress(test.output)
    except Exception:
        # Corrupt payload: record InternalError instead of crashing the worker.
        LOGGER.warning('decompress failed', exc_info=True)
        with transaction() as s:
            return _update_submission_status(s, task, JudgeStatus.InternalError)
    with judge_class() as judge:
        ret = _prepare(judge, task)
        if ret:
            return ret
        # Interpreted languages have no compile image; skip compilation then.
        if task.compile_image_name:
            ret = _compile(judge, task)
            if ret:
                return ret
        ret = _tests(judge, task)
    LOGGER.info('judge finished (submission_id={}): {}'.format(task.id, ret))
    return ret
def test_log(tmpfile):
    """Round-trip test: records written with log=True are recoverable by
    reading (size, msm_id, prb_id) entries from the side-car log file and
    decompressing each record slice with the shared dictionary."""
    records = [{
        "msm_id": 1234,
        "prb_id": 5678
    }, {
        "msm_id": 9876,
        "prb_id": 5432
    }]
    with AtlasRecordsWriter(tmpfile, compression=True, log=True) as w:
        log_file = w.log_file
        for record in records:
            w.write(record)
    # TODO: Methods to simplify log reading?
    # Zstandard decompression context
    dict_data = ZstdCompressionDict(dictionary.read_bytes())
    ctx = ZstdDecompressor(dict_data=dict_data)
    # BUGFIX: use context managers so both file handles are closed even when
    # an assertion fails (the original only closed them on the success path).
    with tmpfile.open("rb") as f, log_file.open("rb") as log_f:
        log = LogEntry.iter_unpack(log_f.read())
        for i, (size, msm_id, prb_id) in enumerate(log):
            rec = json.loads(ctx.decompress(f.read(size)).decode("utf-8"))
            assert rec == records[i]
            assert msm_id == records[i]["msm_id"]
            assert prb_id == records[i]["prb_id"]
class TinyIndexBase:
    """Read-only base for a paged, zstd-compressed, JSON-serialised index.

    A key is hashed (murmur3) to a fixed-size page; each page stores a JSON
    list of item tuples that are rebuilt via ``item_type``.  Subclasses are
    responsible for populating ``self.mmap`` with the index file's bytes.
    """

    def __init__(self, item_type: type, num_pages: int, page_size: int):
        self.item_type = item_type
        self.num_pages = num_pages
        self.page_size = page_size
        self.decompressor = ZstdDecompressor()
        # Set by subclasses once the index file is memory-mapped.
        self.mmap = None

    def retrieve(self, key: str):
        """Return the list of items stored under *key* ([] for an empty page)."""
        index = self._get_key_page_index(key)
        page = self.get_page(index)
        if page is None:
            return []
        # BUGFIX: removed a leftover debug print that referenced
        # self.index_path — an attribute never set on this class — which
        # raised AttributeError on every successful retrieval.
        return self.convert_items(page)

    def _get_key_page_index(self, key):
        # Unsigned murmur3 hash modulo page count gives a stable page slot.
        key_hash = mmh3.hash(key, signed=False)
        return key_hash % self.num_pages

    def get_page(self, i):
        """
        Get the page at index i, decompress and deserialise it using JSON
        """
        page_data = self.mmap[i * self.page_size:(i + 1) * self.page_size]
        try:
            decompressed_data = self.decompressor.decompress(page_data)
        except ZstdError:
            # An unused/empty page is not valid zstd data; treat as missing.
            return None
        return json.loads(decompressed_data.decode('utf8'))

    def convert_items(self, items):
        """Rebuild ``item_type`` instances from their JSON list representation."""
        return [self.item_type(*item) for item in items]
class ZstdJsonSerializer(Serializer):
    """Serializer that stores items as zstd-compressed JSON bytes."""

    def __init__(self):
        self.compressor = ZstdCompressor()
        self.decompressor = ZstdDecompressor()

    def serialize(self, item) -> bytes:
        """JSON-encode *item* as UTF-8 and return the compressed bytes."""
        encoded = json.dumps(item).encode('utf8')
        return self.compressor.compress(encoded)

    def deserialize(self, serialized_item: bytes):
        """Decompress *serialized_item* and parse the JSON payload back."""
        raw = self.decompressor.decompress(serialized_item)
        return json.loads(raw.decode('utf8'))
def _get_test_data(contest_id: str, problem_id: str, test_id: str,
                   is_input: bool) -> Response:
    """Send one test case's input or output to an admin as a file download.

    Aborts with 404 when the test case does not exist; requires an admin
    token.  The stored payload is zstd-compressed and inflated before send.
    """
    decompressor = ZstdDecompressor()
    from io import BytesIO
    with transaction() as s:
        _ = _validate_token(s, admin_required=True)
        tc = s.query(TestCase).filter(
            TestCase.contest_id == contest_id,
            TestCase.problem_id == problem_id,
            TestCase.id == test_id).first()
        if not tc:
            abort(404)
        raw = tc.input if is_input else tc.output
        payload = BytesIO(decompressor.decompress(raw))
        extension = 'in' if is_input else 'out'
        return send_file(payload, as_attachment=True,
                         attachment_filename='{}.{}'.format(test_id, extension))
def get_submission(contest_id: str, submission_id: str) -> Response:
    """Return one submission's details (code and per-test results) as JSON.

    Aborts with 404 when the contest or submission does not exist or is not
    accessible to the requesting user.  Timing/memory figures are withheld
    from non-admins while the contest is still running.
    """
    params, _ = _validate_request()
    zctx = ZstdDecompressor()
    with transaction() as s:
        u = _validate_token(s)
        contest = s.query(Contest).filter(Contest.id == contest_id).first()
        if not (contest and contest.is_accessible(u)):
            abort(404)
        # Join submission with its author's display name in one query.
        tmp = s.query(Submission, User.name).filter(
            Submission.contest_id == contest_id,
            Submission.id == submission_id,
            Submission.user_id == User.id).first()
        if not tmp:
            abort(404)
        submission, user_name = tmp
        if not submission.is_accessible(contest, u):
            abort(404)
        ret = submission.to_dict()
        ret['user_name'] = user_name
        ret['tests'] = []
        for t_raw in s.query(JudgeResult).filter(
                JudgeResult.submission_id == submission_id).order_by(
                JudgeResult.status, JudgeResult.test_id):
            t = t_raw.to_dict()
            # Remove information the client does not need
            t.pop('contest_id')
            t.pop('problem_id')
            t.pop('submission_id')
            t['id'] = t['test_id']
            t.pop('test_id')
            if not (contest.is_finished() or (u and u['admin'])):
                # While the contest is running, non-admin users must not
                # see execution time or memory consumption.
                # (When NULL, to_dict does not set the key, so None is
                # passed as the default argument to pop.)
                t.pop('time', None)
                t.pop('memory', None)
            ret['tests'].append(t)
        # The stored source code is zstd-compressed; inflate before returning.
        ret['code'] = zctx.decompress(ret['code']).decode('utf-8')
        return jsonify(ret)