def export_from_database(model=None, path=None, name=globalconfig.default_package):
    """Write a problem's database state out to its on-disk package config.

    Exactly one of *model* and *path* must be given: *model* is a problem
    model instance, *path* is a problem directory (resolved through
    ``get_problem_by_path``).  *name* selects which package config to write.

    Solutions present in the database but missing from the config are added
    via ``add_solution``; config solutions no longer in the database (and not
    the main solution) are removed via ``del_solution``.
    """
    assert (model is None) != (path is None)
    if path is not None:
        model = get_problem_by_path(norm(path))
    with ChangeDir(model.path):
        try:
            conf = PackageConfig.get_config(".", name)
        except TypeError:
            # Seemingly, this is due to a lacking please_version.
            conf = ConfigFile(name)
        conf["please_version"] = conf["please_version"] or str(globalconfig.please_version)
        conf["name"] = str(model.name)
        conf["shortname"] = str(model.short_name)
        conf["tags"] = "; ".join(map(str, model.tags.all()))
        conf["type"] = ""
        conf["input"] = str(model.input)
        conf["output"] = str(model.output)
        conf["time_limit"] = str(model.time_limit)
        conf["memory_limit"] = str(model.memory_limit)
        conf["checker"] = str(model.checker_path)
        conf["validator"] = str(model.validator_path)
        if model.main_solution is not None:
            conf["main_solution"] = str(model.main_solution.path)
        conf["statement"] = str(model.statement_path)
        conf["description"] = str(model.description_path)
        conf["hand_answer_extension"] = str(model.hand_answer_extension)
        conf["well_done_test"] = [well_done.name for well_done in model.well_done_test.all()]
        conf["well_done_answer"] = [well_done.name for well_done in model.well_done_answer.all()]
        conf["analysis"] = str(model.analysis_path)
        conf.write()
        sources = []
        # Guard against a fresh ConfigFile where "solution" has no value yet.
        already_there = [norm(x["source"]) for x in (conf["solution"] or [])]
        for solution in model.solution_set.all():
            solution.path = norm(solution.path)
            sources.append(str(solution.path))
            if str(solution.path) in already_there:
                continue
            args = []
            if solution.input:
                args += ["input", str(solution.input)]
            if solution.output:
                args += ["output", str(solution.output)]
            if solution.possible_verdicts.count() != 0:
                args += ["possible"] + list(map(str, solution.possible_verdicts.all()))
            if solution.expected_verdicts.count() != 0:
                args += ["expected"] + list(map(str, solution.expected_verdicts.all()))
            try:
                add_solution(str(solution.path), args)
            except PleaseException:
                # The package rejected this solution; drop it from the
                # database so config and database stay in sync.
                solution.delete()
        # Remove config solutions that vanished from the database, keeping
        # the main solution no matter what.
        for sol in already_there:
            if (sol not in sources) and (sol != norm(conf["main_solution"])):
                del_solution(sol)
def import_to_database(model=None, path=None, name=globalconfig.default_package):
    """Load a problem's on-disk package config into its database model.

    Exactly one of *model* and *path* must be given.  Returns the saved
    model, or None when the problem directory no longer exists (the model
    is then deleted).
    """
    assert (model is None) != (path is None)
    if path is not None:
        model = get_problem_by_path(norm(path))
    problem_path = norm(path or str(model.path))
    if not os.path.exists(problem_path):
        model.delete()
        return None
    conf = PackageConfig.get_config(problem_path, name, ignore_cache=True)
    model.name = conf.get("name", "")
    model.short_name = conf.get("shortname", "")
    model.input = conf.get("input", "")
    model.output = conf.get("output", "")
    model.time_limit = float(conf.get("time_limit", "2.0"))
    model.memory_limit = int(conf.get("memory_limit", "268435456"))
    # The checker path is stored relative to the problem directory.
    model.checker_path = norm(
        os.path.relpath(conf.get("checker", ""), os.path.abspath(problem_path))
        if conf.get("checker", "") != "" else ""
    )
    model.validator_path = norm(conf.get("validator", ""))
    model.statement_path = norm(conf.get("statement", ""))
    model.description_path = norm(conf.get("description", ""))
    model.analysis_path = norm(conf.get("analysis", ""))
    model.hand_answer_extension = conf.get("hand_answer_extension", "")
    # Solutions currently in the database; entries still present here after
    # the loop are stale and get deleted.
    old_solutions = {norm(i.path) for i in model.solution_set.all()}
    for solution in conf.get("solution", []):
        # Local name must not shadow the `path` parameter (used above).
        source_path = norm(solution["source"])
        sol = Solution.objects.get_or_create(path=source_path, problem=model)[0]
        old_solutions.discard(source_path)
        sol.input = solution.get("input", "")
        sol.output = solution.get("output", "")
        sol.expected_verdicts.clear()
        sol.possible_verdicts.clear()
        # Both verdict lists are optional in the config: the exporter only
        # writes them when non-empty, so default to [] instead of crashing.
        for verdict in solution.get("expected", []):
            sol.expected_verdicts.add(Verdict.objects.get_or_create(name=verdict)[0])
        for verdict in solution.get("possible", []):
            sol.possible_verdicts.add(Verdict.objects.get_or_create(name=verdict)[0])
        if source_path == norm(conf["main_solution"]):
            model.main_solution = sol
        sol.save()
    for old in old_solutions:
        model.solution_set.get(path=old).delete()
    model.tags.clear()
    for entry in conf.get("tags", []):
        model.tags.add(ProblemTag.objects.get_or_create(name=entry)[0])
    model.well_done_test.clear()
    for entry in conf.get("well_done_test", []):
        try:
            model.well_done_test.add(WellDone.objects.get(name=entry))
        except WellDone.DoesNotExist:
            pass  # Bad well done...
    model.well_done_answer.clear()
    for entry in conf.get("well_done_answer", []):
        try:
            model.well_done_answer.add(WellDone.objects.get(name=entry))
        except WellDone.DoesNotExist:
            pass
    model.save()
    return model
def is_problem_path(path):
    """Return True when *path* holds a readable package config."""
    config = PackageConfig.get_config(path)
    return config is not None
def export_problem2ejudge(contest_path, task, problem_id):
    """Export the problem in directory *task* into an ejudge contest.

    Renders the serve.cfg problem template with the problem's parameters,
    appends it to ``<contest_path>/conf/serve.cfg``, and copies tests,
    statements and text files into ``<contest_path>/problems/<task>``.
    """
    problem_config = PackageConfig.get_config(task)
    with open(serve_problem_template_name, 'r') as serve_template_file:
        serve_problem_template = serve_template_file.readlines()
    # tests_info: (input-file map, answer-file map, test-name width) —
    # presumably; confirm against prepare_tests.
    tests_info = prepare_tests(os.path.join(task, '.tests'))
    replaces = {
        '$id$': str(problem_id),
        '$shortname$': problem_config['shortname'],
        '$longname$': problem_config['name'],
        '$internalname$': problem_config['shortname'],
        '$inputfile$': problem_config['input'],
        '$outputfile$': problem_config['output'],
        '$timelimit_ms$': str(round(float(problem_config['time_limit']) * 1000)),
        '$memorylimit_kb$': str(round(float(problem_config['memory_limit']) * 1024)),
        '$checker$': without_extention(problem_config['checker']),
        '$testnamelen$': r"%02d" % tests_info[2],
    }
    checker_dir = os.path.join(globalconfig.root, globalconfig.checkers_dir)
    if checker_dir in problem_config['checker']:
        # Best effort: copy a shared checker (and testlib.h) next to the
        # problem so ejudge can compile it; a failed copy must not abort
        # the whole export.
        try:
            shutil.copy(problem_config['checker'],
                        os.path.join(task, os.path.split(problem_config['checker'])[1]))
            shutil.copy(os.path.join(checker_dir, 'testlib.h'),
                        os.path.join(task, 'testlib.h'))
        except OSError:  # was a bare `except:`; only I/O failures are expected
            pass
    for placeholder, value in replaces.items():
        serve_problem_template = [line.replace(placeholder, value)
                                  for line in serve_problem_template]
    summary_serve_problem = []
    for line in serve_problem_template:
        # Template lines of the form `!<condition>!<text>` keep <text> only
        # when <condition> evaluates truthy after substitution.
        re_find = re.match('!(.*)!(.*)', line)
        if not re_find:
            summary_serve_problem.append(line)
        # NOTE(review): eval() of template-embedded conditions — acceptable
        # only while the serve.cfg template is trusted; never feed it
        # untrusted input.
        elif eval(re_find.groups()[0]):
            summary_serve_problem.append(re_find.groups()[1] + '\n')
    serve_cfg_path = os.path.join(contest_path, 'conf', 'serve.cfg')
    with open(serve_cfg_path, 'a') as serve_file:
        serve_file.writelines(summary_serve_problem)
    problem_path = os.path.join(contest_path, 'problems', task)
    os.mkdir(problem_path)
    problem_tests_dir = os.path.join(problem_path, 'tests')
    os.mkdir(problem_tests_dir)
    # tests_info[0]/[1] map source file -> target name inside tests/.
    for src in tests_info[0]:
        shutil.copy(src, os.path.join(problem_tests_dir, tests_info[0][src]))
    for src in tests_info[1]:
        shutil.copy(src, os.path.join(problem_tests_dir, tests_info[1][src]))
    os.mkdir(os.path.join(problem_path, 'statements'))
    for fname in get_text_files_from_dir(os.path.join(task, 'statements')):
        shutil.copy(os.path.join(task, 'statements', fname),
                    os.path.join(problem_path, 'statements', fname))
    # Copy every text file sitting directly in the problem directory.
    for fname in filter(lambda x: os.path.isfile(os.path.join(task, x)), os.listdir(task)):
        if is_text(os.path.join(task, fname)):
            shutil.copy(os.path.join(task, fname), os.path.join(problem_path, fname))
def export_from_database(model=None, path=None, name=globalconfig.default_package):
    """Write a problem's database state out to its on-disk package config.

    NOTE(review): this duplicates an identical ``export_from_database``
    defined earlier in this file (differing only in quote style); the
    later definition shadows the earlier one — consider removing one copy.

    Exactly one of *model* and *path* must be given.  *name* selects which
    package config to write.
    """
    assert (model is None) != (path is None)
    if path is not None:
        model = get_problem_by_path(norm(path))
    with ChangeDir(model.path):
        try:
            conf = PackageConfig.get_config('.', name)
        except TypeError:
            # Seemingly, this is due to a lacking please_version.
            conf = ConfigFile(name)
        conf['please_version'] = conf['please_version'] or str(globalconfig.please_version)
        conf['name'] = str(model.name)
        conf['shortname'] = str(model.short_name)
        conf['tags'] = '; '.join(map(str, model.tags.all()))
        conf['type'] = ''
        conf['input'] = str(model.input)
        conf['output'] = str(model.output)
        conf['time_limit'] = str(model.time_limit)
        conf['memory_limit'] = str(model.memory_limit)
        conf['checker'] = str(model.checker_path)
        conf['validator'] = str(model.validator_path)
        if model.main_solution is not None:
            conf['main_solution'] = str(model.main_solution.path)
        conf['statement'] = str(model.statement_path)
        conf['description'] = str(model.description_path)
        conf['hand_answer_extension'] = str(model.hand_answer_extension)
        conf['well_done_test'] = [well_done.name for well_done in model.well_done_test.all()]
        conf['well_done_answer'] = [well_done.name for well_done in model.well_done_answer.all()]
        conf['analysis'] = str(model.analysis_path)
        conf.write()
        sources = []
        # Guard against a fresh ConfigFile where 'solution' has no value yet.
        already_there = [norm(x['source']) for x in (conf['solution'] or [])]
        for solution in model.solution_set.all():
            solution.path = norm(solution.path)
            sources.append(str(solution.path))
            if str(solution.path) in already_there:
                continue
            args = []
            if solution.input:
                args += ['input', str(solution.input)]
            if solution.output:
                args += ['output', str(solution.output)]
            if solution.possible_verdicts.count() != 0:
                args += ['possible'] + list(map(str, solution.possible_verdicts.all()))
            if solution.expected_verdicts.count() != 0:
                args += ['expected'] + list(map(str, solution.expected_verdicts.all()))
            try:
                add_solution(str(solution.path), args)
            except PleaseException:
                # The package rejected this solution; drop it from the
                # database so config and database stay in sync.
                solution.delete()
        # Remove config solutions that vanished from the database, keeping
        # the main solution no matter what.
        for sol in already_there:
            if (sol not in sources) and (sol != norm(conf['main_solution'])):
                del_solution(sol)
def import_to_database(model=None, path=None, name=globalconfig.default_package):
    """Load a problem's on-disk package config into its database model.

    NOTE(review): this duplicates an identical ``import_to_database``
    defined earlier in this file; the later definition shadows the earlier
    one — consider removing one copy.

    Exactly one of *model* and *path* must be given.  Returns the saved
    model, or None when the problem directory no longer exists (the model
    is then deleted).
    """
    assert (model is None) != (path is None)
    if path is not None:
        model = get_problem_by_path(norm(path))
    problem_path = norm(path or str(model.path))
    if not os.path.exists(problem_path):
        model.delete()
        return None
    conf = PackageConfig.get_config(problem_path, name, ignore_cache=True)
    model.name = conf.get("name", "")
    model.short_name = conf.get("shortname", "")
    model.input = conf.get("input", "")
    model.output = conf.get("output", "")
    model.time_limit = float(conf.get("time_limit", "2.0"))
    model.memory_limit = int(conf.get("memory_limit", "268435456"))
    # The checker path is stored relative to the problem directory.
    model.checker_path = norm(
        os.path.relpath(conf.get("checker", ""), os.path.abspath(problem_path))
        if conf.get("checker", "") != "" else ""
    )
    model.validator_path = norm(conf.get("validator", ""))
    model.statement_path = norm(conf.get("statement", ""))
    model.description_path = norm(conf.get("description", ""))
    model.analysis_path = norm(conf.get("analysis", ""))
    model.hand_answer_extension = conf.get("hand_answer_extension", "")
    # Solutions currently in the database; entries still present here after
    # the loop are stale and get deleted.
    old_solutions = {norm(i.path) for i in model.solution_set.all()}
    for solution in conf.get("solution", []):
        # Local name must not shadow the `path` parameter (used above).
        source_path = norm(solution['source'])
        sol = Solution.objects.get_or_create(path=source_path, problem=model)[0]
        old_solutions.discard(source_path)
        sol.input = solution.get('input', '')
        sol.output = solution.get('output', '')
        sol.expected_verdicts.clear()
        sol.possible_verdicts.clear()
        # Both verdict lists are optional in the config: the exporter only
        # writes them when non-empty, so default to [] instead of crashing.
        for verdict in solution.get('expected', []):
            sol.expected_verdicts.add(Verdict.objects.get_or_create(name=verdict)[0])
        for verdict in solution.get('possible', []):
            sol.possible_verdicts.add(Verdict.objects.get_or_create(name=verdict)[0])
        if source_path == norm(conf['main_solution']):
            model.main_solution = sol
        sol.save()
    for old in old_solutions:
        model.solution_set.get(path=old).delete()
    model.tags.clear()
    for entry in conf.get('tags', []):
        model.tags.add(ProblemTag.objects.get_or_create(name=entry)[0])
    model.well_done_test.clear()
    for entry in conf.get('well_done_test', []):
        try:
            model.well_done_test.add(WellDone.objects.get(name=entry))
        except WellDone.DoesNotExist:
            pass  # Bad well done...
    model.well_done_answer.clear()
    for entry in conf.get('well_done_answer', []):
        try:
            model.well_done_answer.add(WellDone.objects.get(name=entry))
        except WellDone.DoesNotExist:
            pass
    model.save()
    return model