def _validate_time_limit(self, value):
    """Ensure a time limit lies within [CodeQuiz.TL_MIN, CodeQuiz.TL_MAX].

    Raises FormatError when the value is out of bounds.
    """
    lower, upper = CodeQuiz.TL_MIN, CodeQuiz.TL_MAX
    if value < lower:
        raise FormatError("Time Limit should be at least {}".format(lower))
    if value > upper:
        raise FormatError("Time Limit should be at most {}".format(upper))
def _validate_memory_limit(self, value):
    """Ensure a memory limit lies within [CodeQuiz.ML_MIN, CodeQuiz.ML_MAX].

    Raises FormatError when the value is out of bounds.
    """
    lower, upper = CodeQuiz.ML_MIN, CodeQuiz.ML_MAX
    if value < lower:
        raise FormatError("Memory Limit should be at least {}".format(lower))
    if value > upper:
        raise FormatError("Memory Limit should be at most {}".format(upper))
def clean_reply(self, reply, dataset):
    """Validate a learner's SQL reply: non-empty and a single statement."""
    query = reply.solve_sql.strip()
    if not query:
        raise FormatError("Empty query")
    statements = sqlparse.split(query)
    if len(statements) > 1:
        raise FormatError("Only one query is allowed")
    return query
def clean_reply(self, reply, dataset):
    """Check the choices vector matches the dataset length and return it."""
    selected = reply.choices
    if len(selected) != len(dataset.options):
        raise FormatError('Reply has the wrong length')
    # In single-choice mode exactly one option may be marked.
    if not self.is_multiple_choice and sum(selected) != 1:
        raise FormatError('Only one choice should be `true`')
    return selected
def __init__(self, source):
    """Validate a table quiz: per-row answer counts and unique row/column names."""
    super().__init__(source)
    self.columns = source.columns
    self.rows = source.rows
    self.description = source.description
    self.options = source.options
    self.is_always_correct = source.is_always_correct

    single_choice = not self.options.is_checkbox
    for row in self.rows:
        # Number of cells marked as correct in this row.
        marked = sum(cell.choice for cell in row.columns)
        if marked > 1 and single_choice:
            raise FormatError(
                "There cannot be multiple right answers in this mode")
        if marked == 0 and single_choice and not self.is_always_correct:
            raise FormatError(
                "There are no right answers for '{0}'".format(row.name))

    # Duplicate names would make rows/columns indistinguishable.
    for first, second in itertools.combinations(self.rows, 2):
        if first.name == second.name:
            raise FormatError("There are two rows with the same name "
                              "'{0}'".format(first.name))
    for first, second in itertools.combinations(self.columns, 2):
        if first.name == second.name:
            raise FormatError("There are two columns with the same name "
                              "'{0}'".format(first.name))
def __init__(self, source):
    """Validate that answer options are present and pairwise distinct."""
    super().__init__(source)
    self.options = source.options
    if not self.options:
        raise FormatError("Empty options")
    # A repeated option would make the correct answer ambiguous.
    if len(self.options) != len(set(self.options)):
        raise FormatError("Ambiguous options")
def _rnr_create_task(self, task_title):
    """Create new task on rootnroll platform."""
    url = '{0}/tasks'.format(self.rnr_api_url)
    try:
        response = requests.post(url, data={'title': task_title})
    except requests.exceptions.RequestException as e:
        raise FormatError(e)
    # rnr answers 201 Created on success.
    if response.status_code != 201:
        raise FormatError("Cannot create new task on rnr platform")
def __init__(self, source):
    """Parse a chemical expression/equation and validate it with its template."""
    super().__init__(source)
    left_expr, arrow, right_expr = split_on_arrow(self.source.expression)
    # The presence of an arrow turns a plain expression into an equation.
    self.is_equation = bool(arrow)
    if not self._is_valid_expression(self.source.expression,
                                     self.is_equation):
        raise FormatError("Chemical expression is invalid")
    if not self._is_valid_template(self.source.template):
        raise FormatError("Chemical template is invalid")
def __init__(self, source):
    """Parse numeric answer options and check every max_error is non-negative."""
    super().__init__(source)
    if not source.options:
        raise FormatError("At least one answer option should be provided")
    self.options = []
    for option in source.options:
        self.options.append({
            'answer': parse_decimal(option.answer, 'answer'),
            'max_error': parse_decimal(option.max_error, 'max_error'),
        })
    for option in self.options:
        if option['max_error'] < 0:
            raise FormatError("`max_error` should be non-negative")
def async_init(self):
    """Run the init/solve pipeline once to verify the challenge is solvable.

    Raises FormatError when the init SQL fails or when the reference
    solution does not score the generated dataset.
    """
    try:
        _, clue = self.generate()
    except InitSqlError as e:
        raise FormatError("Init SQL script failed:\n" +
                          MYSQL_ERROR.format(e.args[0], e.args[1]))
    score, hint = self.check(self.source.solve_sql, clue)
    if score:
        return None
    raise FormatError(
        "The challenge is broken. Please verify your solution "
        "query and check code.\n" + hint)
def __init__(self, source):
    """Check a TRIK Studio save is attached and the fields archive is a zip."""
    super().__init__(source)
    if len(source.studio_save) != 1:
        raise FormatError("TRIK Studio save file is not selected")
    if not source.fields_archive:
        return
    raw = attachment_content(source.fields_archive[0])
    try:
        zipfile.ZipFile(io.BytesIO(raw))
    except zipfile.BadZipFile:
        raise FormatError("Fields archive is not a valid zip file")
def clean_reply(self, reply, dataset):
    """Checks that reply is valid and transforms it before `check`.

    `choices` and `dataset` are parsed objects.  The reply must carry one
    flag per dataset option and mark exactly one of them `True`.
    """
    choices = reply.choices
    if len(choices) != len(dataset.options):
        raise FormatError("Reply has a wrong length")
    # Bug fix: `count(True) != 1` also fires when ZERO choices are marked,
    # but the old message claimed "more than one choice" — misleading the
    # learner.  State the actual requirement instead.
    if choices.count(True) != 1:
        raise FormatError("Reply should have exactly one choice")
    return choices
def _rnr_task_id(self, task_title):
    """Get rootnroll task id by task title.

    Returns the id, or None when the task does not exist (404).
    """
    url = '{0}/tasks/{1}'.format(self.rnr_api_url, task_title)
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException as e:
        raise FormatError(e)
    if response.status_code == 200:
        return response.json()['id']
    if response.status_code == 404:
        return None
    raise FormatError("Internal error in rnr platform")
def clean_reply(self, reply, dataset):
    """Extract the original file contents from the reply attachments."""
    if len(reply.solution) != 1:
        raise FormatError("The reply should contain a solution.c file")
    makefile_required = self.source.is_makefile_required
    if makefile_required and len(reply.makefile) != 1:
        raise FormatError("The reply should contain a Makefile file")
    makefile = reply.makefile[0]._original if makefile_required else None
    return {
        'solution': reply.solution[0]._original,
        'makefile': makefile,
    }
def _validate_source(self): if self.samples_count < 0: raise FormatError("Number of sample tests should be non-negative") self._validate_time_limit(self.execution_time_limit) for manual_time_limit in self.source.manual_time_limits: self._validate_language(manual_time_limit.language) self._validate_time_limit(manual_time_limit.time) self._validate_memory_limit(self.execution_memory_limit) for manual_memory_limit in self.source.manual_memory_limits: self._validate_language(manual_memory_limit.language) self._validate_memory_limit(manual_memory_limit.memory) if len(self.source.test_archive) > 1: raise FormatError("Number of test archives should be at most 1")
def __init__(self, source):
    """Validate matching pairs: non-empty and unambiguous on both sides."""
    super().__init__(source)
    self.preserve_firsts_order = source.preserve_firsts_order
    self.pairs = source.pairs
    if not self.pairs:
        raise FormatError("Empty pairs")
    firsts = [pair.first for pair in self.pairs]
    # Blank second parts are allowed to repeat; only non-blank ones must
    # be unique.
    seconds = [pair.second for pair in self.pairs if pair.second]
    has_duplicates = (len(set(firsts)) != len(firsts)
                      or len(set(seconds)) != len(seconds))
    if has_duplicates:
        raise FormatError("Ambiguous pairs")
def check_random():
    # Generate a random dataset, solve it with the reference code inside the
    # sandbox, and verify the resulting reply gets a full score of 1.
    # (Closure: `self`, `settings`, `JailedCodeFailed` come from the
    # enclosing scope.)
    try:
        dataset, clue = self.generate()
        reply = self.run_edyrun(
            'solve', data=dataset,
            output_limit=settings.DATASET_QUIZ_SIZE_LIMIT)
        score, hint = self.check(reply, clue, throw=True)
    except JailedCodeFailed as e:
        raise FormatError(str(e))
    if score == 1:
        return
    hint = '\nHint: {}'.format(hint) if hint else ''
    raise FormatError(
        'score of answer is {score} instead of 1.{hint}'.format(
            score=score, hint=hint))
def __init__(self, source):
    """Validate a choice quiz's sampling configuration.

    Raises FormatError when the sample size exceeds the number of options
    (always-correct mode) or when there are not enough correct answers.
    """
    super().__init__(source)
    self.is_multiple_choice = source.is_multiple_choice
    self.is_always_correct = source.is_always_correct
    self.sample_size = source.sample_size
    self.preserve_order = source.preserve_order
    self.options = source.options
    if self.is_always_correct:
        if self.sample_size > len(self.options):
            # Bug fix: user-facing message said "greater then".
            raise FormatError('Sample size is greater than the number '
                              'of available options')
    else:
        min_correct, max_correct = self.get_min_max_correct()
        if min_correct > max_correct:
            raise FormatError('Not enough answers')
def async_init(self):
    """Build the challenge options and sample tests asynchronously.

    Returns a dict with the generated tests, execution options (time and
    memory limits, per-language code templates) and generation warnings.
    Raises FormatError when the sandboxed generator/solver code fails.
    """
    samples = []
    try:
        tests = self.get_tests()
        dataset, output = self.run_edyrun('sample')
        if not dataset:
            # No explicit sample supplied: derive samples from the first
            # `samples_count` generated tests, solving each one with the
            # reference code to obtain its expected output.
            for i in range(min(self.samples_count, len(tests))):
                dataset, clue = tests[i]
                output = self.run_edyrun('solve', data=dataset)
                samples.append((dataset, output))
        else:
            # An explicit (dataset, output) sample pair was provided by the
            # 'sample' run; use it as the single sample.
            samples.append((dataset, output))
    except JailedCodeFailed as e:
        raise FormatError(str(e))
    return {
        'tests': tests,
        'options': {
            'execution_time_limit': self.execution_time_limit,
            'execution_memory_limit': self.execution_memory_limit,
            'limits': self.limits,
            'code_templates': {
                lang: temp[Directives.CODE]
                for lang, temp in self.code_templates.items()
            },
            'samples': samples,
        },
        'warnings': self._generate_warnings(tests),
    }
def _check_learner_query_result(self, query, result, clue):
    """Run the teacher's check code against the learner's query and result.

    Builds a small script that connects to the per-task database and
    appends the teacher-provided `check_code`, then executes it in the
    sandbox with `(query, result)` as data.

    Raises FormatError when the sandboxed code fails.
    """
    # Bug fix: the connect line previously contained the literal
    # user='******' (a credential-masking artifact) while the `user=`
    # format argument below went unused; restore the {user} placeholder so
    # the check connects as the task's actual database user.
    check_code = textwrap.dedent("""
        import MySQLdb

        db = MySQLdb.connect(host='{host}', port={port}, user='{user}',
                             passwd='{passwd}', db='{db}')
        cursor = db.cursor()

        def generate():
            pass

        def solve(dataset):
            pass

        {teacher_check}
        """).format(host=settings.SQL_DB_HOST,
                    port=settings.SQL_BIND_PORT,
                    user=clue['db_user'],
                    passwd=clue['db_pass'],
                    db=clue['db_name'],
                    teacher_check=self.source.check_code)
    try:
        return run('score', check_code, data=(query, result))
    except JailedCodeFailed as e:
        raise FormatError(str(e))
def clean_reply(self, reply, dataset):
    """Reject replies that attach more than one file."""
    if len(reply.files) > 1:
        raise FormatError("More than one file is submitted")
    # TODO: Add file content normalization here after download links on
    # attachments in reply is implemented. Currently normalization is
    # performed only on check. Learners download their original attached
    # files.
    return reply
def __init__(self, source):
    """Validate the matching pattern; compile it when regex mode is on.

    Raises FormatError for a malformed regular expression or for a
    pattern that matches the empty string (it would accept any reply).
    """
    super().__init__(source)
    self.pattern = source.pattern
    self.case_sensitive = source.case_sensitive
    self.use_re = source.use_re
    self.match_substring = source.match_substring
    if self.use_re:
        try:
            compiled = re.compile(self.pattern)
        # catching Exception and not re.error because compile can throw
        # not only re.error (ex pattern = '()'*100)
        except Exception:
            raise FormatError('Malformed regular expression')
        if compiled.match(''):
            # Bug fix: user-facing message said "empty sting".
            raise FormatError('Pattern matches empty string')
def parse_decimal(s, filed_name):
    """Convert *s* to Decimal after applying NUMBER_REPLACEMENTS.

    `filed_name` (sic — misspelling kept for keyword-caller compatibility)
    names the field in the FormatError message.
    """
    for old, new in NUMBER_REPLACEMENTS:
        s = s.replace(old, new)
    try:
        value = decimal.Decimal(s)
    except decimal.DecimalException:
        raise FormatError("Field `{}` should be a number".format(filed_name))
    return value
def async_init(self):
    """When answer code is supplied, run it and verify it scores 1."""
    if not self.use_code:
        return None
    try:
        answer = self.run_edyrun('solve', data={})
    except JailedCodeFailed as e:
        raise FormatError(str(e))
    reply = {'text': answer, 'files': []}
    score, hint = self.check(reply, '', throw=True)
    if score != 1:
        hint = '\nHint: {}'.format(hint) if hint else ''
        raise FormatError(
            'score of answer is {score} instead of 1.{hint}'.format(
                score=score, hint=hint))
    return None
def __init__(self, source):
    """Validate the free answer and its numerical-test bounds."""
    super().__init__(source)
    self.answer = source.answer
    if not self.answer.strip():
        raise FormatError('Correct answer should be non-empty')

    def parse_float(name):
        # Fields arrive as strings; validate via Decimal, store as float.
        raw = getattr(source.numerical_test, name)
        return float(parse_decimal(raw, 'numerical_test.{}'.format(name)))

    self.z_re_min = parse_float('z_re_min')
    self.z_re_max = parse_float('z_re_max')
    self.z_im_min = parse_float('z_im_min')
    self.z_im_max = parse_float('z_im_max')
    self.max_error = parse_float('max_error')
    self.integer_only = source.numerical_test.integer_only
    if self.z_re_min > self.z_re_max:
        raise FormatError('Incorrect Re z')
    if self.z_im_min > self.z_im_max:
        raise FormatError('Incorrect Im z')
    if self.max_error < 0:
        raise FormatError('Incorrect tolerated absolute error')
def get_tests(self):
    """Generate and validate the challenge's test list.

    Runs the teacher's `generate` code, appends archive-based tests,
    validates the structure (a sequence of (dataset, clue) pairs with
    string datasets, at most 100), then solves every test with the
    reference solution and requires a full score of 1.

    Raises FormatError on any structural or scoring problem.
    """
    # Local import: guarantees the `abc` submodule is loaded even if the
    # module only did `import collections` at the top of the file.
    import collections.abc

    tests = self.run_edyrun("generate", seed=random.randrange(10**6))
    tests.extend(self.generate_tests(self.zip_archive))
    # Bug fix: `collections.Sequence` was removed in Python 3.10 — the
    # alias must be `collections.abc.Sequence`.
    if not isinstance(tests, collections.abc.Sequence):
        raise FormatError("Generate should return a Sequence")
    if len(tests) > 100:
        raise FormatError("Too many tests (should be <= 100 tests)")
    if len(tests) == 0:
        raise FormatError("Empty test sequence (should be > 0 test)")
    for element in tests:
        if not (isinstance(element, collections.abc.Sequence)
                and len(element) == 2):
            raise FormatError("Test format is wrong")
        dataset, clue = element
        if not isinstance(dataset, str):
            raise FormatError("Test format is wrong")
    for dataset, clue in tests:
        msg = "{}\ndataset: {}\nclue: {}\nreply: {}\nresult: {}\nhint: {}"
        reply = self.run_edyrun('solve', data=dataset)
        result = self.score_one_test(reply, clue, throw=True)
        if result[0] != 1:
            raise FormatError(
                msg.format("Test is broken", dataset, clue, reply, *result))
    return tests
def __init__(self, source):
    """Initializes quiz instance from parsed source and raises `FormatError`
    if source is invalid."""
    super().__init__(source)
    # `source` is parsed: an object with an `options` field, not a dict.
    self.options = source.options
    # The source schema cannot express "exactly one correct option", so
    # enforce it here.
    correct_count = sum(1 for option in self.options if option.is_correct)
    if correct_count != 1:
        raise FormatError("Exactly one option must be correct")
def _check_bootstrap_script(self, script): with tempfile.NamedTemporaryFile(prefix='bootstrap-', suffix='.sh', mode='w', encoding='utf-8') as tf: tf.file.write(script) tf.flush() proc = subprocess.Popen(['/bin/bash', '-n', tf.name], stderr=subprocess.PIPE) try: _, stderr = proc.communicate(timeout=5) except subprocess.TimeoutExpired: try: proc.kill() except ProcessLookupError: pass raise FormatError("Cannot check bootstrap script syntax, " "took too much time") if proc.returncode != 0: msg = "Syntax error in bootstrap script:\n\n{0}".format( stderr.decode(errors='replace')) raise FormatError(msg)
def async_init(self):
    """Validate the VM challenge configuration against the rnr backend.

    Checks the image exists, the memory limit is in range, the bootstrap
    script parses, and the pytest scenario collects without errors.
    Returns the challenge options (a one-hour time limit).

    Raises FormatError for author-fixable problems and PluginError for
    backend/internal failures.
    """
    r = requests.get(RNR_IMAGE_URL.format(image_id=self.image_id),
                     auth=RNR_AUTH, timeout=DEFAULT_TIMEOUT)
    if r.status_code != 200:
        # 404 means the author pointed at a nonexistent image; anything
        # else is treated as a backend failure.
        if r.status_code == 404:
            raise FormatError("Image not found with ID: {}".format(
                self.image_id))
        raise PluginError("Internal server error: failed to connect to "
                          "backend which serves virtual machines")
    if self.memory > MAX_MEMORY_LIMIT:
        raise FormatError("Maximum value for memory limit is {} MB".format(
            MAX_MEMORY_LIMIT))
    if self.memory < MIN_MEMORY_LIMIT:
        raise FormatError("Minimum value for memory limit is {} MB".format(
            MIN_MEMORY_LIMIT))
    # Check bootstrap script syntax
    self._check_bootstrap_script(self.bootstrap_script)
    # Check pytest scenario (try to collect tests, but don't execute them)
    test_filename = 'test_scenario.py'
    pytest_files = [(self.test_scenario, test_filename)]
    pytest_argv = ['-m', 'pytest', '-s', '--collect-only', test_filename]
    result = jail_code_wrapper('python', code=None, files=pytest_files,
                               argv=pytest_argv, stdin=None)
    if result.status != 0:
        output = result.stdout.decode(errors='replace')
        errput = result.stderr.decode(errors='replace')
        # stderr output means pytest itself broke (internal error);
        # stdout-only failures are collection errors in the author's code.
        if errput:
            msg = ("Internal error while checking test scenario "
                   "correctness:\n\n{0}{1}".format(output, errput))
            logger.error(msg)
            raise PluginError(msg)
        msg = "Test scenario code contains errors:\n\n{0}".format(output)
        raise FormatError(msg)
    return {'options': {'time_limit': 60 * 60}}
def __init__(self, source):
    """Validate a fill-in-the-blanks problem.

    Checks component types are known, at least one blank exists, and the
    HTML of every text component survives sanitization intact.
    """
    super().__init__(source)
    if any(comp.type not in ComponentType.all
           for comp in self.source.components):
        raise FormatError("Invalid component type")
    self.blanks = [
        comp for comp in self.source.components
        if comp.type != ComponentType.TEXT
    ]
    if not self.blanks:
        raise FormatError(
            "The problem should contain at least one blank block")
    for component in self.source.components:
        if component.type != ComponentType.TEXT:
            continue
        text = component.text
        component.text = clean_html(component.text, strip=False)
        # Sanitizing must not change the number of angle brackets once
        # entities are decoded; a mismatch means the markup was malformed.
        # Bug fix: the checks previously compared '>' against '>' and '<'
        # against '<' (the '&gt;'/'&lt;' entities had been accidentally
        # unescaped), making both conditions always false — dead checks.
        is_incorrect_html = lambda entity, char: component.text.replace(
            entity, char).count(char) != text.count(char)
        if is_incorrect_html('&gt;', '>') or is_incorrect_html('&lt;', '<'):
            raise FormatError('Incorrect html: {}'.format(text))