def check(self, reply, clue):
    """Run the saved test scenario against the user's server and score it.

    :param reply: user reply (unused; the server state is what is graded)
    :param clue: the server id stored as the clue for this attempt
    :returns: tuple (bool passed, str hint)
    :raises PluginError: if the checker backend times out or fails
    """
    server_id = clue
    job = self._create_checker_job(server_id, self.test_scenario)
    try:
        job = self._wait_checker_job_ready(job['id'], timeout=270)
    except TimeoutError:
        raise PluginError("Check system ran too long")
    # NOTE(review): the original compared the status against
    # CheckerJobResult.FAILED, mixing the result and status enums; the
    # assert below shows job statuses come from CheckerJobStatus.
    if job['status'] == CheckerJobStatus.FAILED:
        raise PluginError("Check system failed")
    assert job['status'] == CheckerJobStatus.COMPLETED
    return job['result'] == CheckerJobResult.PASSED, job['hint']
def check(self, reply, clue):
    """Run the submitted TRIK Studio project in the trik sandbox.

    Optionally unpacks the teacher-supplied fields archive into the
    working directory before launching the checker container.
    """
    solution_files = [{
        'name': 'main.qrs',
        'content': attachment_content(reply)
    }]
    with epicbox.working_directory() as workdir:
        # noinspection PyAttributeOutsideInit
        self.log = logger.bind(workdir=workdir)
        if self.source.fields_archive:
            # TODO: extract all xmls from archive and upload using one epicbox run
            archive_files = [{
                'name': 'fields.zip',
                'content': attachment_content(self.source.fields_archive[0])
            }]
            self.log.info("Uploading and unpacking fields archive")
            unpack_cmd = 'mkdir -p fields/main && unzip fields.zip -d fields/main'
            unpack_result = epicbox.run('trik',
                                        command=unpack_cmd,
                                        files=archive_files,
                                        limits=UNARCHIVE_LIMITS,
                                        workdir=workdir)
            if unpack_result['exit_code'] != 0:
                raise PluginError("Failed to extract fields from the archive")
        self.log.info("Starting trik sandbox")
        run_result = epicbox.run('trik',
                                 files=solution_files,
                                 limits=EPICBOX_TRIK_LIMITS,
                                 workdir=workdir)
        return self._process_checker_result(run_result, workdir)
def _destroy_server(self, server_id):
    """Delete the virtual machine with the given id.

    A 404 response is accepted as success (the server is already gone).
    """
    url = RNR_SERVER_URL.format(server_id=server_id)
    response = requests.delete(url, auth=RNR_AUTH, timeout=DEFAULT_TIMEOUT)
    if response.status_code in (204, 404):
        return
    raise PluginError(
        "Failed to destroy the virtual machine: {0}".format(server_id))
def _create_bootstrap_sandbox(self, server, script):
    """Submit a sandbox that runs the bootstrap fabfile against the server.

    The user's bootstrap script is uploaded base64-encoded and executed
    over ssh by fabric inside the 'linux-bootstrap' sandbox profile.
    """
    command = (
        'fab bootstrap -f /bootstrap/fabfile.py -i /bootstrap/ssh-key '
        '-H {ip} -u root --hide=aborts,running,stdout'
    ).format(ip=server['private_ip'])
    encoded_script = base64.b64encode(script.encode()).decode()
    payload = {
        'profile': 'linux-bootstrap',
        'command': command,
        'files': [{'name': 'bootstrap.sh', 'content': encoded_script}],
        'limits': {'cputime': 305, 'realtime': 305, 'memory': 32},
    }
    response = requests.post(RNR_SANDBOXES_URL,
                             data=json.dumps(payload),
                             headers={'content-type': 'application/json'},
                             auth=RNR_AUTH,
                             timeout=DEFAULT_TIMEOUT)
    print("Create bootstrap sandbox response:", response.status_code,
          response.content)
    if response.status_code != 201:
        raise PluginError("Failed to bootstrap your virtual machine")
    return response.json()
def test_plugin_error_in_plugin(self, quizapi, free_answer_ctxt):
    """A PluginError raised inside a quiz must propagate through the API."""
    target = 'stepic_plugins.quizzes.free_answer.FreeAnswerQuiz.generate'
    with patch(target, side_effect=PluginError("Generate failed")), \
            pytest.raises(PluginError) as excinfo:
        quizapi.generate(free_answer_ctxt)
    assert excinfo.value.args[0] == "Generate failed"
def _create_terminal(self, server_id):
    """Create a terminal for the given ACTIVE server."""
    response = requests.post(RNR_TERMINALS_URL,
                             data={'server_id': server_id},
                             auth=RNR_AUTH,
                             timeout=DEFAULT_TIMEOUT)
    if response.status_code == 201:
        return response.json()
    raise PluginError("Failed to create a terminal for your virtual "
                      "machine instance")
def _wait_server_status(self, server_id, until_status, timeout=110):
    """Wait for server status to become `until_status`.

    Polls the server resource roughly once a second; request timeouts
    are retried silently.  Raises PluginError if the server enters the
    ERROR state or `timeout` seconds elapse.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            response = requests.get(
                RNR_SERVER_URL.format(server_id=server_id),
                auth=RNR_AUTH, timeout=DEFAULT_TIMEOUT)
        except requests.exceptions.Timeout:
            continue
        # A falsy Response means a 4xx/5xx status: keep polling.
        if response:
            server = response.json()
            if server['status'] == until_status:
                return server
            if server['status'] == ServerStatus.ERROR:
                raise PluginError("Failed to create new virtual machine "
                                  "instance")
        time.sleep(1)
    # TODO: delete instance
    raise PluginError("Timed out creating new virtual machine "
                      "instance")
def _create_server(self, image_id, memory):
    """Request a new virtual machine built from `image_id` with `memory` MB."""
    payload = {'image_id': image_id, 'memory': memory}
    response = requests.post(RNR_SERVERS_URL,
                             data=payload,
                             auth=RNR_AUTH,
                             timeout=DEFAULT_TIMEOUT)
    print("Create server response:", response.status_code, response.content)
    if response.status_code == 201:
        return response.json()
    raise PluginError("Failed to create new virtual machine instance")
def _wait_db_container_started(self, container):
    """Block until the db container reports readiness.

    Polls the container once a second for up to
    WAIT_DB_CONTAINER_STARTED_TIMEOUT seconds, watching its logs for the
    mysqld readiness marker (expected at least twice — presumably once
    for the init run and once for the final server start; confirm
    against the image used).

    :raises PluginError: if the container terminates, docker errors out,
        or readiness is not reached in time.
    """
    logger.info("Waiting for db container to be ready")
    try:
        for _ in range(WAIT_DB_CONTAINER_STARTED_TIMEOUT):
            ctr_info = self.docker.inspect_container(container)
            if not ctr_info['State']['Running']:
                logger.error(
                    "Started db container terminated unexpectedly")
                raise PluginError(
                    "Failed to start db container (terminated unexpectedly)"
                )
            logs = self.docker.logs(container)
            if logs.count(b"mysqld: ready for connections") >= 2:
                logger.info(
                    "DB container started and ready for connections")
                break
            time.sleep(1)
        else:
            # The original returned silently when the polling loop was
            # exhausted; an unready container must be reported as an error.
            logger.error(
                "Failed to start db container '{0}' (timed out)".format(
                    settings.SQL_CONTAINER_NAME))
            raise PluginError("Failed to start db container (timed out)")
    except (APIError, DockerException):
        logger.exception(
            "Failed to start db container '{0}' (timed out)".format(
                settings.SQL_CONTAINER_NAME))
        raise PluginError("Failed to start db container (timed out)")
def async_init(self):
    """Validate the quiz options against the backend and the sandbox.

    Checks, in order: that the requested image exists on the VM backend,
    that the memory limit lies within [MIN_MEMORY_LIMIT, MAX_MEMORY_LIMIT],
    that the bootstrap script passes the syntax check, and that the pytest
    test scenario at least collects without errors (it is not executed).

    :returns: extra step options (a one-hour time limit)
    :raises FormatError: for user-correctable problems with the options
    :raises PluginError: for internal/backend failures
    """
    r = requests.get(RNR_IMAGE_URL.format(image_id=self.image_id),
                     auth=RNR_AUTH, timeout=DEFAULT_TIMEOUT)
    if r.status_code != 200:
        # 404 is the user's mistake (bad image id); anything else is ours.
        if r.status_code == 404:
            raise FormatError("Image not found with ID: {}".format(
                self.image_id))
        raise PluginError("Internal server error: failed to connect to "
                          "backend which serves virtual machines")
    if self.memory > MAX_MEMORY_LIMIT:
        raise FormatError("Maximum value for memory limit is {} MB".format(
            MAX_MEMORY_LIMIT))
    if self.memory < MIN_MEMORY_LIMIT:
        raise FormatError("Minimum value for memory limit is {} MB".format(
            MIN_MEMORY_LIMIT))
    # Check bootstrap script syntax
    self._check_bootstrap_script(self.bootstrap_script)
    # Check pytest scenario (try to collect tests, but don't execute them)
    test_filename = 'test_scenario.py'
    pytest_files = [(self.test_scenario, test_filename)]
    pytest_argv = ['-m', 'pytest', '-s', '--collect-only', test_filename]
    result = jail_code_wrapper('python', code=None, files=pytest_files,
                               argv=pytest_argv, stdin=None)
    if result.status != 0:
        output = result.stdout.decode(errors='replace')
        errput = result.stderr.decode(errors='replace')
        if errput:
            # Output on stderr means pytest itself broke, not the user's
            # scenario: report it as an internal error.
            msg = ("Internal error while checking test scenario "
                   "correctness:\n\n{0}{1}".format(output, errput))
            logger.error(msg)
            raise PluginError(msg)
        msg = "Test scenario code contains errors:\n\n{0}".format(output)
        raise FormatError(msg)
    return {'options': {'time_limit': 60 * 60}}
def _create_checker_job(self, server_id, test_scenario):
    """Start a checker job that runs `test_scenario` against the server."""
    payload = {'server': server_id, 'test_scenario': test_scenario}
    response = requests.post(RNR_CHECKER_JOBS_URL,
                             data=payload,
                             auth=RNR_AUTH,
                             timeout=DEFAULT_TIMEOUT)
    print("Create checker job response:", response.status_code,
          response.content)
    if response.status_code == 201:
        return response.json()
    raise PluginError("Failed to create new checker job for server_id:"
                      " {0}".format(server_id))
def generate(self):
    """Generate a (dataset, clue) pair via the user-supplied edyrun code.

    The dataset must be a dict with a string ``'file'`` entry; anything
    else is rejected.

    :raises PluginError: if the jailed code fails or the dataset shape
        is invalid.
    """
    seed = random.randrange(10**9)
    try:
        dataset, clue = self.run_edyrun(
            'generate', seed=seed,
            output_limit=settings.DATASET_QUIZ_SIZE_LIMIT)
        if not (isinstance(dataset, dict) and 'file' in dataset and
                isinstance(dataset['file'], str)):
            raise TypeError("Bad dataset")
        return dataset, clue
    except (JailedCodeFailed, ValueError, TypeError) as e:
        # Chain the original exception so tracebacks keep the real cause.
        raise PluginError(str(e)) from e
def _bootstrap_server(self, server, script):
    """Run the user's bootstrap script on the freshly created server.

    Creates a bootstrap sandbox, waits for it to terminate, and turns
    every failure mode into a PluginError with user-facing details.
    """
    print("Start to bootstrap server:", server)
    if not server['private_ip']:
        raise PluginError("Failed to bootstrap your virtual machine: "
                          "VM network is down")
    sandbox = self._create_bootstrap_sandbox(server, script)
    try:
        sandbox = self._wait_sandbox_terminated(sandbox, timeout=150)
    except TimeoutError:
        raise PluginError("Failed to bootstrap your virtual machine: "
                          "took too much time")
    finished_ok = sandbox['status'] == SandboxStatus.SUCCESS
    if finished_ok and sandbox['exit_code'] != 0:
        # The sandbox itself ran fine, but the user's script failed:
        # show its output so the learner can debug the script.
        stdout, stderr = (
            base64.b64decode(sandbox[stream]).decode(errors='replace')
            for stream in ('stdout', 'stderr')
        )
        raise PluginError("Failed to bootstrap your virtual machine.\n\n"
                          "{0}\n{1}".format(stdout, stderr))
    if sandbox['status'] == SandboxStatus.FAILURE:
        raise PluginError(
            "Failed to bootstrap your virtual machine: {0}".format(
                sandbox['error']))
def _wait_submission_checked(self, submission_id):
    """Poll the external checker until the submission gets a valid score.

    Waits in two phases — first for the check to be started (the
    submission gains a 'solve_status' field), then for the check to
    finish (the field becomes non-empty) — and validates that the final
    score lies in [0, 1].

    :raises PluginError: on either timeout or an out-of-range score.
    """
    log = logger.bind(submission_id=submission_id)
    log.info("Waiting for submission check to be started")
    for _ in range(CHECK_STARTED_TIMEOUT):
        submission = self._get_submission(submission_id)
        if submission and 'solve_status' in submission:
            break
        time.sleep(1)
    else:
        msg = "Timed out while waiting for submission check to be started"
        log.error(msg)
        raise PluginError(msg)
    log.info("Submission check started. Waiting for check readiness",
             submission=submission)
    for _ in range(CHECK_READY_TIMEOUT):
        submission = self._get_submission(submission_id)
        if submission and submission['solve_status'] != '':
            break
        time.sleep(1)
    else:
        msg = "Failed to check submission"
        log.error(msg)
        raise PluginError(msg)
    log.info("Submission has been checked successfully",
             submission=submission)
    solve_status = submission['solve_status']
    if (not isinstance(solve_status, (int, float)) or
            not (0 <= solve_status <= 1)):
        log.error("Submission solve_status is incorrect",
                  solve_status=solve_status)
        raise PluginError("Failed to score the reply")
    return submission
def _start_db_container(self):
    """Start a new db container if it hasn't been started yet.

    Returns the container already registered under SQL_CONTAINER_NAME if
    one exists; otherwise pulls the image when missing, creates the
    container with the configured root password, data volume and host
    port binding, starts it, and waits for it to become ready.

    :raises PluginError: if the container cannot be created/started or
        does not become ready in time.
    """
    try:
        return self.docker.inspect_container(settings.SQL_CONTAINER_NAME)
    except NotFound:
        pass
    # TODO: start if stopped
    # TODO: handle docker connection error
    logger.info("Starting new db container")
    try:
        self.docker.inspect_image(settings.SQL_CONTAINER_IMAGE)
    except NotFound:
        logger.info(
            "Docker image for db container wasn't found, pulling...")
        self.docker.pull(settings.SQL_CONTAINER_IMAGE)
        logger.info(
            "Docker image for db container has been pulled successfully")
    # Expose the container's SQL port on the configured host interface.
    port_bindings = {
        settings.SQL_CONTAINER_PORT: (settings.SQL_BIND_HOST,
                                      settings.SQL_BIND_PORT)
    }
    environment = {'MYSQL_ROOT_PASSWORD': settings.SQL_DB_ROOT_PASS}
    # Bind a named volume so db data survives container re-creation.
    host_config = create_host_config(binds=[
        '{}:{}'.format(settings.SQL_CONTAINER_NAME,
                       settings.SQL_CONTAINER_VOLUME)
    ])
    try:
        ctr = self.docker.create_container(
            settings.SQL_CONTAINER_IMAGE,
            command=settings.SQL_DB_CONF_OPTIONS,
            name=settings.SQL_CONTAINER_NAME,
            environment=environment,
            host_config=host_config)
        # NOTE(review): passing port_bindings/restart_policy to start()
        # is legacy docker-py API; newer versions expect them in the
        # host_config — confirm against the pinned docker-py version.
        self.docker.start(ctr,
                          port_bindings=port_bindings,
                          restart_policy={'Name': 'always'})
    except (APIError, DockerException):
        logger.exception("Failed to start db container '{0}'".format(
            settings.SQL_CONTAINER_NAME))
        raise PluginError("Failed to start db container")
    self._wait_db_container_started(ctr)
    return ctr
def check(self, reply, clue):
    """Submit the reply to the external checker service and score it.

    Encodes the solution (and the makefile, when required) in base64,
    creates a submission, waits for it to be checked, and assembles the
    feedback from error/compilation/execution logs.

    :returns: tuple (score in [0, 1] or False when the service is busy,
        feedback string)
    :raises PluginError: if the service rejects the submission or the
        check fails.
    """
    status = self._get_service_status()
    if status != STATUS_IDDLE:
        return False, ("Service is busy right now. Try to submit again "
                       "a bit later.")
    data = {
        'task_id': self.source.task_id,
        'solution': base64.b64encode(
            attachment_content(reply['solution'])).decode(),
        'makefile': '',
    }
    if self.source.is_makefile_required:
        data['makefile'] = base64.b64encode(
            attachment_content(reply['makefile'])).decode()
    log = logger.bind(task_id=self.source.task_id)
    log.info("Creating new submission")
    response = requests.post(SUBMISSION_LIST_URL, json=data,
                             timeout=DEFAULT_TIMEOUT)
    # A falsy Response means a 4xx/5xx status code.
    if not response:
        msg = "Failed to check the reply"
        # Use the bound logger so the task_id context is kept (the
        # original logged via the unbound module logger here).
        log.error(msg, status_code=response.status_code,
                  body=response.text)
        raise PluginError(msg)
    submission_id = response.json()['id']
    log.info("Submission created", submission_id=submission_id)
    submission = self._wait_submission_checked(submission_id)
    feedback_list = []
    if submission['error_message']:
        feedback_list.append("Error: " + submission['error_message'])
    compilation_log = submission['comp_log'].strip()
    if compilation_log:
        feedback_list.append("Compilation log: " + compilation_log)
    execution_log = submission['comp_exec'].strip()
    if execution_log:
        feedback_list.append("Execution log: " + execution_log)
    feedback_list.append("Unique solution id: {0}".format(submission_id))
    feedback = '\n\n'.join(feedback_list)
    return submission['solve_status'], feedback