def _verify_inputs(self, tests):
    """Check if correct solution exits with code 0 on all tests.

    :raises: :class:`~oioioi.problems.package.ProblemPackageError`
             otherwise.
    """
    env = self._find_and_compile('inwer')
    # Nothing to verify when there is no inwer or the package drives
    # everything through make.
    if not env or self.use_make:
        return
    jobs = {}
    for test in tests:
        job = env.copy()
        job.update({
            'job_type': 'inwer',
            'task_priority': TASK_PRIORITY,
            'exe_file': env['compiled_file'],
            'in_file': django_to_filetracker_path(test.input_file),
            'use_sandboxes': self.use_sandboxes,
        })
        jobs[test.name] = job
    results = run_sioworkers_jobs(jobs)
    # The compiled inwer binary is no longer needed once all jobs ran.
    get_client().delete_file(env['compiled_file'])
    for name, result in six.iteritems(results):
        if result['result_code'] != 'OK':
            raise ProblemPackageError(
                _("Inwer failed on test "
                  "%(test)s. Inwer output %(output)s")
                % {'test': name,
                   'output': '\n'.join(result['stdout'])})
    logger.info("%s: inwer success", self.filename)
def _verify_inputs(self, tests):
    """Check if correct solution exits with code 0 on all tests.

    :raises: :class:`~oioioi.problems.package.ProblemPackageError`
             otherwise.
    """
    env = self._find_and_compile('inwer')
    if env and not self.use_make:
        jobs = {}
        for test in tests:
            job = env.copy()
            job['job_type'] = 'inwer'
            job['task_priority'] = TASK_PRIORITY
            job['exe_file'] = env['compiled_file']
            job['in_file'] = django_to_filetracker_path(test.input_file)
            job['use_sandboxes'] = self.use_sandboxes
            jobs[test.name] = job
        jobs = run_sioworkers_jobs(jobs)
        # Clean up the compiled inwer binary from filetracker.
        get_client().delete_file(env['compiled_file'])
        # Use six.iteritems: dict.iteritems() does not exist on
        # Python 3 (consistent with the other _verify_inputs variant).
        for test_name, job in six.iteritems(jobs):
            if job['result_code'] != 'OK':
                raise ProblemPackageError(
                    _("Inwer failed on test "
                      "%(test)s. Inwer output %(output)s") % {
                        'test': test_name,
                        'output': '\n'.join(job['stdout'])
                    })
        logger.info("%s: inwer success", self.filename)
def _make_outs(self, outs_to_make):
    """Compiles the model solution and executes it in order to
    generate test outputs.

    :return: Result from workers.
    """
    compile_env = self._find_and_compile('', command='outgen')
    if not compile_env:
        return {}

    def _outgen_job(out_name, test):
        # Build one exec job generating a single output file.
        job = compile_env.copy()
        job['job_type'] = 'exec' if self.use_sandboxes else 'unsafe-exec'
        job['task_priority'] = TASK_PRIORITY
        job['exe_file'] = compile_env['compiled_file']
        job['upload_out'] = True
        job['in_file'] = django_to_filetracker_path(test.input_file)
        job['out_file'] = out_name
        if test.memory_limit:
            job['exec_mem_limit'] = test.memory_limit
        return job

    jobs = {test.name: _outgen_job(out_name, test)
            for out_name, test in outs_to_make}
    results = run_sioworkers_jobs(jobs)
    # The compiled generator is not needed after all outputs are made.
    get_client().delete_file(compile_env['compiled_file'])
    return results
def test_sioworkers_bindings(self):
    # Single-job round trip: a 'ping' job should echo back 'pong'.
    result = run_sioworkers_job({'job_type': 'ping', 'ping': 'e1'})
    self.assertEqual(result.get('pong'), 'e1')
    # Batch interface: results must come back keyed like the request.
    results = run_sioworkers_jobs({
        'key1': {'job_type': 'ping', 'ping': 'e1'},
        'key2': {'job_type': 'ping', 'ping': 'e2'},
    })
    self.assertEqual(results['key1'].get('pong'), 'e1')
    self.assertEqual(results['key2'].get('pong'), 'e2')
    self.assertEqual(len(results), 2)
def _make_outs(self, outs_to_make):
    """Generate test outputs by executing the compiled ``outgen``
    program on each requested test input.

    :return: Dictionary of worker results keyed by test name
             (empty if nothing was compiled).
    """
    env = self._find_and_compile('', command='outgen')
    if not env:
        return {}
    # Hoisted: the job type is the same for every test.
    job_type = 'exec' if self.use_sandboxes else 'unsafe-exec'
    jobs = {}
    for out_name, test in outs_to_make:
        # dict(env, ...) copies env and applies the per-test overrides.
        jobs[test.name] = dict(
            env,
            job_type=job_type,
            exe_file=env['compiled_file'],
            upload_out=True,
            in_file=django_to_filetracker_path(test.input_file),
            out_file=out_name)
    results = run_sioworkers_jobs(jobs)
    get_client().delete_file(env['compiled_file'])
    return results
def _verify_ins(self, tests):
    """Run the ``inwer`` input verifier on every test input.

    :raises: :class:`~oioioi.problems.package.ProblemPackageError`
             if inwer reports a non-OK result on any test.
    """
    env = self._find_and_compile('inwer')
    if env and not self.use_make:
        jobs = {}
        for test in tests:
            job = env.copy()
            job['job_type'] = 'inwer'
            job['exe_file'] = env['compiled_file']
            job['in_file'] = django_to_filetracker_path(test.input_file)
            job['use_sandboxes'] = self.use_sandboxes
            jobs[test.name] = job
        jobs = run_sioworkers_jobs(jobs)
        # Clean up the compiled inwer binary from filetracker.
        get_client().delete_file(env['compiled_file'])
        # Use six.iteritems: dict.iteritems() does not exist on
        # Python 3 (consistent with the six-based variant above).
        for test_name, job in six.iteritems(jobs):
            if job['result_code'] != 'OK':
                raise ProblemPackageError(_("Inwer failed on test "
                    "%(test)s. Inwer output %(output)s")
                    % {'test': test_name,
                       'output': '\n'.join(job['stdout'])}
                    )
        logger.info("%s: inwer success", self.filename)
def _make_outs(self, outs_to_make):
    """Run jobs to generate test outputs.

    :return: Result from workers.
    """
    env = self._find_and_compile('', command='outgen')
    if not env:
        return {}
    exec_type = 'exec' if self.use_sandboxes else 'unsafe-exec'
    jobs = {}
    for out_name, test in outs_to_make:
        job = env.copy()
        job.update({
            'job_type': exec_type,
            'task_priority': TASK_PRIORITY,
            'exe_file': env['compiled_file'],
            'upload_out': True,
            'in_file': django_to_filetracker_path(test.input_file),
            'out_file': out_name,
        })
        # Memory limit is optional per test.
        if test.memory_limit:
            job['exec_mem_limit'] = test.memory_limit
        jobs[test.name] = job
    results = run_sioworkers_jobs(jobs)
    # Remove the compiled generator once every output was produced.
    get_client().delete_file(env['compiled_file'])
    return results
def run_tests(env, kind=None, **kwargs):
    """Runs tests and saves their results into the environment

    If ``kind`` is specified, only tests with the given kind will be run.

    Used ``environ`` keys:

    * ``tests``: this should be a dictionary, mapping test name into
      the environment to pass to the ``exec`` job

    * ``unsafe_exec``: set to ``True`` if we want to use only
      ``ulimit()`` to limit the executable file resources, ``False``
      otherwise (see the documentation for ``unsafe-exec`` job for
      more information),

    * ``compiled_file``: the compiled file which will be tested,

    * ``check_outputs``: set to ``True`` if the output should be verified

    * ``checker``: if present, it should be the filetracker path
      of the binary used as the output checker,

    * ``save_outputs``: set to ``True`` if and only if each of
      test results should have its output file attached.

    * ``sioworkers_extra_args``: dict mapping kinds to additional
      arguments passed to
      :func:`oioioi.sioworkers.jobs.run_sioworkers_jobs` (kwargs).

    Produced ``environ`` keys:

    * ``test_results``: a dictionary, mapping test names into
      dictionaries with the following keys:

        ``result_code``
            test status: OK, WA, RE, ...
        ``result_string``
            detailed supervisor information (for example, where the
            required and returned outputs differ)
        ``time_used``
            total time used, in milliseconds
        ``mem_used``
            memory usage, in KiB
        ``num_syscalls``
            number of syscalls performed
        ``out_file``
            filetracker path to the output file (only if
            ``env['save_outputs']`` was set)

      If the dictionary already exists, new test results are appended.
    """
    jobs = dict()
    # six.iteritems: dict.iteritems() does not exist on Python 3.
    for test_name, test_env in six.iteritems(env['tests']):
        if kind and test_env['kind'] != kind:
            continue
        job = test_env.copy()
        # '' exec_mode yields plain 'exec' after stripping the dash.
        job['job_type'] = (env.get('exec_mode', '') + '-exec').lstrip('-')
        job['exe_file'] = env['compiled_file']
        job['check_output'] = env.get('check_outputs', True)
        if env.get('checker'):
            job['chk_file'] = env['checker']
        if env.get('save_outputs'):
            # setdefault: respect an out_file already set by the caller.
            job.setdefault('out_file',
                           _make_filename(env, test_name + '.out'))
            job['upload_out'] = True
        jobs[test_name] = job
    extra_args = env.get('sioworkers_extra_args', {}).get(kind, {})
    jobs = run_sioworkers_jobs(jobs, **extra_args)
    env.setdefault('test_results', {})
    for test_name, result in six.iteritems(jobs):
        env['test_results'].setdefault(test_name, {}).update(result)
    return env
def run_tests(env, kind=None, **kwargs):
    """Runs tests and saves their results into the environment

    If ``kind`` is specified, only tests with the given kind will be run.

    Used ``environ`` keys:

    * ``tests``: this should be a dictionary, mapping test name into
      the environment to pass to the ``exec`` job

    * ``unsafe_exec``: set to ``True`` if we want to use only
      ``ulimit()`` to limit the executable file resources, ``False``
      otherwise (see the documentation for ``unsafe-exec`` job for
      more information),

    * ``compiled_file``: the compiled file which will be tested,

    * ``checker``: if present, it should be the filetracker path
      of the binary used as the output checker,

    * ``save_outputs``: set to ``True`` if and only if each of
      test results should have its output file attached.

    Produced ``environ`` keys:

    * ``test_results``: a dictionary, mapping test names into
      dictionaries with the following keys:

        ``result_code``
            test status: OK, WA, RE, ...
        ``result_string``
            detailed supervisor information (for example, where the
            required and returned outputs differ)
        ``time_used``
            total time used, in seconds
        ``exectime_used``
            CPU time used, in seconds
        ``mem_used``
            memory usage, in kB
        ``num_syscalls``
            number of syscalls performed
        ``out_file``
            filetracker path to the output file (only if
            ``env['save_outputs']`` was set)

      If the dictionary already exists, new test results are appended.
    """
    jobs = dict()
    # six.iteritems: dict.iteritems() does not exist on Python 3.
    for test_name, test_env in six.iteritems(env["tests"]):
        if kind and test_env["kind"] != kind:
            continue
        job = test_env.copy()
        # "" exec_mode yields plain "exec" after stripping the dash.
        job["job_type"] = (env.get("exec_mode", "") + "-exec").lstrip("-")
        job["exe_file"] = env["compiled_file"]
        job["check_output"] = True
        if env.get("checker"):
            job["chk_file"] = env["checker"]
        if env.get("save_outputs"):
            job["out_file"] = _make_filename(env, test_name + ".out")
            job["upload_out"] = True
        jobs[test_name] = job
    jobs = run_sioworkers_jobs(jobs)
    env.setdefault("test_results", {})
    for test_name, result in six.iteritems(jobs):
        env["test_results"].setdefault(test_name, {}).update(result)
    return env
def run_tests(env, kind=None, **kwargs):
    """Runs tests and saves their results into the environment

    If ``kind`` is specified, only tests with the given kind will be run.

    Used ``environ`` keys:

    * ``tests``: this should be a dictionary, mapping test name into
      the environment to pass to the ``exec`` job

    * ``unsafe_exec``: set to ``True`` if we want to use only
      ``ulimit()`` to limit the executable file resources, ``False``
      otherwise (see the documentation for ``unsafe-exec`` job for
      more information),

    * ``compiled_file``: the compiled file which will be tested,

    * ``checker``: if present, it should be the filetracker path
      of the binary used as the output checker,

    * ``save_outputs``: set to ``True`` if and only if each of
      test results should have its output file attached.

    Produced ``environ`` keys:

    * ``test_results``: a dictionary, mapping test names into
      dictionaries with the following keys:

        ``result_code``
            test status: OK, WA, RE, ...
        ``result_string``
            detailed supervisor information (for example, where the
            required and returned outputs differ)
        ``time_used``
            total time used, in seconds
        ``exectime_used``
            CPU time used, in seconds
        ``mem_used``
            memory usage, in kB
        ``num_syscalls``
            number of syscalls performed
        ``out_file``
            filetracker path to the output file (only if
            ``env['save_outputs']`` was set)

      If the dictionary already exists, new test results are appended.
    """
    jobs = dict()
    # six.iteritems: dict.iteritems() does not exist on Python 3.
    for test_name, test_env in six.iteritems(env['tests']):
        if kind and test_env['kind'] != kind:
            continue
        job = test_env.copy()
        # '' exec_mode yields plain 'exec' after stripping the dash.
        job['job_type'] = (env.get('exec_mode', '') + '-exec').lstrip('-')
        job['exe_file'] = env['compiled_file']
        job['check_output'] = True
        if env.get('checker'):
            job['chk_file'] = env['checker']
        if env.get('save_outputs'):
            job['out_file'] = _make_filename(env, test_name + '.out')
            job['upload_out'] = True
        jobs[test_name] = job
    jobs = run_sioworkers_jobs(jobs)
    env.setdefault('test_results', {})
    for test_name, result in six.iteritems(jobs):
        env['test_results'].setdefault(test_name, {}).update(result)
    return env