Example #1
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []
        if isinstance(gen, str):
            filename = os.path.join(get_problem_root(self.problem.id), gen)
        else:
            filename = gen.source
            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args

        executor = self.problem.generator_manager.get_generator(
            filename, flags)
        # convert all args to str before launching; allows for smoother int passing
        proc = executor.launch_unsafe(*map(str, args),
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        try:
            input = self.problem.problem_data[
                self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None
        self._generated = list(map(self._normalize, proc.communicate(input)))
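
Every example on this page resolves paths against get_problem_root(problem_id), which maps a problem ID to the directory holding that problem's data (init.yml, test data, generators, checkers). As a rough, hypothetical sketch of the lookup these snippets rely on, and not the judge's actual implementation, it can be pictured as scanning a configured list of problem root directories:

import os

# Hypothetical sketch only: PROBLEM_ROOTS stands in for the judge's configured
# list of problem directories.
PROBLEM_ROOTS = ['/judge/problems']

def get_problem_root(problem_id):
    # Return the first configured root that contains this problem's init.yml.
    for root in PROBLEM_ROOTS:
        candidate = os.path.join(root, problem_id)
        if os.path.isfile(os.path.join(candidate, 'init.yml')):
            return candidate
    return None
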
Example #2
    def test_all(self):
        total_fails = 0

        for problem, _ in get_supported_problems():
            if self.problem_regex is not None and not self.problem_regex.match(
                    problem):
                continue
            root = get_problem_root(problem)
            test_dir = os.path.join(root, 'tests')
            if os.path.isdir(test_dir):
                fails = self.test_problem(problem, test_dir)
                if fails:
                    self.output(
                        ansi_style(
                            'Problem #ansi[%s](cyan|bold) #ansi[failed %d case(s)](red|bold).'
                        ) % (problem, fails))
                else:
                    self.output(
                        ansi_style(
                            'Problem #ansi[%s](cyan|bold) passed with flying colours.'
                        ) % problem)
                self.output()
                total_fails += fails

        return total_fails
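
test_all pairs get_problem_root with get_supported_problems(), which yields two-element tuples whose first item is the problem ID (the second is discarded here as "_"). A hedged sketch of such an enumerator, in the spirit of the get_problem_root sketch above; the second tuple element is assumed to be a modification timestamp:

import os

def get_supported_problems(problem_roots=('/judge/problems',)):
    # Illustrative only: yield (problem_id, mtime) pairs for every directory
    # under the configured roots that carries an init.yml. The mtime element
    # is an assumption; test_all above simply ignores it.
    for root in problem_roots:
        for problem_id in sorted(os.listdir(root)):
            init = os.path.join(root, problem_id, 'init.yml')
            if os.path.isfile(init):
                yield problem_id, os.path.getmtime(init)
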
Example #3
    def __init__(self, problem_id, time_limit, memory_limit, meta):
        self.id = problem_id
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.meta = ConfigNode(meta)
        self.generator_manager = GeneratorManager()

        # Cache root dir so that we don't need to scan all roots (potentially very slow on networked mount).
        self.root_dir = get_problem_root(problem_id)
        self.problem_data = ProblemDataManager(self)

        # Checkers modules must be stored in a dict, for the duration of execution,
        # lest globals be deleted with the module.
        self._checkers = {}

        try:
            doc = yaml.safe_load(self.problem_data['init.yml'])
            if not doc:
                raise InvalidInitException(
                    'I find your lack of content disturbing.')
            self.config = ConfigNode(doc,
                                     defaults={
                                         'wall_time_factor': 3,
                                         'output_prefix_length': 64,
                                         'output_limit_length': 25165824,
                                         'binary_data': False,
                                         'short_circuit': True,
                                         'symlinks': {},
                                         'meta': meta,
                                     })
        except (IOError, KeyError, ParserError, ScannerError) as e:
            raise InvalidInitException(str(e))

        self.problem_data.archive = self._resolve_archive_files()
        self._resolve_test_cases()
Example #4
 def _generate_binary(self):
     with open(os.path.join(get_problem_root(self.problem.id), self.problem.config['unit_test']), 'r') as unitTestFile:
         unitTestCode = unitTestFile.read()
     finalCode = self.source.decode() + '\n\n' + unitTestCode
     return executors[self.language].Executor(self.problem.id, finalCode.encode(),
                                              hints=self.problem.config.hints or [],
                                              unbuffered=self.problem.config.unbuffered)
Example #5
 def __missing__(self, key):
     try:
         return open(os.path.join(get_problem_root(self.problem_id), key), 'r').read()
     except IOError:
         if self.archive:
             zipinfo = self.archive.getinfo(key)
             return self.archive.open(zipinfo).read()
         raise KeyError()
Example #6
 def __missing__(self, key):
     try:
         return open(os.path.join(get_problem_root(self.problem_id), key), 'rb').read()
     except IOError:
         if self.archive:
             zipinfo = self.archive.getinfo(key)
             return self.archive.open(zipinfo).read()
         raise KeyError('file "%s" could not be found' % key)
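
The __missing__ overrides above (and the later variants that add clearer error messages and a context manager) all implement the same lazy-loading pattern: a dict subclass that reads a requested problem file from disk and, failing that, from the problem's zip archive. A minimal, self-contained sketch of that pattern; the class name and constructor are illustrative, not the judge's actual data-manager API:

import os

class LazyProblemData(dict):
    def __init__(self, root_dir, archive=None):
        super().__init__()
        self.root_dir = root_dir
        self.archive = archive  # an open zipfile.ZipFile, or None

    def __missing__(self, key):
        # Prefer the file on disk; fall back to the archive if one is attached.
        try:
            with open(os.path.join(self.root_dir, key), 'rb') as f:
                return f.read()
        except IOError:
            if self.archive:
                return self.archive.read(key)
            raise KeyError('file "%s" could not be found in "%s"' % (key, self.root_dir))

As in the originals, __missing__ does not store its result back into the dict, so repeated lookups re-read the file unless the caller caches it.
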
Example #7
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []

        # resource limits on how to run the generator
        time_limit = env.generator_time_limit
        memory_limit = env.generator_memory_limit
        use_sandbox = env.generator_sandboxing

        base = get_problem_root(self.problem.id)
        if isinstance(gen, six.string_types):
            filename = os.path.join(base, gen)
        else:
            filename = os.path.join(base, gen.source)
            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args

            time_limit = gen.time_limit or time_limit
            memory_limit = gen.memory_limit or memory_limit

            # Optionally allow disabling the sandbox
            if gen.use_sandbox is not None:
                use_sandbox = gen.use_sandbox

        executor = self.problem.generator_manager.get_generator(filename, flags)

        # convert all args to str before launching; allows for smoother int passing
        args = map(str, args)

        # we allow both "trusted" and "untrusted" generators, for different scenarios:
        # e.g., an untrusted generator may be one generated via site-managed data by an
        # arbitrary user, who shouldn't be allowed to do arbitrary things on the host machine
        if use_sandbox:
            # setting large buffers is really important, because otherwise stderr is unbuffered
            # and the generator begins calling into cptbox Python code really frequently
            proc = executor.launch(*args, time=time_limit, memory=memory_limit, pipe_stderr=True,
                                   stderr_buffer_size=65536, stdout_buffer_size=65536)
        else:
            proc = executor.launch_unsafe(*args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)

        try:
            input = self.problem.problem_data[self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None
        self._generated = list(map(self._normalize, proc.communicate(input)))

        if hasattr(proc, 'tle') and proc.tle:
            raise InternalError('generator timed out (> %s seconds)' % time_limit)
        if hasattr(proc, 'mle') and proc.mle:
            raise InternalError('generator ran out of memory (> %s Kb)' % memory_limit)
        if hasattr(proc, 'protection_fault') and proc.protection_fault:
            syscall, callname, args = proc.protection_fault
            raise InternalError('generator invoked disallowed syscall %s (%s)' % (syscall, callname))
        if proc.returncode:
            raise InternalError('generator exited with nonzero code: %s' % proc.returncode)
Example #8
 def __missing__(self, key):
     base = get_problem_root(self.problem_id)
     try:
         return open(os.path.join(base, key), 'rb').read()
     except IOError:
         if self.archive:
             zipinfo = self.archive.getinfo(key)
             return self.archive.open(zipinfo).read()
         raise KeyError('file "%s" could not be found in "%s"' % (key, base))
Example #9
 def _generate_interactor_binary(self):
     files = self.handler_data.files
     if not isinstance(files, list):
         files = [files]
     files = [
         os.path.join(get_problem_root(self.problem.id), f) for f in files
     ]
     return compile_with_auxiliary_files(
         files, self.handler_data.lang,
         self.handler_data.compiler_time_limit)
Example #10
def get_executor(problem_id, files, flags, lang, compiler_time_limit, should_cache):
    if isinstance(files, str):
        filenames = [files]
    elif isinstance(files.unwrap(), list):
        filenames = list(files.unwrap())

    filenames = [os.path.join(get_problem_root(problem_id), f) for f in filenames]
    executor = compile_with_auxiliary_files(filenames, flags, lang, compiler_time_limit, should_cache)

    return executor
Example #11
 def _resolve_archive_files(self):
     if self.config.archive:
         archive_path = os.path.join(get_problem_root(self.id), self.config.archive)
         if not os.path.exists(archive_path):
             raise InvalidInitException('archive file "%s" does not exist' % archive_path)
         try:
             archive = zipfile.ZipFile(archive_path, 'r')
         except zipfile.BadZipfile:
             raise InvalidInitException('bad archive: "%s"' % archive_path)
         return archive
     return None
Example #12
 def __missing__(self, key):
     base = get_problem_root(self.problem_id)
     try:
         with open(os.path.join(base, key), 'rb') as f:
             return f.read()
     except IOError:
         if self.archive:
             zipinfo = self.archive.getinfo(key)
             return self.archive.open(zipinfo).read()
         raise KeyError('file "%s" could not be found in "%s"' %
                        (key, base))
Example #13
 def _generate_interactor_binary(self):
     files = self.handler_data.files
     if isinstance(files, str):
         filenames = [files]
     elif isinstance(files.unwrap(), list):
         filenames = list(files.unwrap())
     filenames = [os.path.join(get_problem_root(self.problem.id), f) for f in filenames]
     flags = self.handler_data.get('flags', [])
     should_cache = self.handler_data.get('cached', True)
     return compile_with_auxiliary_files(
         filenames, flags, self.handler_data.lang, self.handler_data.compiler_time_limit, should_cache,
     )
Example #14
def get_executor(files, lang, compiler_time_limit, problem_id):
    global executor

    if executor is None:
        if not isinstance(files, list):
            files = [files]
        filenames = [
            os.path.join(get_problem_root(problem_id), f) for f in files
        ]
        executor = compile_with_auxiliary_files(
            filenames, compiler_time_limit=compiler_time_limit)

    return executor
Example #15
def get_executor(checker_kwargs, problem_id):
    global executor

    if executor is None:
        if 'files' not in checker_kwargs:
            raise InternalError('no checker file(s) specified!')
        if 'lang' not in checker_kwargs:
            raise InternalError('language not specified for checker!')

        filenames = list(map(lambda x: os.path.join(get_problem_root(problem_id), x), checker_kwargs['files']))
        lang = checker_kwargs['lang']
        executor = compile_with_auxiliary_files(filenames, lang, checker_kwargs['compiler_time_limit'])

    return executor
Example #16
    def __init__(self, problem_id, time_limit, memory_limit, meta):
        self.id = problem_id
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.meta = ConfigNode(meta)

        # Cache root dir so that we don't need to scan all roots (potentially very slow on networked mount).
        self.root_dir = get_problem_root(problem_id)
        self.problem_data = ProblemDataManager(self.root_dir)

        # Checkers modules must be stored in a dict, for the duration of execution,
        # lest globals be deleted with the module.
        self._checkers = {}

        self.config = ProblemConfig(self.problem_data, meta)

        self.problem_data.archive = self._resolve_archive_files()
        self._resolve_test_cases()
Example #17
    def test_problem(self, problem_id):
        self.output(
            ansi_style('Testing problem #ansi[%s](cyan|bold)...') % problem_id)

        config = ProblemConfig(ProblemDataManager(
            get_problem_root(problem_id)))

        if not config or 'tests' not in config or not config['tests']:
            self.output(
                ansi_style('\t#ansi[Skipped](magenta|bold) - No tests found'))
            return 0

        fails = 0
        for test in config['tests']:
            # Do this check here as we need some way to identify the test
            if 'source' not in test:
                self.output(
                    ansi_style(
                        '\t#ansi[Skipped](magenta|bold) - No source found for test')
                )
                continue

            test_name = test.get('label', test['source'])
            self.output(
                ansi_style('\tRunning test #ansi[%s](yellow|bold)') %
                test_name)
            try:
                test_fails = self.run_test(problem_id, test)
            except Exception:
                fails += 1
                self.output(
                    ansi_style(
                        '\t#ansi[Test failed with exception:](red|bold)'))
                self.output(traceback.format_exc())
            else:
                self.output(
                    ansi_style('\tResult of test #ansi[%s](yellow|bold): ') %
                    test_name + ansi_style([
                        '#ansi[Failed](red|bold)', '#ansi[Success](green|bold)'
                    ][not test_fails]))
                fails += test_fails

        return fails
Example #18
    def test_all(self):
        total_fails = 0

        for problem, _ in get_supported_problems():
            if self.problem_regex is not None and not self.problem_regex.match(problem):
                continue
            root = get_problem_root(problem)
            test_dir = os.path.join(root, 'tests')
            if os.path.isdir(test_dir):
                fails = self.test_problem(problem, test_dir)
                if fails:
                    self.output(ansi_style('Problem #ansi[%s](cyan|bold) #ansi[failed %d case(s)](red|bold).') %
                                (problem, fails))
                else:
                    self.output(ansi_style('Problem #ansi[%s](cyan|bold) passed with flying colours.') % problem)
                self.output()
                total_fails += fails

        return total_fails
Example #19
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []
        if isinstance(gen, str):
            filename = os.path.join(get_problem_root(self.problem.id), gen)
        else:
            filename = gen.source
            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args

        executor = self.problem.generator_manager.get_generator(filename, flags)
        # convert all args to str before launching; allows for smoother int passing
        proc = executor.launch_unsafe(*map(str, args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        try:
            input = self.problem.problem_data[self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None
        self._generated = list(map(self._normalize, proc.communicate(input)))
Example #20
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []

        # resource limits on how to run the generator
        time_limit = env.generator_time_limit
        memory_limit = env.generator_memory_limit
        compiler_time_limit = env.generator_compiler_time_limit
        lang = None  # Default to C/C++

        base = get_problem_root(self.problem.id)
        if isinstance(gen, str):
            filenames = gen
        elif isinstance(gen.unwrap(), list):
            filenames = list(gen.unwrap())
        else:
            if isinstance(gen.source, str):
                filenames = gen.source
            elif isinstance(gen.source.unwrap(), list):
                filenames = list(gen.source.unwrap())
            else:
                raise InvalidInitException('invalid generator declaration')

            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args

            time_limit = gen.time_limit or time_limit
            memory_limit = gen.memory_limit or memory_limit
            compiler_time_limit = gen.compiler_time_limit or compiler_time_limit
            lang = gen.language

        if not isinstance(filenames, list):
            filenames = [filenames]

        filenames = [
            os.path.abspath(os.path.join(base, name)) for name in filenames
        ]
        executor = compile_with_auxiliary_files(filenames, flags, lang,
                                                compiler_time_limit)

        # convert all args to str before launching; allows for smoother int passing
        args = map(str, args)

        # setting large buffers is really important, because otherwise stderr is unbuffered
        # and the generator begins calling into cptbox Python code really frequently
        proc = executor.launch(*args,
                               time=time_limit,
                               memory=memory_limit,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stderr_buffer_size=65536,
                               stdout_buffer_size=65536)

        try:
            input = self.problem.problem_data[
                self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None

        stdout, stderr = proc.unsafe_communicate(input)
        self._generated = list(map(self._normalize, (stdout, stderr)))

        parse_helper_file_error(proc, executor, 'generator', stderr,
                                time_limit, memory_limit)
Example #21
 def load_checker(self, name):
     if name in self._checkers:
         return self._checkers[name]
     self._checkers[name] = checker = load_module_from_file(os.path.join(get_problem_root(self.id), name))
     return checker
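
load_checker, like the custom-judge constructors further down, hands a path inside the problem root to load_module_from_file, which imports a Python module directly from that file. A minimal importlib-based sketch of such a helper; the function name matches the snippets, but the body is an approximation rather than the judge's exact implementation:

import importlib.util
import os

def load_module_from_file(path):
    # Build and execute a module object straight from a source file path.
    name = os.path.splitext(os.path.basename(path))[0]
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
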
Example #22
    def run_test(self, problem_id, config):
        if 'targets' in config and not self._check_targets(config['targets']):
            return 0

        return self._run_test_case(problem_id, get_problem_root(problem_id),
                                   config)
Example #23
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []

        # resource limits on how to run the generator
        time_limit = env.generator_time_limit
        memory_limit = env.generator_memory_limit
        compiler_time_limit = env.compiler_time_limit
        use_sandbox = env.generator_sandboxing
        lang = None  # Default to C/C++

        base = get_problem_root(self.problem.id)
        if isinstance(gen, six.string_types):
            filenames = gen
        elif isinstance(gen.unwrap(), list):
            filenames = list(gen.unwrap())
        else:
            if isinstance(gen.source, six.string_types):
                filenames = gen.source
            elif isinstance(gen.source.unwrap(), list):
                filenames = list(gen.source.unwrap())

            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args


            time_limit = gen.time_limit or time_limit
            memory_limit = gen.memory_limit or memory_limit
            compiler_time_limit = gen.compiler_time_limit or compiler_time_limit
            lang = gen.language

            # Optionally allow disabling the sandbox
            if gen.use_sandbox is not None:
                use_sandbox = gen.use_sandbox

        if not isinstance(filenames, list):
            filenames = [filenames]

        filenames = [os.path.join(base, name) for name in filenames]

        executor = self.problem.generator_manager.get_generator(filenames, flags, lang=lang,
                                                                compiler_time_limit=compiler_time_limit)

        # convert all args to str before launching; allows for smoother int passing
        args = map(str, args)

        # we allow both "trusted" and "untrusted" generators, for different scenarios:
        # e.g., an untrusted generator may be one generated via site-managed data by an
        # arbitrary user, who shouldn't be allowed to do arbitrary things on the host machine
        if use_sandbox:
            # setting large buffers is really important, because otherwise stderr is unbuffered
            # and the generator begins calling into cptbox Python code really frequently
            proc = executor.launch(*args, time=time_limit, memory=memory_limit, pipe_stderr=True,
                                   stderr_buffer_size=65536, stdout_buffer_size=65536)
        else:
            proc = executor.launch_unsafe(*args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)

        try:
            input = self.problem.problem_data[self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None
        self._generated = list(map(self._normalize, proc.communicate(input)))

        if hasattr(proc, 'tle') and proc.tle:
            raise InternalError('generator timed out (> %s seconds)' % time_limit)
        if hasattr(proc, 'mle') and proc.mle:
            raise InternalError('generator ran out of memory (> %s Kb)' % memory_limit)
        if hasattr(proc, 'protection_fault') and proc.protection_fault:
            syscall, callname, args = proc.protection_fault
            raise InternalError('generator invoked disallowed syscall %s (%s)' % (syscall, callname))
        if proc.returncode:
            raise InternalError('generator exited with nonzero code: %s' % proc.returncode)
Example #24
 def __init__(self, judge, problem, language, source):
     self.judge = judge
     self.mod = load_module_from_file(os.path.join(get_problem_root(problem.id), problem.config['custom_judge']))
     self._grader = self.mod.Grader(judge, problem, language, source)
Example #25
    def _run_generator(self, gen, args=None):
        flags = []
        args = args or []

        # resource limits on how to run the generator
        time_limit = env.generator_time_limit
        memory_limit = env.generator_memory_limit
        compiler_time_limit = env.generator_compiler_time_limit
        lang = None  # Default to C/C++

        base = get_problem_root(self.problem.id)
        if isinstance(gen, str):
            filenames = gen
        elif isinstance(gen.unwrap(), list):
            filenames = list(gen.unwrap())
        else:
            if isinstance(gen.source, str):
                filenames = gen.source
            elif isinstance(gen.source.unwrap(), list):
                filenames = list(gen.source.unwrap())
            else:
                raise InvalidInitException("invalid generator declaration")

            if gen.flags:
                flags += gen.flags
            if not args and gen.args:
                args += gen.args

            time_limit = gen.time_limit or time_limit
            memory_limit = gen.memory_limit or memory_limit
            compiler_time_limit = gen.compiler_time_limit or compiler_time_limit
            lang = gen.language

        if not isinstance(filenames, list):
            filenames = [filenames]

        filenames = [os.path.join(base, name) for name in filenames]
        executor = self.problem.generator_manager.get_generator(
            filenames,
            flags,
            lang=lang,
            compiler_time_limit=compiler_time_limit)

        # convert all args to str before launching; allows for smoother int passing
        args = map(str, args)

        # setting large buffers is really important, because otherwise stderr is unbuffered
        # and the generator begins calling into cptbox Python code really frequently
        proc = executor.launch(*args,
                               time=time_limit,
                               memory=memory_limit,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stderr_buffer_size=65536,
                               stdout_buffer_size=65536)

        try:
            input = self.problem.problem_data[
                self.config['in']] if self.config['in'] else None
        except KeyError:
            input = None

        stdout, stderr = proc.unsafe_communicate(input)
        self._generated = list(map(self._normalize, (stdout, stderr)))

        if proc.tle:
            raise InternalError('generator timed out (> %s seconds)' %
                                time_limit)
        if proc.mle:
            raise InternalError('generator ran out of memory (> %s Kb)' %
                                memory_limit)
        if proc.protection_fault:
            syscall, callname, args = proc.protection_fault
            raise InternalError(
                'generator invoked disallowed syscall %s (%s)' %
                (syscall, callname))
        if proc.returncode:
            error = 'generator exited with nonzero code %s' % proc.returncode
            # To get the feedback, we need a Result object, but we lack a Case object
            # So we set it to None because we don't need to access it
            result = Result(None)
            result.set_result_flag(proc)
            feedback = (proc.feedback
                        if hasattr(executor, 'feedback') and proc.feedback else
                        (getattr(executor, 'get_feedback', lambda x, y, z: '')(
                            stderr, result, proc)))
            if feedback:
                error += ' with feedback: %s' % feedback
            raise InternalError(error)
Example #26
 def __init__(self, judge, problem, language, source, meta):
     self.judge = judge
     self.mod = load_module_from_file(
         os.path.join(get_problem_root(problem.id),
                      problem.config['custom_judge']))
     self._grader = self.mod.Grader(judge, problem, language, source, meta)
Example #27
 def load_checker(self, name):
     if name in self._checkers:
         return self._checkers[name]
     self._checkers[name] = checker = load_module_from_file(
         os.path.join(get_problem_root(self.id), name))
     return checker
Example #28
    def run_test(self, problem_id: str, config: Dict[str, Any]) -> int:
        if 'targets' in config and not self._check_targets(config['targets']):
            return 0

        return self._run_test_case(problem_id, get_problem_root(problem_id),
                                   config)