Example #1
    def test_deny_system_call(self):
        result = sandbox.run('system_call')
        self.assertEqual(result["result"], sandbox.RESULT_SUCCESS)

        result = sandbox.run('system_call', seccomp_rule=sandbox.RULE_CLIKE)
        self.assertEqual(result["result"], sandbox.RESULT_RUNTIME_ERROR)
        self.assertEqual(result["signal"], 31)  # SIGSYS
Example #2
def execute():
    ip = request.forms.get('ip')
    if len(ip) > 40:
        return template('index', output='IP address is too long.')

    cmd_type = int(request.params['cmdtype'])

    output_text = ''
    if cmd_type == 0:
        try:
            for x in ip.split('.'):
                output_text += bin(int(x))[2:] + '.'
            output_text = output_text[:-1]
        except Exception:
            output_text = 'Invalid ip address'
    elif cmd_type == 1:
        try:
            output_text = socket.gethostbyname(ip)
        except socket.gaierror as e:
            output_text = e.args[1]
    elif cmd_type == 2:
        cmd = 'traceroute ' + ip
        print('cmd: ' + cmd)
        output_text = sandbox.run(cmd)
        print('output_text: ' + output_text)

    return template('index', output=output_text)
Example #3
def run_compile(op: Operation = None) -> OperationResult:
    # print(json.dumps(asdict(op), indent=2))
    if op is None:
        return OperationResult(success=False, info={'message': 'Empty operation'})
    config = op.config
    err_file = os.path.join(config.root_dir, 'compile.stderr.log')
    res = sandbox.run(command=config.command,
                      max_cpu_time=config.max_cpu_time,
                      max_real_time=config.max_real_time,
                      max_memory=config.max_memory,
                      memory_check_only=config.memory_check_only,
                      max_stack=config.max_stack,
                      max_output_size=config.max_output_size,
                      max_process=config.max_process,
                      input_file=None,
                      output_file=None,
                      err_file=err_file,
                      log_file='/dev/null',
                      file_io=None,
                      env=config.env,
                      uid=None,
                      gid=None,
                      seccomp_rule=config.seccomp_rule
                      )
    # print(res)
    # print(json.dumps(res, indent=2))
    with open(err_file, 'r') as err_fd:
        res['stderr'] = err_fd.read()
    if 'result' in res:
        success = res['result'] == ResultCode.ACCEPTED
    else:
        success = False
    return OperationResult(success=success, info=from_dict(data_class=RunnerResult,
                                                           data=res))
Example #4
    def run(self, id, name, time, memory, fin=None, fout=None):
        null, zero = open('/dev/null', 'w'), open('/dev/zero', 'r')
        err = open(self.sandbox(self.err_log), 'w')
        fin = self.sandbox(fin) if fin else fin
        fout = self.sandbox(fout) if fout else fout
        sandbox = SolutionsSandbox(**{
            'args': name,
            'stdin': open(fin, 'r') if fin else zero,
            'stdout': open(fout, 'w') if fout else null,
            'stderr': err,
            'quota': {
                'wallclock': 30000,
                'cpu': time,
                'memory': memory,
                'disk': 1024 * 1024 * 10,  # 10 MB
            }
        })
        sandbox.run()
        probe_result = sandbox.probe()
        null.close()
        zero.close()
        err.close()

        sh.rm(name).wait()
        short_result = self.result(probe_result)
        with open(self.sandbox(self.err_log), 'r') as f:
            err_result = f.read()
        sh.rm(self.sandbox(self.err_log)).wait()
        if fout:
            with open(fout, 'r') as f:
                detail_result = f.read()
            sh.rm(fout).wait()
        else:
            detail_result = err_result

        return (short_result, detail_result, err_result)
Example #5
def run_a_and_b(code, report_model, report_model_path, read_pipe,
                write_pipe, close_pipes):
    for pipe in close_pipes:
        os.close(pipe)
    r, w = os.fdopen(read_pipe, 'r'), os.fdopen(write_pipe, 'w')
    p = run(code.language,
            code.workspace,
            stdin=r,
            stdout=w,
            time_limit=time_limit)
    r.close()
    w.close()
    report_model.finish_time = timezone.now()
    report_model.time_consumption = p["time"]
    report_model.return_code = p["exit_code"]
    with open(report_model_path, "wb") as report_model_writer:
        pickle.dump(report_model, report_model_writer)
    os._exit(0)
Example #6
def run_three_programs(a: Code, b: Code, c: Code, time_limit: int):
    """
    Basically same as run_two_programs. c is judge
    """
    if a.is_compiled != 1 or b.is_compiled != 1 or c.is_compiled != 1:
        raise ValueError("Three programs should be compiled")

    r1, w1 = os.pipe()  # judge read, code_a write
    r2, w2 = os.pipe()  # code_a read, judge write
    r3, w3 = os.pipe()  # judge read, code_b write
    r4, w4 = os.pipe()  # code_b read, judge write

    report_workspace = os.path.join(settings.DATA_DIR, timestamp_onlydigit())
    os.makedirs(report_workspace, exist_ok=True)
    report_file_path = os.path.join(report_workspace, "report")
    report_model_a_path = os.path.join(report_workspace, "model_a")
    report_model_b_path = os.path.join(report_workspace, "model_b")

    report_model_a = RunningReport.objects.create(code=a)
    report_model_b = RunningReport.objects.create(code=b)
    report_model_c = RunningReport.objects.create(code=c)

    interactor_pid = os.fork()
    if interactor_pid == 0:
        # Child process: fork again so that a and b each run in their own process
        os.close(r1)
        os.close(w2)
        os.close(r3)
        os.close(w4)

        def run_a_and_b(code, report_model, report_model_path, read_pipe,
                        write_pipe, close_pipes):
            for pipe in close_pipes:
                os.close(pipe)
            r, w = os.fdopen(read_pipe, 'r'), os.fdopen(write_pipe, 'w')
            p = run(code.language,
                    code.workspace,
                    stdin=r,
                    stdout=w,
                    time_limit=time_limit)
            r.close()
            w.close()
            report_model.finish_time = timezone.now()
            report_model.time_consumption = p["time"]
            report_model.return_code = p["exit_code"]
            with open(report_model_path, "wb") as report_model_writer:
                pickle.dump(report_model, report_model_writer)
            os._exit(0)

        new_pid = os.fork()
        if new_pid == 0:
            # Code a
            run_a_and_b(a, report_model_a, report_model_a_path, r2, w1,
                        [w3, r4])
        else:
            # Code b
            run_a_and_b(b, report_model_b, report_model_b_path, r4, w3,
                        [w1, r2])
            os.wait4(new_pid, os.WSTOPPED)
    else:
        os.close(w1)
        os.close(r2)
        os.close(w3)
        os.close(r4)
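        # Clear close-on-exec so the judge process keeps these fds across exec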
        fcntl.fcntl(r1, fcntl.F_SETFD, 0)
        fcntl.fcntl(w2, fcntl.F_SETFD, 0)
        fcntl.fcntl(r3, fcntl.F_SETFD, 0)
        fcntl.fcntl(w4, fcntl.F_SETFD, 0)

        with open(report_file_path, "w") as report_writer:
            p = run(c.language,
                    c.workspace,
                    add_args=[str(r1), str(w2),
                              str(r3), str(w4)],
                    stdout=report_writer,
                    time_limit=time_limit)
        with open(report_file_path, "r") as report_reader:
            report_model_c.raw_output = report_reader.read()
            report_model_c.finish_time = timezone.now()
            report_model_c.time_consumption = p["time"]
            report_model_c.return_code = p["exit_code"]
            report_model_c.save()

        os.wait4(interactor_pid, os.WSTOPPED)

        with open(report_model_a_path, "rb") as report_model_reader:
            report_model_a = pickle.load(report_model_reader)
            report_model_a.save()
        with open(report_model_b_path, "rb") as report_model_reader:
            report_model_b = pickle.load(report_model_reader)
            report_model_b.save()

    return report_model_a, report_model_b, report_model_c
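For reference, here is a minimal sketch of what a judge program c might look like for run_three_programs, assuming it is a Python script. The fd order follows the add_args list above (read-from-a, write-to-a, read-from-b, write-to-b); the "42" task and the "AC"/"WA" verdict strings are purely illustrative, not part of the original code.

import sys

def main():
    # Inherited pipe fds, in the order passed via add_args above.
    from_a = open(int(sys.argv[1]), 'r', closefd=False)  # r1: read from player a
    to_a = open(int(sys.argv[2]), 'w', closefd=False)    # w2: write to player a
    from_b = open(int(sys.argv[3]), 'r', closefd=False)  # r3: read from player b
    to_b = open(int(sys.argv[4]), 'w', closefd=False)    # w4: write to player b

    # Hand the same (made-up) task to both players and compare their answers.
    to_a.write("42\n")
    to_a.flush()
    to_b.write("42\n")
    to_b.flush()
    ans_a = from_a.readline().strip()
    ans_b = from_b.readline().strip()
    print("AC" if ans_a == ans_b else "WA")  # verdict on stdout becomes the report

if __name__ == '__main__':
    main()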
Example #7
    def test_runtime_error(self):
        result = sandbox.run('test_runtime_error')
        self.assertEqual(result["result"], sandbox.RESULT_RUNTIME_ERROR)
        self.assertEqual(result["exit_code"], 1)
Example #8
    def test_output_limit(self):
        stdout = os.path.join(self.directory, 'stdout.txt')
        result = sandbox.run('test_output_limit',
                             max_output_size=1048576,
                             stdout=stdout)
        self.assertEqual(result["result"], sandbox.RESULT_RUNTIME_ERROR)
Example #9
def main():
  sandbox.register("line_stats.parseFile", parse_file)
  sandbox.run()
Example #10
    def test_stderr(self):
        stderr = os.path.join(self.directory, 'stderr.txt')
        result = sandbox.run('test_stderr', stderr=stderr)

        with open(stderr) as f:
            self.assertEqual(f.read(), 'stderr message')
Example #11
    def test_stdout(self):
        stdout = os.path.join(self.directory, 'stdout.txt')
        result = sandbox.run('test_stdout', stdout=stdout)

        with open(stdout) as f:
            self.assertEqual(f.read(), 'stdout message')
Example #12
    def test_success(self):
        result = sandbox.run('test_success')
        self.assertEqual(result["result"], sandbox.RESULT_SUCCESS)
Example #13
    elif sub_task_type == 'sum':
        score = int(sub_task.score * (ac_count / len(sub_task.cases)))
    return SubTaskResult(sub_task_id=sub_task.sub_task_id,
                         type=sub_task_type, score=score, cases=cases)


def run_case(config: RunConfig = None, case: Case = None) -> Optional[CaseResult]:
    if case is None or config is None:
        return None
    # print(case)
    err_file = os.path.join(config.root_dir, 'stderr')
    res = sandbox.run(command=config.command,
                      max_cpu_time=config.max_cpu_time,
                      max_real_time=config.max_real_time,
                      max_memory=config.max_memory,
                      memory_check_only=config.memory_check_only,
                      max_stack=config.max_stack,
                      max_output_size=config.max_output_size,
                      max_process=config.max_process,
                      input_file=case.input_name,
                      output_file=os.path.join(config.root_dir, 'stdout'),
                      err_file=err_file,
                      log_file=os.path.join(config.root_dir, 'log.log'),
                      file_io=False,
                      env=config.env,
                      uid=None,
                      gid=None,
                      seccomp_rule=config.seccomp_rule
                      )
    output_str = None
    res['result'] = ResultCode(res['result'])
Example #14
def run():
    sandbox.run()
Example #15
def run_two_programs(a: Code, b: Code, time_limit: int):
    """
    :param a: submitted code
    :param b: interactor (judge)

    judge should follow the following protocol:
    stdin: abandoned
    stdout: report
    arg[1]: read, file descriptor
    arg[2]: write, file descriptor

    possibly arg[3] and arg[4] in run_three_programs
    arg[3]: read, file descriptor
    arg[4]: write, file descriptor

    :return: (RunningReport a, RunningReport b)
    """
    if a.is_compiled != 1 or b.is_compiled != 1:
        raise ValueError("Two programs should be compiled")

    r1, w1 = os.pipe()  # interactor read, submission write
    r2, w2 = os.pipe()  # submission read, interactor write

    report_workspace = os.path.join(settings.DATA_DIR, timestamp_onlydigit())
    os.makedirs(report_workspace, exist_ok=True)
    report_file_path = os.path.join(report_workspace, "report")
    report_model_path = os.path.join(report_workspace, "model")

    report_model_a = RunningReport.objects.create(code=a)
    report_model_b = RunningReport.objects.create(code=b)

    interactor_pid = os.fork()
    if interactor_pid == 0:
        # This is the child process for interactor running usage
        os.close(w1)
        os.close(r2)
        fcntl.fcntl(r1, fcntl.F_SETFD, 0)  # clear close-on-exec so the fd survives exec
        fcntl.fcntl(w2, fcntl.F_SETFD, 0)

        with open(report_file_path, "w") as report_writer:
            p = run(b.language,
                    b.workspace,
                    add_args=[str(r1), str(w2)],
                    stdout=report_writer,
                    time_limit=time_limit)
        with open(report_file_path, "r") as report_reader:
            report_model_b.raw_output = report_reader.read()
            report_model_b.finish_time = timezone.now()
            report_model_b.time_consumption = p["time"]
            report_model_b.return_code = p["exit_code"]
        with open(report_model_path, "wb") as report_model_writer:
            pickle.dump(report_model_b, report_model_writer)
        os._exit(0)
    else:
        # This is the parent process for submission
        os.close(r1)
        os.close(w2)
        r, w = os.fdopen(r2, 'r'), os.fdopen(w1, 'w')
        p = run(a.language,
                a.workspace,
                stdin=r,
                stdout=w,
                time_limit=time_limit)
        r.close()
        w.close()
        report_model_a.finish_time = timezone.now()
        report_model_a.time_consumption = p["time"]
        report_model_a.return_code = p["exit_code"]
        report_model_a.save()

        os.wait4(interactor_pid, os.WSTOPPED)
        with open(report_model_path, "rb") as report_model_reader:
            report_model_b = pickle.load(report_model_reader)
            report_model_b.save()

    return report_model_a, report_model_b
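As a companion to the docstring above, here is a minimal interactor sketch, assuming the judge is a Python script: argv[1] is the read fd (the submission's stdout), argv[2] is the write fd (the submission's stdin), and whatever it prints to stdout is captured as report_model_b.raw_output. The guess-the-number exchange and the "AC"/"WA" strings are purely illustrative.

import sys

def main():
    from_sub = open(int(sys.argv[1]), 'r', closefd=False)  # read the submission's output
    to_sub = open(int(sys.argv[2]), 'w', closefd=False)    # feed the submission's input

    secret = 37
    to_sub.write("guess a number between 1 and 100\n")
    to_sub.flush()
    for _ in range(7):  # binary search fits in 7 guesses
        line = from_sub.readline()
        if not line:
            break  # submission exited early
        guess = int(line)
        if guess == secret:
            print("AC")  # verdict on stdout becomes the report
            return
        to_sub.write("lower\n" if guess > secret else "higher\n")
        to_sub.flush()
    print("WA")

if __name__ == '__main__':
    main()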
Example #16
    def test_real_timeout(self):
        result = sandbox.run('test_real_timeout', max_real_time=1000)
        self.assertEqual(result["result"], sandbox.RESULT_REAL_TIME_LIMIT_EXCEEDED)
        self.assertGreaterEqual(result["real_time"], 1000)
Example #17
File: vortex.py  Project: croxis/apollo
sandbox.base.taskMgr.add(taskUpdate, "SpinCameraTask")

# Create buffers
vortex_buffer = sandbox.base.win.makeTextureBuffer("Vortex Buffer", 256, 256)
#vortex_texture = vortex_buffer.getTexture()
vortex_buffer.setSort(-100)
vortex_camera = sandbox.base.makeCamera(vortex_buffer)
vortex_scene = NodePath("Vortex Scene")
vortex_camera.reparentTo(vortex_scene)

#inside_vortex.setTexture(vortex_texture, 1)
#vortex.reparentTo(vortex_scene)
vortex.reparentTo(sandbox.base.render)

#alight = AmbientLight('alight')
#alnp = vortex_scene.attachNewNode(alight)
#alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
#vortex_scene.setLight(alnp)


def screenshot():
    sandbox.base.screenshot('/home/croxis/vortex/')


sandbox.base.accept("v", sandbox.base.bufferViewer.toggleEnable)
sandbox.base.accept("V", sandbox.base.bufferViewer.toggleEnable)

sandbox.base.accept("s", screenshot)

sandbox.run()
Example #18
    def test_memory_limit_1(self):
        result = sandbox.run('test_memory_limit_1',
                             max_memory=1048576 * 16,
                             memory_check_only=True)
        self.assertEqual(result["result"], sandbox.RESULT_MEMORY_EXCEEDED)
        self.assertGreaterEqual(result["memory"], 1048576 * 16)
Example #19
File: main.py  Project: croxis/apollo
    for bod in sandbox.getSystem(solarSystem.SolarSystemSystem).bodies:
        log.debug(bod.getName() + ": " + str(bod.getPos()))
    return task.again


def loginDebug(task):
    #sandbox.getSystem(clientNet.NetworkSystem).sendLogin(universals.username, "Hash Password")
    sandbox.send('login', [('127.0.0.1', 1999)])
    #return task.again
    return task.done


def spawnDebug(task):
    shipSystem.spawnShip("The Hype", "Hyperion", universals.spawn, True)
    spawnPoint = LPoint3d(universals.spawn)
    spawnPoint.addX(3)
    shipSystem.spawnShip("Boo boo", "Hyperion", spawnPoint)

#sandbox.base.taskMgr.doMethodLater(10, planetPositionDebug, "Position Debug")
if universals.runClient:
    sandbox.base.taskMgr.doMethodLater(1, loginDebug, "Login Debug")

if universals.runServer:
    sandbox.base.taskMgr.doMethodLater(1, spawnDebug, "Spawn Debug")


log.info("Setup complete.")
sandbox.run()

##TODO: FIX BULLET PHYSICS AND SOLAR SYSTEM RENDER TO PROPERLY USE ROOT SOLAR SYSTEM NODE
Example #20
def main():
    eng = engine.Engine()

    @export
    def apply_user_actions(action_reprs):
        action_group = eng.apply_user_actions(
            [useractions.from_repr(u) for u in action_reprs])
        return eng.acl_split(action_group).to_json_obj()

    @export
    def fetch_table(table_id, formulas=True, query=None):
        return actions.get_action_repr(
            eng.fetch_table(table_id, formulas=formulas, query=query))

    @export
    def fetch_table_schema():
        return eng.fetch_table_schema()

    @export
    def fetch_snapshot():
        action_group = eng.fetch_snapshot()
        return eng.acl_split(action_group).to_json_obj()

    @export
    def autocomplete(txt, table_id):
        return eng.autocomplete(txt, table_id)

    @export
    def find_col_from_values(values, n, opt_table_id):
        return eng.find_col_from_values(values, n, opt_table_id)

    @export
    def fetch_meta_tables(formulas=True):
        return {
            table_id: actions.get_action_repr(table_data)
            for (table_id,
                 table_data) in eng.fetch_meta_tables(formulas).iteritems()
        }

    @export
    def load_meta_tables(meta_tables, meta_columns):
        return eng.load_meta_tables(
            table_data_from_db("_grist_Tables", meta_tables),
            table_data_from_db("_grist_Tables_column", meta_columns))

    @export
    def load_table(table_name, table_data):
        return eng.load_table(table_data_from_db(table_name, table_data))

    @export
    def create_migrations(all_tables, metadata_only=False):
        doc_actions = migrations.create_migrations(
            {
                t: table_data_from_db(t, data)
                for t, data in all_tables.iteritems()
            }, metadata_only)
        return map(actions.get_action_repr, doc_actions)

    @export
    def get_version():
        return schema.SCHEMA_VERSION

    @export
    def get_formula_error(table_id, col_id, row_id):
        return objtypes.encode_object(
            eng.get_formula_error(table_id, col_id, row_id))

    export(parse_acl_formula)
    export(eng.load_empty)
    export(eng.load_done)

    sandbox.run()