def create_nodes(*args, **kwargs):
    """Create nodes to the order of magnitude specified by kwargs.magnitude."""
    if len(args) > 0 or "mul_fact" in kwargs:
        if len(args) != 0:
            suffix = args[0]
        else:
            suffix = kwargs["mul_fact"]
        index = suffix
        suffix = str(suffix)
        # First create the dependency node.
        f = open(os.path.join(DATA_DIR, "dependency.json"))
        dependency_node = decode_json(f.read())
        CreateNodesInGraph.create_dependency_node(
            dependency_name=dependency_node["dependency_name"] + "_" + suffix,
            dependency_path=dependency_node["dependency_path"] + "_" + suffix,
        )
        # Now create two versions for the node.
        f = open(os.path.join(DATA_DIR, "dependency_version.json"))
        dependency_version = decode_json(f.read())
        # Modify this if the number of versions of a dependency needs to be changed.
        for i in range(0, 2):
            CreateNodesInGraph.create_dependency_version_node(
                version="{}_{}_{}".format(dependency_version["version"],
                                          suffix, str(i)),
                dep_name="{}_{}_{}".format(
                    dependency_version["dependency_name"], suffix, str(i)),
            )
        # Create a security event node. This will be equal to the number of dependency nodes.
        f = open(os.path.join(DATA_DIR, "security_event.json"))
        security_event = decode_json(f.read())
        CreateNodesInGraph.create_security_event_node(
            event_id=security_event["event_id"],
            event_type=security_event["event_type"],
            body=security_event["body"],
            title=security_event["title"],
        )
        # Create a probable vulnerability node. This will be half of all security event nodes.
        if index % 2 == 0:
            CreateNodesInGraph.create_probable_vuln_node("PCVE-20XX-{}".format(
                str(i)))
        # Create an identified CVE node for every alternate probable CVE node. This means there'll
        # be half as many identified CVE nodes as probable CVE nodes.
        if index % 4 == 0:
            f = open(os.path.join(DATA_DIR, "identified_cve.json"))
            identified_cve = decode_json(f.read())
            CreateNodesInGraph.create_reported_cve_node(
                cve_id=identified_cve["cve_id"],
                cvss=identified_cve["cvss"],
                severity=Severity(identified_cve["severity"].lower()),
            )
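Every snippet on this page leans on a small decode_json helper that is never shown. Example #6's test (bytes in, dict out) suggests it is a thin wrapper over json.loads; a minimal sketch under that assumption:

import json

def decode_json(raw):
    # Accept a str or bytes payload and return the parsed Python object.
    return json.loads(raw)

Wrapping the open() calls above in "with open(...) as f:" blocks would also ensure the JSON fixture file handles are closed promptly.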
Example #2
def srv_loop():
    address = (HOST, PORT)
    clients = []
    sock = start_server(address)

    while True:
        try:
            client, address = sock.accept()
        except OSError as e:
            pass
        else:
            print(f'Connected with {str(address)}')
            clients.append(client)
        finally:
            w = []
            try:
                r, w, e = select.select([], clients, [], 0)
            except Exception as e:
                pass

        for s_client in w:
            srv_response = encode_json(response(200))
            try:
                data = s_client.recv(1024)
                print(decode_json(data))
                s_client.send(srv_response)
            except Exception:
                clients.remove(s_client)
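srv_loop assumes a start_server helper that returns a non-blocking listening socket, which is why sock.accept() is guarded by an OSError handler. A plausible sketch of that helper, offered as an assumption rather than the original implementation:

import socket

def start_server(address):
    # Non-blocking TCP listener: accept() raises OSError (BlockingIOError)
    # when no client is waiting, instead of stalling the loop.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(address)
    sock.listen(5)
    sock.setblocking(False)
    return sock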
Example #3
    def onNotification(self, sender, method, data):  # pylint: disable=invalid-name
        ''' Notification event handler for accepting data from add-ons '''
        if not method.endswith(
                'upnext_data'):  # Method looks like Other.upnext_data
            return

        data, encoding = decode_json(data)
        data.update(id='%s_play_action' % sender.replace('.SIGNAL', ''))
        self.api.addon_data_received(data, encoding=encoding)
Example #4
    def onNotification(self, sender, method, data):  # pylint: disable=invalid-name
        """Notification event handler for accepting data from add-ons"""
        if not method.endswith('upnext_data'):  # Method looks like Other.upnext_data
            return

        decoded_data, encoding = decode_json(data)
        if decoded_data is None:
            self.log('Received data from sender %s is not JSON: %s' % (sender, data), 2)
            return

        decoded_data.update(id='%s_play_action' % sender.replace('.SIGNAL', ''))
        self.api.addon_data_received(decoded_data, encoding=encoding)
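Examples #3 and #4 expect decode_json to return a (data, encoding) pair, with data being None when the payload is not valid JSON. The real add-on helper is not shown here; a minimal sketch consistent with that contract:

import json

def decode_json(data):
    # Return (parsed_object, encoding); (None, None) signals a non-JSON payload.
    try:
        return json.loads(data), 'utf-8'
    except (TypeError, ValueError):
        return None, None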
Example #5
    def cast(val):
      if isinstance(val, basestring):
        if self.is_base64(val):
          mimetype = self.get_base64_mimetype(val)
          base64_string = self.get_base64_content(val)
          base64_decoded = base64.b64decode(base64_string)
          base64_string = None
          bucket = os.environ.get('GCS_BUCKET_NAME',
            app_identity.get_default_gcs_bucket_name())
          filename = utils.get_random_string(12)
          filename += str(calendar.timegm(time.gmtime()))

          import cloudstorage as gcs

          acl = None
          if self.acl:
            acl = {'x-goog-acl': self.acl}
          with gcs.open('/%s/%s' % (bucket, filename), 'w',
                        content_type=mimetype, options=acl) as gcs_file:
            gcs_file.write(base64_decoded)

          if self._local:
            return '%s/%s/%s' % (self.base_path, bucket, filename)
          access_token, _ = self.get_access_token()
          medialink_url = '%s/%s/o/%s' % (self.base_path, bucket, filename)
          medialink = urlfetch.fetch(
            method=urlfetch.GET,
            url=medialink_url,
            headers={
              'Authorization': 'Bearer %s' % access_token,
            })
          if medialink.status_code >= 200 and medialink.status_code < 300:
            return utils.decode_json(medialink.content)['mediaLink']
          return None
        elif URL_REGEX.findall(val):
          return val
        elif LOCAL_URL_REGEX.findall(val):
          return val
        else:
          raise datastore_errors.BadValueError('Expected Base64 string or '
                                               'URL, got %r.' % val)
      else:
        raise datastore_errors.BadValueError('Expected string, got %r.' % val)
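cast() relies on three helpers from the surrounding property class: is_base64, get_base64_mimetype, and get_base64_content. Hypothetical stand-alone equivalents, assuming the value is a standard data URI such as data:image/png;base64,iVBORw0...:

import re

# Matches "data:<mimetype>;base64,<content>"; DOTALL lets the content span newlines.
DATA_URI_RE = re.compile(r'^data:(?P<mime>[\w/+.-]+);base64,(?P<content>.+)$', re.DOTALL)

def is_base64(val):
    return bool(DATA_URI_RE.match(val))

def get_base64_mimetype(val):
    return DATA_URI_RE.match(val).group('mime')

def get_base64_content(val):
    return DATA_URI_RE.match(val).group('content')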
Example #6
def test_decode_json():
    assert isinstance(decode_json(b'{"a": 1, "b": "string"}'), dict)
Example #7
def create_nodes(*args, **kwargs):
    """Create nodes to the order of magnitude specified by kwargs.magnitude."""
    if len(args) > 0 or "mul_fact" in kwargs:
        if len(args) != 0:
            suffix = args[0]
        else:
            suffix = kwargs["mul_fact"]
        index = suffix
        suffix = str(suffix)
        # First create the dependency node.
        f = open(os.path.join(DATA_DIR, "dependency.json"))
        dependency_node = decode_json(f.read())
        dependency_node["dependency_name"] += "_" + suffix
        dependency_node["dependency_path"] += "_" + suffix
        CreateNodesInGraph.create_dependency_node(
            dependency_name=dependency_node["dependency_name"],
            dependency_path=dependency_node["dependency_path"],
        )
        # Now create two versions for the node.
        f = open(os.path.join(DATA_DIR, "dependency_version.json"))
        dependency_version = decode_json(f.read())
        # Modify this if the number of versions of a dependency needs to be changed.
        for i in range(0, 2):
            CreateNodesInGraph.create_dependency_version_node(
                version="{}_{}".format(dependency_version["version"], str(i)),
                dep_name="{}_{}".format(dependency_version["dependency_name"],
                                        suffix),
            )
        # Since this iterates internally, passing one is enough.
        dependency_version["dependency_name"] = "{}_{}".format(
            dependency_version["dependency_name"], suffix)
        CreateEdges.create_dependency_version_edge(dependency_node,
                                                   dependency_version)
        # Create a security event node. This will be equal to the number of dependency nodes.
        f = open(os.path.join(DATA_DIR, "security_event.json"))
        security_event = decode_json(f.read())
        CreateNodesInGraph.create_security_event_node(
            event_id=security_event["event_id"],
            event_type=security_event["event_type"],
            body=security_event["body"],
            title=security_event["title"],
        )
        probable_vuln = None
        # Create a probable vulnerability node. This will be half of all security event nodes.
        if index % 2 == 0:
            f = open(os.path.join(DATA_DIR, "probable_vulnerability.json"))
            probable_vuln = decode_json(f.read())
            CreateNodesInGraph.create_probable_vuln_node(
                probable_vuln["probable_vuln_id"])
            # Link this probable vulnerability to the security event node.
            CreateEdges.create_prob_vuln_sec_event_link(
                probable_vuln, security_event)
        # Create an identified CVE node for every alternate probable CVE node. This means there'll
        # be half as many identified CVE nodes as probable CVE nodes.
        if index % 4 == 0:
            f = open(os.path.join(DATA_DIR, "identified_cve.json"))
            identified_cve = decode_json(f.read())
            CreateNodesInGraph.create_reported_cve_node(
                cve_id=identified_cve["CVE_ID"],
                cvss=identified_cve["CVSS"],
                severity=identified_cve["severity"],
            )
            # Link this identified CVE node to the dependency node with version suffix {1}.
            dependency_version["version"] = "{}_{}".format(
                dependency_version["version"], str(1))
            CreateEdges.create_reported_cve_dependency_version_link(
                identified_cve, dependency_version)
            # Also link it to the probable CVE node.
            if probable_vuln is not None:
                CreateEdges.create_probable_reported_cve_link(
                    probable_vuln, identified_cve)
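A hypothetical driver for the function above, assuming the multiplication factor is numeric (the index % 2 and index % 4 checks rely on that): each call seeds one dependency plus its versions, security event, and CVE nodes, all tagged with the factor.

for magnitude in range(1, 11):
    create_nodes(magnitude)            # positional form
    # create_nodes(mul_fact=magnitude) # equivalent keyword form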
Example #8
            logger.debug("Processing document [%s]", document.get_id())
            start = out.tell()
            out.write("#DOCUMENT\t%s\n" % (document.get_id()))
            process("HEAD", document, out)
            process("TEXT", document, out)
            #print(document["HEAD"].textContent(), document["TEXT"].textContent())
            outptr.write("%s\t%d\t%d\n" %
                         (document.get_id(), start, out.tell()))
            logger.debug("Processed document [%s]", document.get_id())


for collection in args.collections:
    l = collection.split("#")
    if args.json and os.path.exists(collection):
        s = open(collection).read()
        descriptions = decode_json(s)
    else:
        restrict = ""
        if len(l) == 1:
            pass
        elif len(l) == 2:
            restrict = l[1]
            collection = l[0]
        else:
            raise Exception("Cannot handle collection specification [%s]" % collection)
        descriptions = collections(collection, restrict)

    for description in descriptions:
        collection_id = description["id"]
        donefilepath = os.path.join(args.outdir, "%s.done" % collection_id)
Example #9
def import_from_syzoj(url: str, willPublic: bool):
    """
    Import a problem from SYZOJ.
    Parameters:
    url: str - URL of the SYZOJ problem
    willPublic: bool - whether the new problem should be made public
    Returns:
    {
        "code": 0,
        "uuid": 'uuid used for the websocket',
        "message": ""
    }
    """
    import urllib
    import tempfile
    import pathlib
    import traceback
    import zipfile
    import shutil
    import os
    import yaml
    import requests
    from io import BytesIO
    from utils import decode_json
    if not session.get("uid"):
        return make_response(-1, message="请先登录")
    user: User = User.by_id(session.get("uid"))
    if not permission_manager.has_any_permission(user.id, "problem.create", "problem.manage"):
        return make_response(-1, message="你没有权限执行此操作")
    try:

        with requests.get(f"{url}/export") as urlf:
            data = decode_json(urlf.content.decode())["obj"]
        print("JSON data: {}".format(data))
        import datetime
        problem = Problem(uploader_id=user.id,
                          title=data["title"],
                          content=data["description"],
                          input_format=data["input_format"],
                          output_format=data["output_format"],
                          hint=data["limit_and_hint"],
                          using_file_io=data["file_io"],
                          input_file_name=data["file_io_input_name"],
                          output_file_name=data["file_io_output_name"],
                          create_time=datetime.datetime.now()
                          )
        if willPublic:
            if not permission_manager.has_any_permission(user.id, "problem.publicize", "problem.manage"):
                return make_response(-1, message="你没有权限公开题目")
            problem.public = True
        problem.example = []
        problem.hint = "### 样例\n" + \
            data["example"]+"\n\n### Hint\n"+problem.hint
        time_limit = int(data["time_limit"])
        memory_limit = int(data["memory_limit"])
        db.session.add(problem)
        db.session.commit()

        work_dir = pathlib.PurePath(tempfile.mkdtemp())
        with requests.get(f"{url}/testdata/download") as urlf:
            pack = zipfile.ZipFile(BytesIO(urlf.content))
            pack.extractall(work_dir)
            pack.close()
        problem_data_dir = pathlib.PurePath(
            f"{config.UPLOAD_DIR}/{problem.id}")
        shutil.rmtree(problem_data_dir, ignore_errors=True)
        shutil.copytree(work_dir, problem_data_dir)
        shutil.rmtree(work_dir)
        # Switch work_dir to the problem's permanent data directory.
        work_dir = problem_data_dir
        for file in filter(lambda x: x.endswith(".lock"), os.listdir(work_dir)):
            os.remove(work_dir/file)
        file_list = []
        for file in filter(lambda x: not x.endswith(".lock"), os.listdir(work_dir)):
            with open(work_dir/(file+".lock"), "w") as f:
                import time
                last_modified_time = time.time()
                f.write(str(last_modified_time))
            file_list.append({
                "name": file, "size": os.path.getsize(work_dir/file), "last_modified_time": last_modified_time
            })
        problem.files = file_list
        pure_file_list = list(map(lambda x: x["name"], file_list))

        for x in pure_file_list:
            if x.startswith("spj_"):
                problem.spj_filename = x
                break
        auto_generate = True
        subtasks = []
        if os.path.exists(work_dir/"data.yml"):
            # data.yml exists; use it to describe the test data.
            with open(work_dir/"data.yml", "r", encoding="utf-8") as f:
                data_obj = yaml.safe_load(f)
                # data.yml explicitly designates a special judge (SPJ).

                if "specialJudge" in data_obj:
                    new_spj_filename = work_dir/(
                        "spj_"+data_obj["specialJudge"]["language"]+"."+data_obj["specialJudge"]["fileName"].split(".")[-1])
                    print(new_spj_filename)
                    print(work_dir/data_obj["specialJudge"]["fileName"])
                    shutil.move(
                        work_dir/data_obj["specialJudge"]["fileName"], new_spj_filename)
                    problem.spj_filename = new_spj_filename.name
                if "subtasks" in data_obj:
                    auto_generate = False

                    def make_input(x):
                        return data_obj["inputFile"].replace("#", str(x))

                    def make_output(x):
                        return data_obj["outputFile"].replace("#", str(x))

                    for i, subtask in enumerate(data_obj["subtasks"]):
                        print(subtask)
                        subtasks.append({
                            "name": f"Subtask{i+1}",
                            "score": int(subtask["score"]),
                            "method": subtask["type"],
                            "time_limit": time_limit,
                            "memory_limit": memory_limit,
                            "testcases": []
                        })
                        testcases = subtasks[-1]["testcases"]
                        score = subtasks[-1]["score"]//len(subtask["cases"])
                        for testcase in subtask["cases"]:
                            testcases.append({
                                "input": make_input(testcase),
                                "output": make_output(testcase),
                                "full_score": score
                            })
                        testcases[-1]["full_score"] = subtasks[-1]["score"] - \
                            score*(len(testcases)-1)
        if auto_generate:
            # No data.yml; generate one subtask per input file directly.
            input_files = list(
                filter(lambda x: x.endswith(".in"), pure_file_list))
            output_files = list(
                filter(lambda x: x.endswith(".out") or x.endswith(".ans"), pure_file_list))
            if len(input_files) == len(output_files):
                pass
            for i, file in enumerate(input_files):
                pure_file_name = file[:file.rindex(".")]
                subtasks.append({
                    "name": f"Subtask{i+1}",
                    "score": 100//len(input_files),
                    "method": "sum",
                    "time_limit": time_limit,
                    "memory_limit": memory_limit,
                    "testcases": [{"full_score": 100//len(input_files), "input": file, "output": f"{pure_file_name}.ans" if f"{pure_file_name}.ans" in output_files else f"{pure_file_name}.out"}],
                    "comment": ""
                })
            diff = 100-sum(map(lambda x: x["score"], subtasks))
            subtasks[-1]["score"] += diff
            subtasks[-1]["testcases"][0]["full_score"] += diff
        for file in filter(lambda x: x.endswith(".lock"), os.listdir(work_dir)):
            os.remove(work_dir/file)
        for file in filter(lambda x: not x.endswith(".lock"), os.listdir(work_dir)):
            with open(work_dir/(file+".lock"), "w") as f:
                import time
                last_modified_time = time.time()
                f.write(str(last_modified_time))
        problem.files = generate_file_list(problem.id)
        problem.subtasks = subtasks
        db.session.commit()
    except Exception:
        print(traceback.format_exc())
        return make_response(-1, message=traceback.format_exc())
    return make_response(0, problem_id=problem.id)
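import_from_syzoj depends on a project-local make_response helper (not Flask's built-in of the same name) that packs a status code and extra fields into one JSON body, as the docstring's {"code": 0, ...} shape suggests. A minimal sketch under that assumption:

from flask import jsonify

def make_response(code, **extra):
    # Combine the status code with any additional fields into a single JSON response.
    return jsonify(code=code, **extra)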
Example #10
def cli_loop(message):
    address = (HOST, PORT)
    sock = start_client(address)
    sock.send(message)
    data = sock.recv(1024)
    print(decode_json(data))
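cli_loop pairs with srv_loop from Example #2 and presumes two more helpers: encode_json as the json.dumps counterpart of decode_json, and start_client to open a plain TCP connection. Hedged sketches of both, plus a possible invocation:

import json
import socket

def encode_json(obj):
    # Serialize to UTF-8 bytes so the result can go straight into socket.send().
    return json.dumps(obj).encode('utf-8')

def start_client(address):
    # Blocking TCP client socket connected to (HOST, PORT).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(address)
    return sock

# cli_loop(encode_json({'cmd': 'ping'}))  # hypothetical payload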
Example #11
def message_for_room(request):
    data = decode_json(json.loads(request))
    room = data['room']
    send(json.dumps(json.dumps(data)), room=room)
Example #12
def on_leave(request):
    data = decode_json(json.loads(request))
    username = data['username']
    room = data['room']
    leave_room(room)
    send(username + ' has left the room.', room=room)
Example #13
def on_join(request):
    data = decode_json(json.loads(request))
    username = data['username']
    room = data['room']
    join_room(room)
    send(json.dumps('{0} has entered the room.'.format(username)), room=room)
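Examples #11-#13 read like Flask-SocketIO event handlers. How they are registered is not shown; one hedged possibility, assuming the three functions above are in scope and that each event delivers the raw JSON string the handlers expect:

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

# on_event() wires a plain function to a Socket.IO event name.
socketio.on_event('message_for_room', message_for_room)
socketio.on_event('leave', on_leave)
socketio.on_event('join', on_join)

if __name__ == '__main__':
    socketio.run(app)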
Example #14
def remote_judge_submit(data):
    """
    Submit code from the client.
    {
        "problemID": hj2 problem ID,
        "remoteAccountID": "remote account ID",
        "code": "user's code",
        "language": "language chosen by the user",
        "loginCaptcha": "login captcha",
        "submitCaptcha": "submission captcha",
        "contestID": "contest to submit under; -1 means not a contest submission",
        "contestProblemID": "contest problem ID"
    }
    """
    if not permission_manager.has_permission(session.get("uid"),
                                             "remote_judge.use"):
        emit("server_response", {
            "ok": False,
            "message": "你没有权限这样做"
        },
             room=request.sid)
        return
    problem: Problem = Problem.by_id(data["problemID"])
    contest_id = data["contestID"]
    if contest_id != -1:
        contest: Contest = db.session.query(Contest).filter(
            Contest.id == contest_id).one_or_none()
        if not contest:
            emit("server_response", {
                "ok": False,
                "message": "非法比赛ID"
            },
                 room=request.sid)
            return
        if len(contest.problems) <= data["contestProblemID"]:
            emit("server_response", {
                "ok": False,
                "message": "非法比赛题目ID"
            },
                 room=request.sid)
            return
        if not contest.running():
            emit("server_response", {
                "ok": False,
                "message": "比赛未在进行"
            },
                 room=request.sid)
            return
        problem: Problem = Problem.by_id(
            contest.problems[data["contestProblemID"]]["id"])
    else:
        if not problem.public and int(
                session.get("uid")
        ) != problem.uploader_id and not permission_manager.has_permission(
                session.get("uid"), "problem.manage"):
            emit("server_response", {
                "ok": False,
                "message": "你没有权限使用此题目"
            },
                 room=request.sid)
            return
    if len(data["code"]) > config.MAX_CODE_LENGTH:
        emit("server_response", {
            "ok": False,
            "message": "代码过长"
        },
             room=request.sid)
        return
    remote_account: RemoteAccount = db.session.query(RemoteAccount).filter(
        RemoteAccount.account_id == data["remoteAccountID"]).one_or_none()
    if not remote_account or remote_account.uid != int(session.get("uid")):
        emit("server_response", {
            "ok": False,
            "message": "非法账户ID"
        },
             room=request.sid)
        return
    if data["language"] not in config.REMOTE_JUDGE_OJS[
            problem.remote_judge_oj]["availableLanguages"]:
        emit("server_response", {
            "ok": False,
            "message": "非法语言"
        },
             room=request.sid)
        return

    remote_judge_queue.send_task("judgers.remote.submit", [
        problem.remote_judge_oj,
        decode_json(remote_account.session), remote_account.account_id,
        problem.remote_problem_id, data["language"], data["code"],
        data["loginCaptcha"], data["submitCaptcha"], request.sid,
        remote_account.username, remote_account.password, problem.id,
        int(session.get("uid")), problem.public,
        list(reversed(config.TRACK_DELAY_INTERVAL)), data["contestID"],
        data["contestProblemID"]
    ])
    emit("message", {"message": "提交中..."}, room=request.sid)