Example No. 1
    def start(self):
        # create it if it doesn't exist already
        if not os.path.exists(QUIPT_VP_DESTINATION_FOLDER_PATH):
            os.mkdir(QUIPT_VP_DESTINATION_FOLDER_PATH)
            msg = "Set the folder where the quipt_system's Virtual Printer is saving the PDFs to: '" + QUIPT_VP_DESTINATION_FOLDER_PATH + "'"
            display_alert(msg, blocking=True)
        
        self.observer.schedule(QuiptPDFHandler(), QUIPT_VP_DESTINATION_FOLDER_PATH)
        self.observer.start()
        print("\n" + timestamp() + ready_text, flush=True)

        # The observer keeps observing in its own thread.
        # We artificially keep this main thread alive because,
        # if it finishes execution, Python will kill all of its
        # child threads too, including the observer's thread.
        # We just want this main thread to stay alive by any means.
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            print(timestamp() + ": Closing all threads, please wait...", flush=True)
            self.observer.stop()
            self.observer.join()
            print(timestamp() + ": Done")
        except Exception:
            print(timestamp() + ": An error occurred while running: " + __file__, flush=True)
            display_alert("An error occurred while running: " + __file__, blocking=False)
            self.observer.stop()
            self.observer.join()
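
For context, a minimal sketch of the scaffolding this method assumes, based on the watchdog library's Observer API (only the class and attribute names come from the snippet; the handler body is hypothetical):

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class QuiptPDFHandler(FileSystemEventHandler):
    def on_created(self, event):
        # hypothetical handler: react to each PDF dropped into the watched folder
        print("new file:", event.src_path)

class QuiptWatcher:
    def __init__(self):
        # the Observer runs handler callbacks in its own thread
        self.observer = Observer()

    # ... start(self) as defined above ...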
Example No. 2
def xrp_transfer(order):
    """
    pretty wrap the asyncio xrp transfer
    """
    # FIXME log this event
    timestamp()
    line_number()
    print("\nORDER\n\n", {k: v for k, v in order.items() if k != "private"}, "\n")
    event = asyncio.get_event_loop().run_until_complete(xrp_transfer_execute(order))
    print(it("red", "XRP TRANSFERRED"))
    return event
def eos_transfer(order):
    """
    serialize, sign, and broadcast an order dictionary with nine keys
    """
    # FIXME log this event
    timestamp()
    line_number()
    print("\nORDER\n\n", {k: v for k, v in order.items() if k != "private"}, "\n")
    nodes = eosio_nodes()
    while True:
        nodes.append(nodes.pop(0))
        node = nodes[0]
        # configure the url and port
        eosio_config.url = node
        eosio_config.port = ""
        print("\nADDRESS\n\n", node, "\n")
        # assemble the transfer operation dictionary
        operation = {
            "from": order["public"],
            "memo": "",
            # EOS amounts must have 4 decimal places, formatted as a string with a space and "EOS"
            "quantity": precisely(order["quantity"], 4) + " EOS",
            "to": order["to"],
        }
        print("\nOPERATION\n\n", operation, "\n")
        # serialize the transfer operation
        raw = RawinputParams(
            "transfer",  # the operation type
            operation,  # the parameters
            "eosio.token",  # the contract; for our purposes always "eosio.token"
            order["public"] + "@active",  # the permitted party (or @owner)
        )
        print("\nSERIALIZE\n\n", raw.params_actions_list, "\n")
        # sign the transfer operation
        params = EosioParams(raw.params_actions_list, order["private"])
        print("\nSIGN\n\n", params.trx_json, "\n")
        # broadcast the transfer to the network
        try:
            ret = NodeNetwork.push_transaction(params.trx_json)
            print("\nBROADCAST\n\n", ret)
            if "processed" not in ret.keys():
                raise ValueError("NOT PROCESSED")
            print(it("red", "EOS TRANSFERRED"))
            break
        except Exception as error:
            print(error)
            print(it("red", "BROADCAST FAILED"), node, "attempting new api...")
            continue
    return ret
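
Both transfer helpers read the same fields from the order dictionary. A hedged sketch of that input (values are placeholders; only the keys the snippets above actually touch are shown, though the eos_transfer docstring mentions nine in total):

order = {
    "public": "senderaccount",  # sending account name (placeholder)
    "private": "5K...",         # its private key (elided)
    "to": "receiveraccount",    # destination account name (placeholder)
    "quantity": 1.2345,         # amount to transfer
}
event = eos_transfer(order)  # or: xrp_transfer(order)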
Example No. 4
def submit_problem_hook():
    ret = problem.submit_problem(session['tid'], request, session['is_zju_user'])
    tstamp = utilities.timestamp(datetime.utcnow())
    if tstamp > scoreboard.ctf_end:
        ret['message'] = '[比赛已经结束] %s' % ret['message']
        ret['status'] = 0
        ret['score'] = 0
    return ret
def verify_ripple_account(account):
    """
    query the ripple public api's account_info method; returns True or False on existence
    """
    data = json_dumps({
        "method":
        "account_info",
        "params": [{
            "account": account,
            "strict": True,
            "ledger_index": "current",
            "queue": True,
        }],
    })
    ret = get(URL, data=data).json()["result"]
    timestamp()
    line_number()
    print(ret, "\n")
    return "account_data" in ret
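
verify_ripple_account() keys off the shape of the rippled account_info result: a known account carries an "account_data" object, while an unknown one carries an error field instead. Trimmed, illustrative responses:

# account exists -> "account_data" is present
{"account_data": {"Account": "rXXXXXXXX", "Balance": "1000000"}, "validated": False}
# account unknown -> no "account_data"; an error code instead
{"error": "actNotFound", "status": "error"}
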
def recycler():
    """
    in a background process, check incoming accounts & move funds to outbound accounts
    """
    networks = ["eos", "xrp"]
    print(it("red", f"INITIALIZING RECYCLER\n"), "networks:", networks, "\n")
    while True:
        for network in networks:
            order = {}
            # EOS specific parameters
            if network == "eos":
                nil = NIL_EOS
                get_balance = eos_balance
                transfer = eos_transfer
            # XRP specific parameters
            elif network == "xrp":
                nil = NIL_XRP
                get_balance = xrp_balance
                transfer = xrp_transfer
            # recycle gateway incoming transfers to the outbound account
            for idx, gate in enumerate(GATE[network]):
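                # index 0 is the outbound account; only sweep the deposit accounts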
                if idx:
                    balance = get_balance(gate["public"])
                    if balance > nil:
                        timestamp()
                        line_number()
                        print(it("red", f"{network} RECYCLER"))
                        print(gate["public"], balance, "\n")
                        # finalize the order
                        order["private"] = gate["private"]
                        order["public"] = gate["public"]
                        order["to"] = GATE[network][0]["public"]
                        order["quantity"] = balance - nil
                        # serialize, sign, and broadcast
                        print(transfer(order), "\n")
        time.sleep(60)
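
The gateway snippets in these examples all lean on a shared GATE mapping. The sketch below is inferred purely from the lookups they perform (field values are placeholders, not the project's real config):

GATE = {
    # per network: a list of accounts; index 0 is the outbound account
    "eos": [{"public": "...", "private": "..."}],
    "xrp": [{"public": "...", "private": "..."}],
    # per network: metadata for the user-issued asset (UIA) it backs
    "uia": {
        "eos": {"asset_id": "1.3.x", "asset_name": "...", "asset_precision": 4,
                "issuer_id": "1.2.x", "issuer_private": "..."},
        "xrp": {"asset_id": "1.3.y", "asset_name": "...", "asset_precision": 6,
                "issuer_id": "1.2.y", "issuer_private": "..."},
    },
}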
Example No. 7
def get_package_source_dir(args):
    """Get the sources for a package.

    PKGBUILDs contain instructions for downloading sources and then
    building them. We don't want to download the sources before every
    build, so this function downloads sources and stores them in a
    standard location so that they can be copied later rather than
    re-downloaded.

    If this function returns successfully, the abs directory for package
    will have been copied to sources_directory, and the sources for that
    package will have been downloaded into it.
    """
    if not os.path.isdir(args.abs_dir):
        die(Status.failure, "Could not find abs directory for dir '%s'" % args.abs_dir)
    shutil.copytree(args.abs_dir, args.permanent_source_dir)
    recursive_chown(args.permanent_source_dir)
    os.chdir(args.permanent_source_dir)

    # The --nobuild flag to makepkg causes it to download sources, but
    # not build them.
    command = (
        "sudo -u tuscan makepkg --nobuild --syncdeps "
        "--skipinteg --skippgpcheck --skipchecksums "
        "--noconfirm --nocolor --log --noprogressbar "
        "--nocheck --nodeps"
    )
    time = timestamp()
    cp = subprocess.run(command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    log("command", command, cp.stdout.splitlines(), time)

    success = False
    if cp.returncode:
        shutil.rmtree(args.permanent_source_dir)
    else:
        success = True

    return success
Example No. 8
def initialize_repositories(args):
    """Point pacman to toolchain builds.

    Ensure that pacman only installs toolchain builds of packages during
    the build process by pointing pacman.conf to the toolchain
    repository.
    """

    # Point pacman to our toolchain repository. This means commenting
    # out the official repositories in pacman.conf, and creating a new
    # entry for the local toolchain repository.

    lines = []
    with open("/etc/pacman.conf") as f:
        appending = True
        for line in f:
            if re.search("# REPOSITORIES", line):
                appending = False
            if appending:
                lines.append(line.strip())

    lines.append("[%s]" % toolchain_repo_name())
    lines.append("Server = file://%s" % args.toolchain_directory)

    with open("/etc/pacman.conf", "w") as f:
        for line in lines:
            print(line, file=f)

    log("info", "Removed vanilla repositories from pacman.conf and added:", lines[-2:])

    command = "pacman -Syy --noconfirm"
    time = timestamp()
    cp = subprocess.run(command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    log("command", command, cp.stdout.splitlines(), time)
    if cp.returncode:
        die(Status.failure)
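
After this rewrite, the tail of /etc/pacman.conf contains only the local repository entry, roughly as follows (the section name is whatever toolchain_repo_name() returns; the path is illustrative):

[toolchain]
Server = file:///path/to/toolchain_directory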
Example No. 9
                        print_to_LL(current_order_pick_list_path)            # Even Page/2n -> LL
                        print_to_PP(current_order_packing_slip_path)         # Odd  Page/1n -> PP
                        print_to_PP(current_order_pick_list_path)            # Even Page/2n -> PP
                    else:
                        print_to_LL_stub(current_order_pick_list_path)       # Even Page/2n -> LL
                        print_to_PP_stub(current_order_packing_slip_path)    # Odd  Page/1n -> PP
                        print_to_PP_stub(current_order_pick_list_path)       # Even Page/2n -> PP


        # delete source pdf
        try:
            os.remove(path_to_source_pdf)
            print(timestamp() + ": Received PDF at path deleted: '" + path_to_source_pdf + "'", flush=True)
        except Exception as exp:
            error_msg = timestamp() + ": " + __file__ + ": error while deleting source pdf at: '" + path_to_source_pdf + "'"
            print(error_msg, flush=True)
            # display_alert(error_msg, blocking=False)

        print("\n" + timestamp() + ready_text, flush=True)

        # Close the Preview window opened by the 'PDF Printer'
        script = r'tell application "System Events" to click first button of (first window of process "Preview")'
        p = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = p.communicate(script)

        
if __name__ == '__main__':
    print("\n\n" + timestamp() + ": " + __file__ + " started")
    w = QuiptWatcher()
    w.start()
Example No. 10
def copy_and_build(args):
    try:
        shutil.copytree(args.permanent_source_dir, args.build_dir)
    except shutil.Error as e:
        # e.args will be a list, containing a single list of 3-tuples.
        # We are interested in the third item of each tuple.
        errors = [err[2] for err in e.args[0]]
        die(Status.failure, "No source directory in source volume: %s" % args.permanent_source_dir, output=errors)
    recursive_chown(args.build_dir)
    os.chdir(args.build_dir)

    proc = subprocess.Popen(["/usr/bin/sloccount", "src"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    output = codecs.decode(out, errors="replace")
    if proc.returncode:
        log("die", "SLOCCount failed", output.splitlines())
    else:
        log_sloc(output.splitlines())

    # Add the --host option to invocations of ./configure
    with open("PKGBUILD", encoding="utf-8") as f:
        pkgbuild = f.read().splitlines()

    pkgbuild = [re.sub(r"configure\s", "configure --host x86_64-unknown-linux ", line) for line in pkgbuild]

    with open("PKGBUILD", "w", encoding="utf-8") as f:
        f.write("\n".join(pkgbuild))

    # The difference between this invocation and the one in
    # get_package_source_dir() is the --noextract flag. Sources should
    # already have been downloaded and extracted by
    # get_package_source_dir(), so we just want to build them.
    #
    # Also, the invocation in get_package_source_dir has the --nodeps
    # option, since we just wanted to download sources there. Here, we
    # do want to install dependencies (from our local toolchain
    # repository), so don't pass the --nodeps flag.
    if args.env_vars is None:
        args.env_vars = []

    command_env = os.environ.copy()
    for pair in args.env_vars:
        var, val = pair.split("=")
        command_env[var] = val

    command = (
        "sudo -u tuscan " + " ".join(args.env_vars) + " bear makepkg --noextract --syncdeps"
        " --skipinteg --skippgpcheck --skipchecksums"
        " --noconfirm --nocolor --log --noprogressbar"
        " --nocheck"
    )
    time = timestamp()

    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=command_env)

    stdout_data, _ = proc.communicate()
    output = codecs.decode(stdout_data, errors="replace")

    log("command", command, output.splitlines(), time)

    # Pick up output left by bear
    if os.path.exists("compile_commands.json"):
        with open("compile_commands.json") as f:
            bear_output = json.load(f)
        log("bear", "bear", output=bear_output)
    else:
        log("die", "No bear output found in dir '%s'" % os.getcwd())

    native_tools = {}
    for native in glob("/tmp/tuscan-native-*"):
        with open(native) as f:
            tool = f.readlines()
        if tool:
            tool = tool[0].strip()
        if tool not in native_tools:
            native_tools[tool] = 0
        native_tools[tool] += 1
    if native_tools:
        log("native_tools", "native_tools", native_tools)

    return proc.returncode
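
bear here is the Build EAR tool: it intercepts compiler invocations during the makepkg run and records them in compile_commands.json, a standard JSON compilation database. A representative entry (paths and flags are illustrative):

[
    {
        "directory": "/build/pkg/src",
        "command": "cc -c -o util.o util.c",
        "file": "util.c"
    }
]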
Example No. 11
    def on_created(self, event):
        path_to_source_pdf = event.src_path

        # When 'PDF Printer' starts "printing" the pdf to the folder,
        # this method fires before 'PDF Printer' has finished writing the
        # pdf, so we add some delay to give it (more than) enough time
        # to finish writing.
        print(timestamp() + ": Started receiving Quipt-PDF at: '" + path_to_source_pdf + "'", flush=True)
        print(timestamp() + ": Waiting for " + str(WAIT_TIME_DUE_TO_VP_SAVING_THE_FILE) + " seconds to complete the reception...", flush=True)
        time.sleep(WAIT_TIME_DUE_TO_VP_SAVING_THE_FILE)
        
        # From here on, you can (with very high probability) safely do whatever you want with the source pdf:
        print(timestamp() + ": Done waiting. Splitting Quipt-PDF received at: '" + path_to_source_pdf + "'", flush=True)

        # create it if it doesn't exist already
        if not os.path.exists(SPLIT_QUIPT_PDF_TARGET):
            os.mkdir(SPLIT_QUIPT_PDF_TARGET)

        # delete the old contents of the split_quipt_pdf_target to keep its size
        # from growing indefinitely; the old contents are of no use at this point:
        try:
            empty_dir(SPLIT_QUIPT_PDF_TARGET)
            print(timestamp() + ": Deleted all contents of dir at: '" + SPLIT_QUIPT_PDF_TARGET + "'", flush=True)
        except Exception as expp:
            error_msg = timestamp() + ": " + __file__ + ": error while deleting all contents of dir at: '" + SPLIT_QUIPT_PDF_TARGET + "'"
            print(error_msg, flush=True)


        pages_paths = None
        try:
            # extract individual pages from the pdf
            pages_paths = pdf_to_pages(path_to_source_pdf, SPLIT_QUIPT_PDF_TARGET)
            print(timestamp() + ": File at '" + path_to_source_pdf + "' split successfully to '" + SPLIT_QUIPT_PDF_TARGET + "'", flush=True)
        except Exception as e:
            # if any error occurs, proceed to deleting the source pdf
            error_msg = timestamp() + ": " + __file__ + ": error while splitting the received pdf into individual pages."
            print(error_msg, flush=True)
            # display_alert(error_msg, blocking=False)
            
        if pages_paths is not None:
            # splitting was successful
            page_count = len(pages_paths)
            # quipt-pdfs have an even number of pages; if a pdf with an odd
            # number of pages is received, do nothing with it
            if page_count % 2 == 0:
                for page_no in range(1, page_count+1, 2):
                    # processing one page-pair per iteration of the loop
                    
                    current_order_packing_slip_path = pages_paths[page_no]   # Odd Page/1n
                    current_order_pick_list_path    = pages_paths[page_no+1] # Even Page/2n

                    # the collation happens in this piece of code:
                    if PRINT_TO_PHYSICAL_PRINTER:
                        print_to_LL(current_order_pick_list_path)            # Even Page/2n -> LL
                        print_to_PP(current_order_packing_slip_path)         # Odd  Page/1n -> PP
                        print_to_PP(current_order_pick_list_path)            # Even Page/2n -> PP
                    else:
                        print_to_LL_stub(current_order_pick_list_path)       # Even Page/2n -> LL
                        print_to_PP_stub(current_order_packing_slip_path)    # Odd  Page/1n -> PP
                        print_to_PP_stub(current_order_pick_list_path)       # Even Page/2n -> PP


        # delete source pdf
        try:
            os.remove(path_to_source_pdf)
            print(timestamp() + ": Received PDF at path deleted: '" + path_to_source_pdf + "'", flush=True)
        except Exception as exp:
            error_msg = timestamp() + ": " + __file__ + ": error while deleting source pdf at: '" + path_to_source_pdf + "'"
            print(error_msg, flush=True)
            # display_alert(error_msg, blocking=False)

        print("\n" + timestamp() + ready_text, flush=True)

        # Close the Preview window opened by the 'PDF Printer'
        script = r'tell application "System Events" to click first button of (first window of process "Preview")'
        p = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = p.communicate(script)
Example No. 12
def main():
    """Install vanilla bootstrap packages from local mirror.

    Installing all the bootstrap packages is a lengthy (and highly
    disk-IO bound, thus serializing) procedure, so it's best to do it
    only once. Instead of having each container running the make_package
    stage installing the boostrap packages, we install the bootstrap
    packages in this container and then base the make_package containers
    on the image of this container.
    """
    parser = get_argparser()
    args = parser.parse_args()

    # GPG takes time. Remove package signature checks.
    lines = []
    with open("/etc/pacman.conf") as f:
        for line in f:
            if re.search("SigLevel", line):
                lines.append("SigLevel = Never")
            else:
                lines.append(line.strip())
    with open("/etc/pacman.conf", "w") as f:
        for line in lines:
            print(line.strip(), file=f)

    name_data_file = os.path.join(args.shared_directory, "get_base_package_names", "latest", "names.json")

    with open(name_data_file) as f:
        name_data = json.load(f)
    bootstrap_packs = name_data["base"] + name_data["base_devel"] + name_data["tools"] + ["sloccount"]

    vanilla = "file://" + args.mirror_directory + "/$repo/os/$arch"
    log("info", "Printing %s to mirrorlist" % vanilla)
    with open("/etc/pacman.d/mirrorlist", "w") as f:
        print("Server = " + vanilla, file=f)

    cmd = "pacman -Syy --noconfirm"
    time = timestamp()
    cp = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    log("command", cmd, cp.stdout.splitlines(), time)
    if cp.returncode:
        exit(1)

    cmd = "pacman -Su --noconfirm " + " ".join(bootstrap_packs)
    time = timestamp()
    cp = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    log("command", cmd, cp.stdout.splitlines(), time)
    if cp.returncode:
        exit(1)

    run_cmd("useradd -m -s /bin/bash tuscan", as_root=True)

    # User 'tuscan' needs to be able to use sudo without being harassed
    # for passwords, and so does root (to su into tuscan)
    with open("/etc/sudoers", "a") as f:
        print("tuscan ALL=(ALL) NOPASSWD: ALL", file=f)
        print("root ALL=(ALL) NOPASSWD: ALL", file=f)

    # Download and install bear
    with tempfile.TemporaryDirectory() as d:
        url = "https://github.com/karkhaz/Bear/blob/master/bear-2.1.5-1-x86_64.pkg.tar.xz?raw=true"
        response = urllib.request.urlopen(url)
        tar_file = response.read()
        pkg_name = "bear.pkg.tar.xz"
        with open(os.path.join(d, pkg_name), "wb") as f:
            f.write(tar_file)
        os.chdir(d)
        cmd = "pacman -U --noconfirm %s" % pkg_name
        cp = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
        log("command", cmd, cp.stdout.splitlines())
        if cp.returncode:
            exit(1)

    os.mkdir("/toolchain_root")
    shutil.chown("/toolchain_root", "tuscan")

    # Replace native tools with thin wrappers
    with open("/build/tool_redirect_rules.yaml") as f:
        transforms = yaml.safe_load(f)
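    # remove one copy of each unique name; anything left over was listed twice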
    execs = transforms["overwrite"] + list(transforms["replacements"].keys())
    for e in set(execs):
        execs.remove(e)
    if execs:
        log(
            "error",
            "The following executables have been specified twice in the tool_redirect_rules.yaml: %s" % str(execs),
        )
        exit(1)

    for e in transforms["overwrite"]:
        transforms["replacements"][e] = e
    transforms.pop("overwrite", None)

    jinja = jinja2.Environment(loader=jinja2.FileSystemLoader(["/build"]))

    wrapper_temp = jinja.get_template("tool_wrapper.c")

    with tempfile.TemporaryDirectory() as tmp_dir:
        for native, toolchain in transforms["replacements"].items():
            wrapper = wrapper_temp.render(
                native_program=native, toolchain_prefix=transforms["prefix"], toolchain_program=toolchain
            )

            with tempfile.NamedTemporaryFile("w", suffix=".c") as temp:
                temp.write(wrapper)
                temp.flush()
                cmd = "gcc -o %s %s" % (os.path.join(tmp_dir, native), temp.name)
                proc = subprocess.Popen(
                    cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True
                )
                out, _ = proc.communicate()
                if proc.returncode:
                    body = "%s\n%s\n%s" % (cmd, out, wrapper)
                    log("error", "Failed to compile compiler wrapper", body=body)
                    exit(1)
        for wrapper in os.listdir(tmp_dir):
            shutil.move(os.path.join(tmp_dir, wrapper), os.path.join("/usr/bin", wrapper))

    setup.toolchain_specific_setup(args)

    exit(0)
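
The wrapper-generation loop above implies a tool_redirect_rules.yaml with three top-level keys. A hedged sketch of the parsed structure (the tool names and prefix are illustrative, not from the source):

transforms = {
    "prefix": "x86_64-unknown-linux-",   # assumed toolchain name prefix
    "overwrite": ["ar"],                 # tools wrapped under their own name
    "replacements": {"gcc": "cc"},       # native tool -> toolchain tool
}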
Example No. 13
def withdraw(op):
    """
    in production print_op is replaced with withdraw

        The user has returned some UIA to the issuer!

    upon hearing an on chain UIA transfer to the gateway with memo
    from this definition we trigger a gateway withdrawal event
    release the user's foreign chain funds to the memo
    and burn the returned UIA upon irreversible receipt
    """
    # if it's a transfer to the gateway with a memo
    tgm = False
    if op[0] == 0:  # transfer
        if op[1]["to"] in [
                GATE["uia"]["eos"]["issuer_id"],
                GATE["uia"]["xrp"]["issuer_id"],
        ]:
            print(it("yellow", "gate uia transfer"))
            if "memo" in op[1].keys():
                print(
                    it("red", "TRANSFER TO GATEWAY WITH MEMO\n\n"),
                    it("yellow", op),
                    "\n",
                )
                tgm = True
            else:
                print(it("red", "WARN: transfer to gateway WITHOUT memo"))
    if tgm:
        timestamp()
        line_number()
        order = {}
        # extract the asset_id of the transfer
        uia_id = op[1]["amount"]["asset_id"]
        print("uia_id", uia_id, "\n")
        # EOS specific parameters
        if uia_id == GATE["uia"]["eos"]["asset_id"]:
            network = "eos"
            verify = verify_eosio_account
            listen = listener_eosio
            transfer = eos_transfer
            # eos transfers require a url
            order["url"] = eosio_nodes()[0]  # FIXME what happens if node fails?
        # XRP specific parameters
        elif uia_id == GATE["uia"]["xrp"]["asset_id"]:
            network = "xrp"
            verify = verify_ripple_account
            listen = listener_ripple
            transfer = xrp_transfer
        memo = op[1]["memo"]  # dict with keys("from", "to", "nonce", "message")
        order["private"] = GATE[network][0]["private"]
        order["public"] = GATE[network][0]["public"]
        # convert graphene operation amount to human readable
        order["quantity"] = (op[1]["amount"]["amount"] /
                             10**GATE["uia"][network]["asset_precision"])
        # decode the client's memo using the issuers private key
        order["to"] = ovaltine(memo, GATE["uia"][network]["issuer_private"])
        print(f"decoded {network} client", order["to"], "\n")
        # confirm we're dealing with a legit client address
        if verify(order["to"]):
            listener = Process(
                target=listen,
                args=(
                    0,
                    order["quantity"],
                    "reserve",  # issuer_action
                    None,  # always None for reserve
                ),
            )  # upon hearing real foreign chain transfer, reserve the uia equal
            listener.start()
            print(
                it(
                    "red",
                    f"STARTING {network} LISTENER TO RESERVE {order['quantity']}\n",
                ))
            # wait for listener subprocess to warm up then transfer the order
            time.sleep(30)
            timestamp()
            line_number()
            print(transfer(order))
        else:
            print(
                it("red",
                   f"WARN: memo is NOT a valid {network} account name\n"))
def listener_ripple(account_idx=0,
                    amount=None,
                    issuer_action=None,
                    client_id=None,
                    nonce=0):
    """
    for every block from initialized until detected
        check for transaction to the gateway
            issue or reserve uia upon receipt of gateway transfer

    :param int(account_idx) # from gateway_state.py
    :param float(amount)
    :param str(issuer_action) # None in unit test case
    :param str(client_id) #1.2.X
    :return None:
    """
    gateway = GATE["xrp"][account_idx]["public"]
    uia = GATE["uia"]["xrp"]["asset_name"]
    start_ledger_num = get_validated_ledger()
    checked_ledgers = [start_ledger_num]
    timestamp()
    line_number()
    print(f"nonce {nonce}", "Start ledger:", start_ledger_num, "\n")
    start = time.time()
    while True:
        elapsed = time.time() - start
        if elapsed > DEPOSIT_TIMEOUT:
            print(f"nonce {nonce}", it("red", "XRP GATEWAY TIMEOUT"), gateway)
            # after timeout, release the address
            if issuer_action == "issue":
                unlock_address("ripple", account_idx, DEPOSIT_PAUSE)
            break
        # get the latest validated ledger number
        current_ledger = get_validated_ledger()
        # get the latest ledger number we checked
        max_checked_ledger = max(checked_ledgers)
        # if there are any new validated ledgers
        if current_ledger > max_checked_ledger + 1:
            # check every ledger from last check till now
            for ledger_num in range(max_checked_ledger + 1, current_ledger):
                print(
                    f"nonce {nonce}",
                    it("green", "Ripple Validated Ledger"),
                    it("yellow", ledger_num),
                    time.ctime()[11:19],
                )
                # get each new validated ledger
                transactions = get_ledger(ledger_num)
                # iterate through all transactions in the list of transactions
                for trx in transactions:
                    if not isinstance(trx["Amount"], dict):
                        # localize data from the transaction
                        amount = int(trx["Amount"]) / 10**6  # convert drops to xrp
                        trx_from = trx["Account"]
                        trx_to = trx["Destination"]
                        # during unit testing
                        if issuer_action is None:
                            print(f"nonce {nonce}", ledger_num, trx, "\n")
                        # determine if it is a transfer to or from the gateway
                        if gateway in [trx_from, trx_to]:
                            timestamp()
                            line_number()
                            # establish gateway transfer direction
                            direction = "INCOMING"
                            if gateway == trx_from:
                                direction = "OUTGOING"
                            print(
                                f"nonce {nonce}",
                                it(
                                    "red",
                                    f"{direction} XRP GATEWAY TRANSFER DETECTED\n",
                                ),
                                f"amount {amount}\n",
                                f"from {trx_from}\n",
                                f"to {trx_to}\n",
                            )
                            # the client_id was assigned deposit gateway address
                            # issue UIA to client_id upon receipt of their foreign funds
                            if issuer_action == "issue" and trx_to == gateway:
                                print(
                                    f"nonce {nonce}",
                                    it(
                                        "red",
                                        f"ISSUING {amount} {uia} to {client_id}\n",
                                    ),
                                )
                                issue("xrp", amount, client_id)
                                # in subprocess unlock the deposit address after wait
                                delay = DEPOSIT_TIMEOUT - elapsed + DEPOSIT_PAUSE
                                unlock_address("xrp", account_idx, delay)
                                return  # but immediately kill the listener
                            # the parent process will soon send foreign funds to client_id
                            # reserve UIA upon hearing proof of this transfer
                            if issuer_action == "reserve" and trx_from == gateway:
                                print(
                                    f"nonce {nonce}",
                                    it("red", f"RESERVING {amount} {uia}\n"),
                                )
                                reserve("xrp", amount)
                                return  # kill the listener
                # append this ledger number to the list of checked numbers
                if ledger_num not in checked_ledgers:
                    checked_ledgers.append(ledger_num)
Example No. 15
def submit_problem(tid, request, is_zju_user):
    """Handle problem submission.

    Gets the key and pid from the submitted problem and calls the respective grading function if the values aren't empty.
    If the answer is correct, all relevant cache values are cleared. The submission is then inserted into the database
    (an attempt is made). A relevant message is returned if the problem has already been solved or the answer
    has already been tried.
    """
    # Nginx Configuration Fixed --libmaru
    """
    import common
    common.log('Hello, '+request.remote_addr, 'ERROR')
    """

    """
    response = captcha.submit(
        request.form.get('recaptcha_challenge', ''),
        request.form.get('recaptcha_response', ''),
        '6LcPFPESAAAAAIkncbbAOfUi6sTSrMMxKVA9EcMq',
        request.remote_addr
    )

    if not response.is_valid:
        return {"status": 0, "points": 0, "message": "验证码不正确."}
    """

    t_interval = 10
    last_submitted = cache.get('last_submitted_' + tid)
    if not last_submitted:
        cache.set('last_submitted_' + tid, True, t_interval)
    else:
        return {"status": 0, "points": 0, "message": "相邻提交之间隔须多于%d秒, 请稍后再试." % t_interval}

    pid = request.form.get('pid', '')
    key = request.form.get('key', '')
    if pid == '':
        return {"status": 0, "points": 0, "message": "题目名字不能为空."}
    if key == '':
        return {"status": 0, "points": 0, "message": "答案不能为空."}
    #if pid not in [p['pid'] for p in load_unlocked_problems(tid)]:
    #    return {"status": 0, "points": 0, "message": "You cannot submit problems you have not unlocked."}
    pid = pid.encode('utf8').strip()
    # key = key.encode('utf8').strip()
    prob = cache.get('problem_' + pid)
    if prob is None:
        prob = db.problems.find_one({"pid": pid})
        if prob is None:
            return {"status": 0, "points": 0, "message": "未找到题目'%s'." % pid}
        del prob['_id']
        cache.set('problem_' + pid, json.dumps(prob), 60 * 60)
    else:
        prob = json.loads(prob)

    correct = False
    grader_type = prob.get('grader-type', 'key')
    if grader_type == 'file':
        (correct, message) = imp.load_source(prob['grader'][:-3], "./graders/" + prob['grader']).grade(tid, key)
    elif grader_type == 'key':
        correct = prob['key'] == key
        message = prob.get('correct_msg', '回答正确!') if correct else prob.get('wrong_msg', '回答错误!')
    message = message.encode('utf8')
    
    tstamp = utilities.timestamp(datetime.utcnow())
    submission = {'tid': tid,
                  'timestamp': tstamp,
                  'pid': pid,
                  'ip': request.headers.get('X-Real-IP', None),
                  'key': key,
                  'correct': correct}

    if correct:
        #cache.delete('unlocked_' + tid)  # Clear the unlocked problem cache as it needs updating
        cache.delete('solved_' + tid)  # Clear the list of solved problems
        cache.delete('problems_' + tid)
        if is_zju_user:
            cache.delete('scoreboard_zju')  
        else:
            cache.delete('scoreboard_public')  
        cache.delete('teamscore_' + tid)  # Clear the team's cached score
        cache.delete('lastsubdate_' + tid)
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            return {"status": 0, "points": 0, "message": "你已解决此题!"}
    else:
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            return {"status": 0, "points": 0, "message": "你已提交过这一错误答案!"}
    return {"status": 1 if correct else 0, "points": prob.get('basescore', 0), "message": message}
Example No. 16
    def on_get(self, req, resp):
        """
        When there is a get request made to the deposit server api
        User GET request includes the client_id's BitShares account_name
        Select a gateway wallet from list currently available; remove it from the list
        the available address list will be stored in a json_ipc text pipe
        Server RESPONSE is deposit address and timeout
        After timeout or deposit return address to text pipe list
        """
        confirm_time = {
            "eos": 30,
            "xrp": 2,
        }
        # create a millisecond nonce to log this event
        nonce = milleseconds()
        # extract the incoming parameters to a dictionary
        data = dict(req.params)
        timestamp()
        line_number()
        print(it("red", "DEPOSIT SERVER RECEIVED A GET REQUEST"), "\n")
        call(["hostname", "-I"])
        print(data, "\n")
        client_id = data["client_id"]
        uia = data["uia_name"]
        # translate the incoming uia request to the appropriate network
        network = ""
        if uia == GATE["uia"]["xrp"]["asset_name"]:
            network = "xrp"
        elif uia == GATE["uia"]["eos"]["asset_name"]:
            network = "eos"
        print("network", network, "\n")
        if network in ["xrp", "eos"]:
            # lock an address until this transaction is complete
            gateway_idx = lock_address(network)
            print("gateway index", gateway_idx, "\n")
            if gateway_idx is not None:
                timestamp()
                line_number()
                deposit_address = GATE[network][gateway_idx]["public"]
                print("gateway address", deposit_address, "\n")
                # format a response json
                msg = json_dumps({
                    "response": "success",
                    "server_time": nonce,
                    "deposit_address": deposit_address,
                    "gateway_timeout": "30 MINUTES",
                    "msg": (
                        f"Welcome {client_id}, please deposit your gateway issued "
                        f"{network} asset, to the {uia} gateway 'deposit_address' "
                        "in this response.  Make ONE transfer to this address, "
                        "within the gateway_timeout specified. Transactions on "
                        f"this network take about {confirm_time[network]} "
                        "minutes to confirm."
                    ),
                })
                print(
                    it("red",
                       f"STARTING {network} LISTENER TO ISSUE to {client_id}"),
                    "\n",
                )
                # dispatch the appropriate listener protocol
                listen = {"eos": listener_eosio, "xrp": listener_ripple}
                # in subprocess listen for payment from client_id to gateway[idx]
                # upon receipt issue asset, else timeout
                listener = Process(
                    target=listen[network],
                    args=(gateway_idx, None, "issue", client_id, nonce),
                )
                listener.start()
                print(f"{network}listener started", "\n")

            else:
                msg = json_dumps({
                    "response": "error",
                    "server_time": nonce,
                    "msg": f"all {uia} gateway addresses are in use, please try again later",
                })

        else:
            msg = json_dumps({
                "response": "error",
                "server_time": nonce,
                "msg": f"{uia} is an invalid gateway UIA, please try again",
            })
        # log the response and build the response body with a data dictionary
        doc = str(nonce) + "_" + uia + "_" + client_id + ".txt"
        json_ipc(doc=doc, text=msg)
        time.sleep(5)  # allow some time for listener to start before offering address
        print(msg, "\n")
        resp.body = msg
        resp.status = HTTP_200
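
A hedged client-side sketch of the GET request this handler answers (the endpoint URL and parameter values are hypothetical; the parameter names and response keys come from the handler above):

import requests  # third-party HTTP client

resp = requests.get(
    "http://gateway.example.com:8080/gateway",  # hypothetical deposit server endpoint
    params={"client_id": "1.2.12345", "uia_name": "GATEWAY.XRP"},
)
data = resp.json()
if data["response"] == "success":
    print(data["deposit_address"], data["gateway_timeout"])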
Example No. 17
def listener_eosio(
    account_idx=0, amount=None, issuer_action=None, client_id=None, nonce=0
):
    """
    for every block from initialized until detected
        check for transaction to the gateway
            issue or reserve uia upon receipt of gateway transfer

    :param int(account_idx) # from gateway_state.py
    :param float(amount)
    :param str(issuer_action) # None in unit test case
    :param str(client_id) #1.2.X
    :return None:
    """
    gateway = GATE["eos"][account_idx]["public"]
    uia = GATE["uia"]["eos"]["asset_name"]
    start_block_num = get_irreversible_block()
    checked_blocks = [start_block_num]
    print("Start Block:", start_block_num, "\n")
    # block["transactions"][0]["trx"]["transaction"]["actions"][0] holds:
    #   ["name"] # str("transfer") etc.
    #   ["data"] # dict.keys() [to, from, quantity]
    start = time.time()
    while True:
        elapsed = time.time() - start
        if elapsed > DEPOSIT_TIMEOUT:
            print(
                f"nonce {nonce}",
                it("red", f"{nonce} EOS GATEWAY TIMEOUT"),
                gateway,
                "\n",
            )
            # 10 minutes after timeout, release the address
            if issuer_action == "issue":
                unlock_address("eos", account_idx, DEPOSIT_PAUSE)
            break
        # get the latest irreversible block number
        current_block = get_irreversible_block()
        # get the latest block number we checked
        max_checked_block = max(checked_blocks)
        # if there are any new irreversible blocks
        if current_block > max_checked_block + 1:
            new_blocks = range(max_checked_block + 1, current_block)
            # eosio has a 0.5 second block time; to keep ahead of network
            # latency, fetch all new blocks *concurrently*
            block_processes = {}  # dictionary of multiprocessing "Process" events
            blocks_pipe = {}  # dictionary of multiprocessing "Value" pipes
            # spawn multiple processes to gather the "new" blocks
            for block_num in new_blocks:
                manager = Manager()
                blocks_pipe[block_num] = manager.Value(c_wchar_p, "")
                block_processes[block_num] = Process(
                    target=get_block, args=(block_num, blocks_pipe,)
                )
                block_processes[block_num].start()
            # join all subprocess back to main process; wait for all to finish
            for block_num in new_blocks:
                block_processes[block_num].join()
            # extract the blocks from each "Value" in blocks_pipe
            blocks = {}
            for block_num, block in blocks_pipe.items():
                # create block number keyed dict of block data dicts
                blocks[block_num] = block.value
            # with new cache of blocks, check every block from last check till now
            for block_num in new_blocks:
                print(
                    f"nonce {nonce}",
                    it("purple", "Eosio Irreversible Block"),
                    it("yellow", block_num),
                    time.ctime()[11:19],
                    it("purple", int(time.time())),
                    "\n",
                )
                # get each new irreversible block
                block = blocks[block_num]
                transactions = []
                try:
                    transactions = block["transactions"]
                except Exception:
                    pass
                # iterate through all transactions in the list of transactions
                for trx in transactions:
                    actions = []
                    try:
                        actions = trx["trx"]["transaction"]["actions"]
                    except Exception:
                        pass
                    # if there are any, iterate through the actions
                    for action in actions:
                        try:
                            # sort by transfer ops
                            if (
                                action["name"] == "transfer"
                                # SECURITY: ensure it is the correct contract!!!
                                and action["account"] == "eosio.token"
                            ):
                                # extract transfer op data
                                qty = action["data"]["quantity"]
                                trx_to = action["data"]["to"]
                                trx_from = action["data"]["from"]
                                trx_asset = qty.split(" ")[1].upper()
                                trx_amount = float(qty.split(" ")[0])
                                # sort again by > nil amount of eos
                                if trx_amount > 0.0001 and trx_asset == "EOS":
                                    # during unit testing
                                    # if issuer_action is None:
                                    if DEV:
                                        print(f"nonce {nonce}", block_num, action, "\n")
                                    # if there are any transfers listed
                                    if gateway in [trx_from, trx_to]:
                                        timestamp()
                                        line_number()
                                        print(
                                            f"nonce {nonce}",
                                            it("red", "GATEWAY TRANSFER DETECTED\n"),
                                            f"amount {trx_amount} {trx_asset}\n",
                                            f"from {trx_from}\n",
                                            f"to {trx_to}\n",
                                        )

                                        # issue UIA to client_id
                                        # upon receipt of their foreign funds
                                        if (
                                            issuer_action == "issue"
                                            and trx_to == gateway
                                        ):
                                            print(
                                                f"nonce {nonce}",
                                                it(
                                                    "red",
                                                    f"ISSUING {trx_amount} {uia} to "
                                                    + f"{client_id}\n",
                                                ),
                                            )
                                            issue("eos", trx_amount, client_id)
                                            # unlock the deposit address after some time
                                            delay = (
                                                DEPOSIT_TIMEOUT
                                                - elapsed
                                                + DEPOSIT_PAUSE
                                            )
                                            unlock_address("eos", account_idx, delay)
                                            return
                                        # when returning foreign funds to client,
                                        # upon receipt, reserve equal in UIA
                                        if (
                                            issuer_action == "reserve"
                                            and trx_from == gateway
                                            and trx_amount == amount
                                        ):
                                            print(
                                                f"nonce {nonce}",
                                                it(
                                                    "red", f"RESERVING {amount} {uia}\n"
                                                ),
                                            )
                                            reserve("eos", trx_amount)
                                            return
                        except Exception:
                            print(f"nonce {nonce}", "action", action, "\n")
                            print(traceback.format_exc(), "\n")

                if block_num not in checked_blocks:
                    checked_blocks.append(block_num)
Example No. 18
__email__ = ["*****@*****.**", "*****@*****.**"]
__status__ = "Production"


from datetime import datetime
import time
import json
import group
from common import db
from common import cache
from common import esc

import problem
import utilities

ctf_start = utilities.timestamp(datetime(2014, 4, 5, 0))
ctf_end = utilities.timestamp(datetime(2014, 4, 6, 16))
# # For debugging only
# ctf_start = utilities.timestamp(datetime(2014, 04, 01, 00) - datetime.utcnow() + datetime.now())


def get_group_scoreboards(tid):
    """Gets the group scoreboards.

    Because of the multithreaded implementation, the scoreboard is rebuilt in the aggregator, so this call can only
    return a value from cache. This prevents multiple page requests from invoking a scoreboard rebuild simultaneously.
    Get all groups a user is a member of and look for group scoreboards for each of these groups.
    """
    group_scoreboards = []
    groups = group.get_group_membership(tid)
    for g in groups:
Example No. 19
def copy_and_build(args):
    try:
        shutil.copytree(args.permanent_source_dir, args.build_dir)
    except shutil.Error as e:
        # e.args will be a list, containing a single list of 3-tuples.
        # We are interested in the third item of each tuple.
        errors = [err[2] for err in e.args[0]]
        die(Status.failure,
            "No source directory in source volume: %s" %
            args.permanent_source_dir,
            output=errors)
    recursive_chown(args.build_dir)
    os.chdir(args.build_dir)

    # Add the --host option to invocations of ./configure
    with open("PKGBUILD", encoding="utf-8", errors="ignore") as f:
        pkgbuild = f.read().splitlines()

    if args.toolchain == "android":
        pkgbuild = [
            re.sub(r"configure\s", ("configure --build=x86_64-unknown-linux "
                                    "--host=arm-linux-androideabi "), line)
            for line in pkgbuild
        ]
    else:
        pkgbuild = [
            re.sub(r"configure\s", "configure --host=x86_64-unknown-linux ",
                   line) for line in pkgbuild
        ]

    with open("PKGBUILD", "w", encoding="utf-8") as f:
        f.write("\n".join(pkgbuild))

    # This invocation of makepkg has the --noextract flag, because
    # sources should already have been extracted during the creation of
    # the base image (see stages/create_base_image/getpkgs.py). We still
    # need to perform all other stages of package building, including
    # the prepare() function that is called just before the build()
    # function.
    #
    # The invocation also has the --syncdeps flag; this is fine, because
    # anything that this package depends on should already have been
    # built and its hybrid package will have been installed.
    if args.env_vars is None:
        args.env_vars = []

    command_env = os.environ.copy()
    for pair in args.env_vars:
        var, val = pair.split("=")
        command_env[var] = val

    command = ("sudo -u tuscan " + " ".join(args.env_vars) +
               " red makepkg --noextract --syncdeps"
               " --skipinteg --skippgpcheck --skipchecksums"
               " --noconfirm --nocolor --log --noprogressbar"
               " --nocheck")
    time = timestamp()

    proc = subprocess.Popen(command.split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            env=command_env)

    stdout_data, _ = proc.communicate()
    output = codecs.decode(stdout_data, errors="replace")

    log("command", command, output.splitlines(), time)

    # Measure LOC
    loc_proc = subprocess.Popen(
        ["/usr/bin/sloccount", "--addlang", "makefile", "src"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    out, _ = loc_proc.communicate()
    output = codecs.decode(out, errors="replace")
    if loc_proc.returncode:
        log("die", "SLOCCount failed", output.splitlines())
    else:
        log_sloc(output.splitlines())

    # Pick up output left by red
    try:
        if os.path.exists("compile_commands.json"):
            with open("compile_commands.json") as f:
                red_output = json.load(f)
            log("red", "red", output=red_output)
        else:
            log("die", "No red output found in dir '%s'" % os.getcwd())
    except json.decoder.JSONDecodeError:
        log("red", "red", output=[])

    red_errors = []
    for native in glob("/tmp/red-error-*"):
        with open(native) as f:
            lines = f.readlines()
        red_errors.append({
            "category": lines[0].strip(),
            "pid": lines[1].strip(),
            "info": "\n".join(lines[2:])
        })
        os.unlink(native)

    log("red_errors", "red_errors", output=red_errors)

    return proc.returncode