def test_update_all_nodes(network, args):
    """Roll the entire service from args.package to args.replacement_package.

    Registers the replacement code id, checks both ids are allowed to join,
    retires the old id, replaces every original node with a fresh one running
    the new code, and finally confirms the network still makes progress.
    """
    primary, _ = network.find_nodes()

    # Compute code ids for both the current and the replacement package
    first_code_id, new_code_id = [
        get_code_id(args.oe_binary, infra.path.build_lib_path(pkg, args.enclave_type))
        for pkg in [args.package, args.replacement_package]
    ]

    LOG.info("Add new code id")
    network.consortium.add_new_code(primary, new_code_id)
    with primary.client() as uc:
        r = uc.get("/node/code")
        # Sort both sides by digest so the comparison is order-independent
        versions = sorted(r.body.json()["versions"], key=lambda x: x["digest"])
        expected = sorted(
            [
                {"digest": first_code_id, "status": "ALLOWED_TO_JOIN"},
                {"digest": new_code_id, "status": "ALLOWED_TO_JOIN"},
            ],
            key=lambda x: x["digest"],
        )
        assert versions == expected, versions

    LOG.info("Remove old code id")
    network.consortium.retire_code(primary, first_code_id)
    with primary.client() as uc:
        r = uc.get("/node/code")
        versions = sorted(r.body.json()["versions"], key=lambda x: x["digest"])
        expected = sorted(
            [
                {"digest": new_code_id, "status": "ALLOWED_TO_JOIN"},
            ],
            key=lambda x: x["digest"],
        )
        assert versions == expected, versions

    # Snapshot the current membership before adding replacements
    old_nodes = network.nodes.copy()

    LOG.info("Start fresh nodes running new code")
    for _ in range(0, len(network.nodes)):
        new_node = network.create_and_trust_node(
            args.replacement_package, "local://localhost", args
        )
        assert new_node

    LOG.info("Retire original nodes running old code")
    for node in old_nodes:
        primary, _ = network.find_nodes()
        network.consortium.retire_node(primary, node)
        # Elections take (much) longer than a backup removal which is just
        # a commit, so we need to adjust our timeout accordingly, hence this branch
        if node.node_id == primary.node_id:
            new_primary, new_term = network.wait_for_new_primary(primary.node_id)
            LOG.debug(f"New primary is {new_primary.node_id} in term {new_term}")
            primary = new_primary
        network.nodes.remove(node)
        node.stop()

    LOG.info("Check the network is still functional")
    # new_node is the last replacement created above
    reconfiguration.check_can_progress(new_node)
    return network
def test_new_joiner_helps_liveness(network, args):
    """Check that a newly-trusted node can restore liveness to a minority partition.

    A new node is added while partitioned away, then the primary is also
    partitioned into a minority. Releasing the new node into that minority
    must allow it to catch up and let the (now majority) partition progress.
    """
    primary, backups = network.find_nodes()

    # Issue some transactions, so there is a ledger history that a new node must receive
    network.txs.issue(network, number_txs=10)

    # Remove a node, leaving the network frail
    network.retire_node(primary, backups[-1])
    backups[-1].stop()

    primary, backups = network.find_nodes()

    with contextlib.ExitStack() as stack:
        # Add a new node, but partition them before trusting them
        new_node = network.create_node("local://localhost")
        network.join_node(new_node, args.package, args, from_snapshot=False)
        new_joiner_partition = [new_node]
        new_joiner_rules = stack.enter_context(
            network.partitioner.partition([primary, *backups], new_joiner_partition)
        )

        # Trust the new node, and wait for commit of this (but don't ask the new node itself, which doesn't know this yet)
        network.trust_node(new_node, args, no_wait=True)
        check_can_progress(primary)

        # Partition the primary, temporarily creating a minority service that cannot make progress
        minority_partition = backups[len(backups) // 2 :] + new_joiner_partition
        minority_rules = stack.enter_context(
            network.partitioner.partition(minority_partition)
        )

        # This is an unusual situation, where we've actually produced a dead partitioned node.
        # Initially any write requests will timeout (failed attempt at forwarding), and then
        # the node transitions to a candidate with nobody to talk to. Rather than trying to
        # catch the errors of these states quickly, we just sleep until the latter state is
        # reached, and then confirm it was reached.
        time.sleep(network.observed_election_duration)
        with backups[0].client("user0") as c:
            r = c.post("/app/log/private", {"id": 42, "msg": "Hello world"})
            assert r.status_code == http.HTTPStatus.SERVICE_UNAVAILABLE

        # Restore the new node to the service
        new_joiner_rules.drop()

        # Confirm that the new node catches up, and progress can be made in this majority partition
        network.wait_for_new_primary(primary, minority_partition)
        check_can_progress(new_node)

        # Explicitly drop rules before continuing
        minority_rules.drop()

        network.wait_for_primary_unanimity()
        primary, _ = network.find_nodes()
        network.wait_for_all_nodes_to_commit(primary=primary)
def test_learner_does_not_take_part(network, args):
    """Check that a joining node stays a Learner while f+1 backups are partitioned.

    With the partition in place the service cannot commit, so the trust_node
    proposal cannot take effect and the join is expected to time out.
    """
    primary, backups = network.find_nodes()
    f_backups = backups[: network.get_f() + 1]

    new_node = network.create_node("local://localhost")
    network.join_node(new_node, args.package, args, from_snapshot=False)

    with network.partitioner.partition(f_backups):
        check_does_not_progress(primary, timeout=5)

        # The join must fail: promotion to TRUSTED requires commit, which is
        # impossible while the partition holds
        try:
            network.consortium.trust_node(
                primary,
                new_node.node_id,
                timeout=ceil(args.join_timer * 2 / 1000),
                valid_from=str(infra.crypto.datetime_to_X509time(datetime.now())),
            )
            new_node.wait_for_node_to_join(timeout=ceil(args.join_timer * 2 / 1000))
            join_failed = False
        except Exception:
            join_failed = True

        if not join_failed:
            raise Exception("join succeeded unexpectedly")

        # The new node sees itself as a Learner and is listed as such in consensus details
        with new_node.client(self_signed_ok=True) as c:
            r = c.get("/node/network/nodes/self")
            assert r.body.json()["status"] == "Learner"
            r = c.get("/node/consensus")
            assert new_node.node_id in r.body.json()["details"]["learners"]

        # New node joins, but cannot be promoted to TRUSTED without f other backups
        check_does_not_progress(primary, timeout=5)

        with new_node.client(self_signed_ok=True) as c:
            r = c.get("/node/network/nodes/self")
            assert r.body.json()["status"] == "Learner"
            r = c.get("/node/consensus")
            assert new_node.node_id in r.body.json()["details"]["learners"]

    # Once the partition is lifted, the service recovers and can make progress
    network.wait_for_primary_unanimity()
    primary, _ = network.find_nodes()
    network.wait_for_all_nodes_to_commit(primary=primary)
    check_can_progress(primary)
def test_module_set_and_remove(network, args):
    """Set a module at an arbitrary path via governance, read it back, then remove it.

    Fix: the reference module content is now read with a context manager
    instead of a bare ``open(...).read()``, which leaked the file handle
    until garbage collection.
    """
    primary, _ = network.find_nodes()

    LOG.info("Member makes a module set proposal")
    bundle_dir = os.path.join(THIS_DIR, "basic-module-import")
    module_file_path = os.path.join(bundle_dir, "src", "foo.js")
    # The stored module path is arbitrary, independent of the source file path
    module_path = "/anything/you/want/when/setting/manually/dot/js.js"
    make_module_set_proposal(module_path, module_file_path, network)

    with open(module_file_path, "r") as module_file:
        module_content = module_file.read()

    with primary.client(network.consortium.get_any_active_member().local_id) as c:
        r = c.post("/gov/read", {"table": "public:gov.modules", "key": module_path})
        assert r.status_code == http.HTTPStatus.OK, r.status_code
        assert r.body.json()["js"] == module_content, r.body

    LOG.info("Member makes a module remove proposal")
    proposal_body, careful_vote = ccf.proposal_generator.remove_module(module_path)
    proposal = network.consortium.get_any_active_member().propose(
        primary, proposal_body
    )
    network.consortium.vote_using_majority(primary, proposal, careful_vote)

    with primary.client(network.consortium.get_any_active_member().local_id) as c:
        r = c.post("/gov/read", {"table": "public:gov.modules", "key": module_path})
        # The key is gone after removal
        assert r.status_code == http.HTTPStatus.NOT_FOUND, r.status_code
    return network
def test_node_replacement(network, args):
    """Retire a backup, then add a replacement node reusing its exact address and ports.

    Also suspends as many other backups as fault tolerance allows, to confirm
    that progress then depends on the replacement node participating.
    """
    primary, backups = network.find_nodes()

    node_to_replace = backups[-1]
    LOG.info(f"Retiring node {node_to_replace.local_node_id}")
    network.retire_node(primary, node_to_replace)
    node_to_replace.stop()
    check_can_progress(primary)

    LOG.info("Adding one node on same address as retired node")
    replacement_node = network.create_node(
        f"local://{node_to_replace.rpc_host}:{node_to_replace.rpc_port}",
        node_port=node_to_replace.node_port,
    )
    network.join_node(replacement_node, args.package, args, from_snapshot=False)
    network.trust_node(replacement_node, args)

    # Same network address, but a brand-new node identity
    assert replacement_node.node_id != node_to_replace.node_id
    assert replacement_node.rpc_host == node_to_replace.rpc_host
    assert replacement_node.node_port == node_to_replace.node_port
    assert replacement_node.rpc_port == node_to_replace.rpc_port

    # Suspend only as many backups as the service can lose while staying live
    allowed_to_suspend_count = network.get_f() - len(network.get_stopped_nodes())
    backups_to_suspend = backups[:allowed_to_suspend_count]
    LOG.info(
        f"Suspending {len(backups_to_suspend)} other nodes to make progress depend on the replacement"
    )
    for other_backup in backups_to_suspend:
        other_backup.suspend()
    # Confirm the network can make progress
    check_can_progress(primary)
    for other_backup in backups_to_suspend:
        other_backup.resume()

    return network
def test_jwt_key_refresh_aad(network, args):
    """Register login.microsoftonline.com as a JWT issuer and check its keys are fetched.

    Fetches the live AAD TLS CA chain, registers it as a cert bundle, then
    configures the issuer with auto-refresh and verifies the one-off key
    fetch populates the KV. Requires outbound network access.
    """
    primary, _ = network.find_nodes()

    hostname = "login.microsoftonline.com"
    # Perform a TLS handshake once so the default context records the CA chain
    ctx = ssl.create_default_context()
    with ctx.wrap_socket(socket.socket(), server_hostname=hostname) as s:
        s.connect((hostname, 443))
    ca_der = ctx.get_ca_certs(binary_form=True)[0]
    ca_pem = infra.crypto.cert_der_to_pem(ca_der)

    LOG.info("Add CA cert for JWT issuer")
    with tempfile.NamedTemporaryFile(prefix="ccf", mode="w+") as ca_cert_bundle_fp:
        ca_cert_bundle_fp.write(ca_pem)
        ca_cert_bundle_fp.flush()
        network.consortium.set_ca_cert_bundle(primary, "aad", ca_cert_bundle_fp.name)

    issuer = "https://login.microsoftonline.com/common/v2.0/"
    with tempfile.NamedTemporaryFile(prefix="ccf", mode="w+") as metadata_fp:
        json.dump(
            {
                "issuer": issuer,
                "auto_refresh": True,
                "ca_cert_bundle_name": "aad",
            },
            metadata_fp,
        )
        metadata_fp.flush()
        network.consortium.set_jwt_issuer(primary, metadata_fp.name)

    LOG.info("Check that keys got refreshed")
    # Auto-refresh interval has been set to a large value so that it doesn't happen within the timeout.
    # This is testing the one-off refresh after adding a new issuer.
    with_timeout(lambda: check_kv_jwt_keys_not_empty(network, issuer), timeout=5)
def test_jwt_endpoint(network, args):
    """Register two JWT issuers with two kids each and read them all back.

    Verifies /gov/jwt_keys/all returns every kid with the correct issuer
    attribution and signing cert.
    """
    primary, _ = network.find_nodes()

    keys = {
        infra.jwt_issuer.JwtIssuer("issuer1"): ["issuer1_kid1", "issuer1_kid2"],
        infra.jwt_issuer.JwtIssuer("issuer2"): ["issuer2_kid1", "issuer2_kid2"],
    }

    LOG.info("Register JWT issuer with multiple kids")
    for issuer, kids in keys.items():
        # First register the issuer itself, then its signing keys
        with tempfile.NamedTemporaryFile(prefix="ccf", mode="w+") as metadata_fp:
            json.dump({"issuer": issuer.name}, metadata_fp)
            metadata_fp.flush()
            network.consortium.set_jwt_issuer(primary, metadata_fp.name)
        with tempfile.NamedTemporaryFile(prefix="ccf", mode="w+") as jwks_fp:
            json.dump(issuer.create_jwks_for_kids(kids), jwks_fp)
            jwks_fp.flush()
            network.consortium.set_jwt_public_signing_keys(
                primary, issuer.name, jwks_fp.name
            )

    LOG.info("Check that JWT endpoint returns all keys and issuers")
    with primary.client(network.consortium.get_any_active_member().local_id) as c:
        r = c.get("/gov/jwt_keys/all")
        assert r.status_code == 200, r
        info = r.body.json()
        # Every registered kid must be present, attributed to its issuer with its cert
        for issuer, kids in keys.items():
            for kid in kids:
                assert kid in info, r
                assert info[kid]["issuer"] == issuer.name
                assert info[kid]["cert"] == issuer.cert_pem
def test_npm_tsoa_app(network, args):
    """Build and deploy the tsoa-generated npm app bundle, then call its endpoints."""
    primary, _ = network.find_nodes()

    LOG.info("Building tsoa npm app")
    tsoa_dir = os.path.join(PARENT_DIR, "npm-tsoa-app")
    for npm_cmd in (["npm", "install"], ["npm", "run", "build"]):
        subprocess.run(npm_cmd, cwd=tsoa_dir, check=True)

    LOG.info("Deploying tsoa npm app")
    network.consortium.deploy_js_app(primary, os.path.join(tsoa_dir, "dist"))

    LOG.info("Calling tsoa npm app endpoints")
    with primary.client("user0") as c:
        payload = [1, 2, 3, 4]

        resp = c.post("/app/partition", payload)
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.body.json() == [[1, 3], [2, 4]], resp.body

        resp = c.post("/app/proto", payload)
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.headers["content-type"] == "application/x-protobuf"
        # We could now decode the protobuf message but given all the machinery
        # involved to make it happen (code generation with protoc) we'll leave it at that.
        assert len(resp.body) == 14, len(resp.body)

        resp = c.get("/app/crypto")
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.body.json()["available"], resp.body

        validate_openapi(c)

    return network
def test_host_process_launch_many(network, args):
    """Launch many host processes at once and poll until each wrote its marker file."""
    primary, _ = network.find_nodes()
    with tempfile.TemporaryDirectory() as out_dir:
        script = os.path.join(os.path.dirname(__file__), "host_process.sh")
        count = 100
        with primary.client("user0") as c:
            resp = c.post(
                "/app/launch_many",
                body={"program": script, "out_dir": out_dir, "count": count},
            )
            assert resp.status_code == http.HTTPStatus.OK, resp.status_code

        # Each launched process writes a file named after its index; poll for them
        remaining = set(range(count))
        timeout = 2
        deadline = time.time() + timeout
        while remaining and time.time() < deadline:
            remaining = {
                i
                for i in remaining
                if not os.path.exists(os.path.join(out_dir, f"{i}"))
            }
            if remaining:
                time.sleep(0.1)
        assert (
            not remaining
        ), f"{len(remaining)} pending host processes after {timeout}s"
    return network
def test_host_process_launch(network, args):
    """Launch a single host process and check it writes the expected file content."""
    primary, _ = network.find_nodes()
    with tempfile.TemporaryDirectory() as tmp_dir:
        script = os.path.join(os.path.dirname(__file__), "host_process.sh")
        out_path = os.path.join(tmp_dir, "test.out")
        expected_content = "Hello world!"
        launch_args = [script, expected_content, out_path]

        with primary.client("user0") as c:
            resp = c.post("/app/launch", body={"args": launch_args})
            assert resp.status_code == http.HTTPStatus.OK, resp.status_code

        # Poll for the output file to appear within the timeout
        timeout = 1
        deadline = time.time() + timeout
        while time.time() < deadline and not os.path.exists(out_path):
            time.sleep(0.1)
        assert os.path.exists(
            out_path
        ), f"host process did not run within {timeout}s"

        with open(out_path, encoding="utf-8") as f:
            content = f.read()
        assert expected_content == content, content
    return network
def test_module_import(network, args): primary, _ = network.find_nodes() # Add module with tempfile.NamedTemporaryFile("w") as f: f.write(MODULE_CONTENT) f.flush() proposal_body, _ = ccf.proposal_generator.set_module("foo.js", f.name) proposal = network.consortium.get_any_active_member().propose( primary, proposal_body) network.consortium.vote_using_majority(primary, proposal) # Update JS app which imports module with tempfile.NamedTemporaryFile("w") as f: f.write(APP_SCRIPT) f.flush() network.consortium.set_js_app(remote_node=primary, app_script_path=f.name) with primary.client("user0") as c: r = c.post("/app/test_module", {}) assert r.status_code == 200, r.status_code assert r.body == MODULE_RETURN return network
def test_proposal_invalidation(network, args):
    """Check that open proposals are dropped when a governance change (new code ID) lands."""
    primary, _ = network.find_nodes()

    LOG.info("Create an open proposal")
    pending_proposals = []
    with primary.client(None, "member0") as c:
        new_member_proposal, _, _ = network.consortium.generate_and_propose_new_member(
            primary, curve=args.participants_curve
        )
        pending_proposals.append(new_member_proposal.proposal_id)

    LOG.info("Add temporary code ID")
    temp_code_id = infra.utils.get_code_id(
        args.enclave_type, args.oe_binary, get_replacement_package(args)
    )
    network.consortium.add_new_code(primary, temp_code_id)

    LOG.info("Confirm open proposals are dropped")
    with primary.client(None, "member0") as c:
        for proposal_id in pending_proposals:
            r = c.get(f"/gov/proposals/{proposal_id}")
            assert r.status_code == 200, r.body.text()
            # Accepting the code-ID proposal invalidates the still-open ones
            assert r.body.json()["state"] == "Dropped", r.body.json()

    LOG.info("Remove temporary code ID")
    network.consortium.retire_code(primary, temp_code_id)

    return network
def test_consensus_status(network, args):
    """Check that the current primary reports itself as consensus Leader."""
    primary, _ = network.find_nodes()
    with primary.client() as client:
        resp = client.get("/node/consensus")
        assert resp.status_code == http.HTTPStatus.OK.value
        leadership = resp.body.json()["details"]["leadership_state"]
        assert leadership == "Leader"
    return network
def test_module_set_and_remove(network, args):
    """Set module foo.js via proposal, read it back from ccf.modules, then remove it."""
    primary, _ = network.find_nodes()

    LOG.info("Member makes a module update proposal")
    with tempfile.NamedTemporaryFile("w") as f:
        f.write(MODULE_CONTENT)
        f.flush()
        proposal_body, _ = ccf.proposal_generator.set_module("foo.js", f.name)
        proposal = network.consortium.get_any_active_member().propose(
            primary, proposal_body
        )
        network.consortium.vote_using_majority(primary, proposal)

    with primary.client(
        f"member{network.consortium.get_any_active_member().member_id}"
    ) as c:
        r = c.post("/gov/read", {"table": "ccf.modules", "key": "foo.js"})
        assert r.status_code == 200, r.status_code
        # NOTE(review): body is indexed directly (r.body["js"]) here, unlike the
        # r.body.json() accessor used elsewhere in this file — confirm which
        # client API version this test targets.
        assert r.body["js"] == MODULE_CONTENT, r.body

    LOG.info("Member makes a module remove proposal")
    proposal_body, _ = ccf.proposal_generator.remove_module("foo.js")
    proposal = network.consortium.get_any_active_member().propose(
        primary, proposal_body
    )
    network.consortium.vote_using_majority(primary, proposal)

    with primary.client(
        f"member{network.consortium.get_any_active_member().member_id}"
    ) as c:
        r = c.post("/gov/read", {"table": "ccf.modules", "key": "foo.js"})
        # Reading the removed key now fails
        assert r.status_code == 400, r.status_code
    return network
def test_accept_header(network, args):
    """Exercise content negotiation on /node/commit via the Accept header."""
    primary, _ = network.find_nodes()

    with primary.client() as c:

        def fetch_commit(accept):
            # Helper: GET /node/commit with the given Accept header value
            return c.get("/node/commit", headers={"accept": accept})

        # A malformed Accept value is rejected outright
        r = fetch_commit("nonsense")
        assert r.status_code == http.HTTPStatus.BAD_REQUEST.value

        # Parseable but unsupported types are not acceptable
        r = fetch_commit("text/html")
        assert r.status_code == http.HTTPStatus.NOT_ACCEPTABLE.value
        r = fetch_commit("text/html;q=0.9,image/jpeg;video/mpeg;q=0.8")
        assert r.status_code == http.HTTPStatus.NOT_ACCEPTABLE.value

        # Wildcards succeed
        for wildcard in ("*/*", "application/*"):
            r = fetch_commit(wildcard)
            assert r.status_code == http.HTTPStatus.OK.value

        # Concrete supported types succeed and are echoed in content-type
        r = fetch_commit("application/json")
        assert r.status_code == http.HTTPStatus.OK.value
        assert r.headers["content-type"] == "application/json"

        r = fetch_commit("application/msgpack")
        assert r.status_code == http.HTTPStatus.OK.value
        assert r.headers["content-type"] == "application/msgpack"

        # Unsupported preferred types fall through to a supported low-q alternative
        r = fetch_commit("text/html;q=0.9,image/jpeg;video/mpeg;q=0.8,*/*;q=0.1")
        assert r.status_code == http.HTTPStatus.OK.value

        r = fetch_commit(
            "text/html;q=0.9,image/jpeg;video/mpeg;q=0.8,application/json;q=0.1"
        )
        assert r.status_code == http.HTTPStatus.OK.value
        assert r.headers["content-type"] == "application/json"

        r = fetch_commit(
            "text/html;q=0.9,image/jpeg;video/mpeg;q=0.8,application/msgpack;q=0.1"
        )
        assert r.status_code == http.HTTPStatus.OK.value
        assert r.headers["content-type"] == "application/msgpack"

    return network
def test_content_types(network, args):
    """Deploy APP_SCRIPT and verify each endpoint round-trips its content type."""
    primary, _ = network.find_nodes()

    with tempfile.NamedTemporaryFile("w") as script_file:
        script_file.write(APP_SCRIPT)
        script_file.flush()
        network.consortium.set_js_app(
            remote_node=primary, app_script_path=script_file.name
        )

    with primary.client("user0") as c:
        # Plain text round-trips
        resp = c.post("/app/text", body="text")
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.headers["content-type"] == "text/plain"
        assert resp.body.text() == "text"

        # JSON round-trips
        resp = c.post("/app/json", body={"foo": "bar"})
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.headers["content-type"] == "application/json"
        assert resp.body.json() == {"foo": "bar"}

        # Raw bytes round-trip
        resp = c.post("/app/binary", body=b"\x00" * 42)
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.headers["content-type"] == "application/octet-stream"
        assert resp.body.data() == b"\x00" * 42, resp.body

        # A custom request content-type is accepted; the response type is the app's
        resp = c.post("/app/custom", body="text", headers={"content-type": "foo/bar"})
        assert resp.status_code == http.HTTPStatus.OK, resp.status_code
        assert resp.headers["content-type"] == "text/plain"
        assert resp.body.text() == "text"

    return network
def make_module_set_proposal(module_path, file_path, network):
    """Propose and pass a set_module governance action for the given module."""
    primary, _ = network.find_nodes()
    body, vote = ccf.proposal_generator.set_module(module_path, file_path)
    member = network.consortium.get_any_active_member()
    proposal = member.propose(primary, body)
    network.consortium.vote_using_majority(primary, proposal, vote)
def test_create_endpoint(network, args):
    """/node/create must be rejected once the node has left its initial state."""
    primary, _ = network.find_nodes()
    with primary.client() as client:
        resp = client.post("/node/create")
        assert resp.status_code == http.HTTPStatus.FORBIDDEN.value
        err = resp.body.json()["error"]
        assert err["message"] == "Node is not in initial state."
    return network
def test_unknown_path(network, args):
    """Unknown paths return 404 for both an authenticated user and an anonymous client."""
    primary, _ = network.find_nodes()
    # Same checks with a user identity, then with no identity at all
    for client_args in (("user0",), ()):
        with primary.client(*client_args) as c:
            for method in (c.get, c.post, c.delete):
                r = method("/app/not/a/real/path")
                assert r.status_code == http.HTTPStatus.NOT_FOUND, r.status_code
            r = c.post("/app/unknown")
            assert r.status_code == http.HTTPStatus.NOT_FOUND, r.status_code
    return network
def test_historical_receipts(network, args):
    """Fetch and verify receipts for historical transactions from primary and a backup.

    Fix: the number of issued transactions now uses TXS_COUNT rather than
    repeating the literal 5, so the constant and the issued count cannot
    silently drift apart.
    """
    if args.consensus == "bft":
        LOG.warning("Skipping historical queries in BFT")
        return network

    primary, backups = network.find_nodes()
    cert_path = os.path.join(primary.common_dir, f"{primary.local_node_id}.pem")
    with open(cert_path) as c:
        primary_cert = load_pem_x509_certificate(
            c.read().encode("ascii"), default_backend()
        )

    TXS_COUNT = 5
    network.txs.issue(network, number_txs=TXS_COUNT)
    for idx in range(1, TXS_COUNT + 1):
        for node in [primary, backups[0]]:
            first_msg = network.txs.priv[idx][0]
            first_receipt = network.txs.get_receipt(
                node, idx, first_msg["seqno"], first_msg["view"]
            )
            r = first_receipt.json()["receipt"]
            # The Merkle proof must reconstruct the signed root
            assert r["root"] == ccf.receipt.root(r["leaf"], r["proof"])
            ccf.receipt.verify(r["root"], r["signature"], primary_cert)

            # receipt.verify() raises if it fails, but does not return anything
            # Negative check: verifying against a bogus root must fail
            verified = True
            try:
                ccf.receipt.verify(
                    hashlib.sha256(b"").hexdigest(), r["signature"], primary_cert
                )
            except InvalidSignature:
                verified = False
            assert not verified

    return network
def test_isolate_and_reconnect_primary(network, args, **kwargs):
    """Partition the primary away from all backups, then let it rejoin and catch up."""
    primary, backups = network.find_nodes()
    with network.partitioner.partition(backups):
        # Transactions sent to the isolated primary cannot commit
        lost_tx_resp = check_does_not_progress(primary)

        # The backups elect a new primary among themselves and make progress
        new_primary, _ = network.wait_for_new_primary(
            primary, nodes=backups, timeout_multiplier=6
        )
        new_tx_resp = check_can_progress(new_primary)

    # Check reconnected former primary has caught up
    with primary.client() as c:
        try:
            # There will be at least one full election cycle for nothing, where the
            # re-joining node fails to get elected but causes others to rev up their
            # term. After that, a successful election needs to take place, and we
            # arbitrarily allow 3 time periods to avoid being too brittle when
            # raft timeouts line up badly.
            c.wait_for_commit(new_tx_resp, timeout=(network.election_duration * 4))
        except TimeoutError:
            details = c.get("/node/consensus").body.json()
            assert (
                False
            ), f"Stuck before {new_tx_resp.view}.{new_tx_resp.seqno}: {pprint.pformat(details)}"

        # Check it has dropped anything submitted while partitioned
        r = c.get(
            f"/node/tx?transaction_id={lost_tx_resp.view}.{lost_tx_resp.seqno}"
        )
        status = TxStatus(r.body.json()["status"])
        assert status == TxStatus.Invalid, r
def test_supported_methods(network, args):
    """Verify the ALLOW header advertised for wrong-method and OPTIONS requests."""
    primary, _ = network.find_nodes()

    def assert_allow(response, expected_status, *verbs):
        # Helper: check status code and that each verb appears in the allow header
        assert response.status_code == expected_status
        allow = response.headers.get("allow")
        assert allow is not None
        for verb in verbs:
            assert verb in allow

    with primary.client("user0") as c:
        # Wrong method on a POST-only app endpoint
        assert_allow(
            c.delete("/app/text"), http.HTTPStatus.METHOD_NOT_ALLOWED, "OPTIONS", "POST"
        )
        # OPTIONS on the same POST-only app endpoint
        assert_allow(
            c.options("/app/text"), http.HTTPStatus.NO_CONTENT, "OPTIONS", "POST"
        )
        # OPTIONS on a GET-only framework endpoint
        assert_allow(
            c.options("/node/commit"), http.HTTPStatus.NO_CONTENT, "OPTIONS", "GET"
        )
    return network
def test_content_types(network, args):
    """Each app endpoint must echo its payload with the matching content-type."""
    primary, _ = network.find_nodes()
    with primary.client("user0") as uc:
        # Plain text
        text_r = uc.post("/app/text", body="text")
        assert text_r.status_code == http.HTTPStatus.OK, text_r.status_code
        assert text_r.headers["content-type"] == "text/plain"
        assert text_r.body.text() == "text"

        # JSON
        json_r = uc.post("/app/json", body={"foo": "bar"})
        assert json_r.status_code == http.HTTPStatus.OK, json_r.status_code
        assert json_r.headers["content-type"] == "application/json"
        assert json_r.body.json() == {"foo": "bar"}

        # Raw bytes
        bin_r = uc.post("/app/binary", body=b"\x00" * 42)
        assert bin_r.status_code == http.HTTPStatus.OK, bin_r.status_code
        assert bin_r.headers["content-type"] == "application/octet-stream"
        assert bin_r.body.data() == b"\x00" * 42, bin_r.body

        # Custom request content-type is accepted; response type is the app's
        custom_r = uc.post(
            "/app/custom", body="text", headers={"content-type": "foo/bar"}
        )
        assert custom_r.status_code == http.HTTPStatus.OK, custom_r.status_code
        assert custom_r.headers["content-type"] == "text/plain"
        assert custom_r.body.text() == "text"
    return network
def test_module_set_and_remove(network, args):
    """Set MODULE_PATH_1 via governance, read it back, then remove it and confirm."""
    primary, _ = network.find_nodes()

    LOG.info("Member makes a module update proposal")
    make_module_set_proposal(MODULE_PATH_1, MODULE_CONTENT_1, network)

    member_label = f"member{network.consortium.get_any_active_member().member_id}"
    with primary.client(member_label) as c:
        read_r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
        assert read_r.status_code == http.HTTPStatus.OK, read_r.status_code
        assert read_r.body["js"] == MODULE_CONTENT_1, read_r.body

    LOG.info("Member makes a module remove proposal")
    removal_body, _ = ccf.proposal_generator.remove_module(MODULE_PATH_1)
    removal = network.consortium.get_any_active_member().propose(primary, removal_body)
    network.consortium.vote_using_majority(primary, removal)

    member_label = f"member{network.consortium.get_any_active_member().member_id}"
    with primary.client(member_label) as c:
        read_r = c.post("/gov/read", {"table": "ccf.modules", "key": MODULE_PATH_1})
        # Reading the removed key now fails
        assert read_r.status_code == http.HTTPStatus.BAD_REQUEST, read_r.status_code
    return network
def test_isolate_primary_from_one_backup(network, args): primary, backups = network.find_nodes() # Issue one transaction, waiting for all nodes to be have reached # the same level of commit, so that nodes outside of partition can # become primary after this one is dropped # Note: Because of https://github.com/microsoft/CCF/issues/2224, we need to # issue a write transaction instead of just reading the TxID of the latest entry network.txs.issue(network) # Isolate first backup from primary so that first backup becomes candidate # in a new term and wins the election # Note: Managed manually rules = network.partitioner.isolate_node(primary, backups[0]) new_primary, new_view = network.wait_for_new_primary(primary, nodes=backups, timeout_multiplier=6) # Explicitly drop rules before continuing rules.drop() # Old primary should now report of the new primary new_primary_, new_view_ = network.wait_for_new_primary(primary, nodes=[primary]) assert ( new_primary == new_primary_ ), f"New primary {new_primary_.local_node_id} after partition is dropped is different than before {new_primary.local_node_id}" assert ( new_view == new_view_ ), f"Consensus view {new_view} should not changed after partition is dropped: no {new_view_}" return network
def test_partition_majority(network, args): primary, backups = network.find_nodes() # Create a partition with primary + half remaining nodes (i.e. majority) partition = [primary] partition.extend(backups[len(backups) // 2:]) # Wait for all nodes to be have reached the same level of commit, so that # nodes outside of partition can become primary after this one is dropped network.wait_for_all_nodes_to_commit(primary=primary) # The primary should remain stable while the partition is active # Note: Context manager initial_view = None with network.partitioner.partition(partition): try: network.wait_for_new_primary(primary) assert False, "No new primary should be elected when partitioning majority" except TimeoutError: LOG.info("No new primary, as expected") with primary.client() as c: res = c.get("/node/network") # Well-known read-only endpoint body = res.body.json() initial_view = body["current_view"] # The partitioned nodes will have called elections, increasing their view. # When the partition is lifted, the nodes must elect a new leader, in at least this # increased term. The winning node could come from either partition, and could even # be the original primary. network.wait_for_primary_unanimity(min_view=initial_view) return network
def test_cert_store(network, args):
    """Exercise CA cert governance: malformed certs are rejected, a valid one is stored.

    Fix: the reference PEM is now read via a context manager instead of a
    bare ``open(...).read()``, which leaked the file handle until garbage
    collection.
    """
    primary, _ = network.find_nodes()

    LOG.info("Member builds a ca cert update proposal with malformed cert")
    with tempfile.NamedTemporaryFile("w") as f:
        f.write("foo")
        f.flush()
        # Proposal generation itself must reject a malformed cert
        try:
            proposal_body, _ = ccf.proposal_generator.update_ca_cert("mycert", f.name)
        except ValueError:
            pass
        else:
            assert False, "update_ca_cert should have raised an error"

    LOG.info("Member makes a ca cert update proposal with malformed cert")
    with tempfile.NamedTemporaryFile("w") as f:
        f.write("foo")
        f.flush()
        # With checks skipped the proposal is built, but the service rejects it
        proposal_body, _ = ccf.proposal_generator.update_ca_cert(
            "mycert", f.name, skip_checks=True
        )
        try:
            proposal = network.consortium.get_any_active_member().propose(
                primary, proposal_body
            )
        except infra.proposal.ProposalNotCreated:
            pass
        else:
            assert False, "Proposal should not have been created"

    LOG.info("Member makes a ca cert update proposal with valid cert")
    ca_cert_path = os.path.join(this_dir, "ca_cert.pem")
    proposal_body, _ = ccf.proposal_generator.update_ca_cert("mycert", ca_cert_path)
    proposal = network.consortium.get_any_active_member().propose(
        primary, proposal_body
    )
    assert proposal.state == ProposalState.Accepted

    with primary.client(
        f"member{network.consortium.get_any_active_member().member_id}"
    ) as c:
        r = c.post("/gov/read", {"table": "ccf.ca_cert_ders", "key": "mycert"})
        assert r.status_code == 200, r.status_code
        # Read the reference cert with a context manager so the handle is
        # closed deterministically (previously leaked until GC)
        with open(ca_cert_path) as ca_cert_file:
            cert_pem_str = ca_cert_file.read()
        cert_ref = x509.load_pem_x509_certificate(
            cert_pem_str.encode(), crypto_backends.default_backend()
        )
        cert_kv = x509.load_der_x509_certificate(
            # Note that /gov/read returns all data as JSON.
            # Here, the stored data is a uint8 array, therefore it
            # is returned as an array of integers.
            bytes(r.body.json()),
            crypto_backends.default_backend(),
        )
        assert (
            cert_ref == cert_kv
        ), f"stored cert not equal to input cert: {cert_ref} != {cert_kv}"

    return network
def test_npm_tsoa_app(network, args):
    """Build and deploy the tsoa npm app module-by-module, then call its endpoints.

    Unlike bundle-based deployment, this registers the dist modules under a
    prefix and generates a proxy endpoint script from the tsoa metadata.
    """
    primary, _ = network.find_nodes()

    LOG.info("Building npm app")
    app_dir = os.path.join(THIS_DIR, "npm-tsoa-app")
    subprocess.run(["npm", "install"], cwd=app_dir, check=True)
    subprocess.run(["npm", "run", "build"], cwd=app_dir, check=True)

    LOG.info("Deploying npm app modules")
    module_name_prefix = "/my-tsoa-npm-app/"
    dist_dir = os.path.join(app_dir, "dist")
    proposal_body, _ = ccf.proposal_generator.update_modules(
        module_name_prefix, dist_dir
    )
    proposal = network.consortium.get_any_active_member().propose(
        primary, proposal_body
    )
    network.consortium.vote_using_majority(primary, proposal)

    LOG.info("Deploying endpoint script")
    metadata_path = os.path.join(dist_dir, "endpoints.json")
    with open(metadata_path) as f:
        metadata = json.load(f)
    # Temporarily only until endpoints can be called directly without proxy.
    # Build a table mapping "METHOD url" to a stub that forwards to the module.
    # NOTE(review): the template's internal line breaks were reconstructed —
    # the generated script tolerates whitespace changes, but confirm against
    # the original source.
    app_script = "return {"
    for url, methods in metadata["endpoints"].items():
        for method, cfg in methods.items():
            app_script += f"""
 ["{method.upper()} {url[1:]}"] = [[
 import {{ {cfg["js_function"]} as f }} from ".{module_name_prefix}{cfg["js_module"]}";
 export default (request) => f(request);
 ]],"""
    # Drop the trailing comma and close the table
    app_script = app_script[:-1] + "\n}"
    with tempfile.NamedTemporaryFile("w") as f:
        f.write(app_script)
        f.flush()
        network.consortium.set_js_app(remote_node=primary, app_script_path=f.name)

    LOG.info("Calling npm app endpoints")
    with primary.client("user0") as c:
        body = [1, 2, 3, 4]
        r = c.post("/app/partition", body)
        assert r.status_code == http.HTTPStatus.OK, r.status_code
        assert r.body.json() == [[1, 3], [2, 4]], r.body
        r = c.post("/app/proto", body)
        assert r.status_code == http.HTTPStatus.OK, r.status_code
        assert r.headers["content-type"] == "application/x-protobuf"
        # We could now decode the protobuf message but given all the machinery
        # involved to make it happen (code generation with protoc) we'll leave it at that.
        assert len(r.body) == 14, len(r.body)
        r = c.get("/app/crypto")
        assert r.status_code == http.HTTPStatus.OK, r.status_code
        assert r.body.json()["available"], r.body
    return network
def test_quote(network, args):
    """Verify node quotes report the mrenclave of the signed enclave binary.

    Extracts the expected measurement with oesign, compares it against the
    primary's own quote and every joined node's quote, then runs the external
    verify_quote.sh check against each node's raw quote and endorsements.
    """
    primary, _ = network.find_nodes()
    with primary.client() as c:
        # Extract the expected mrenclave by dumping the signed enclave with oesign
        oed = subprocess.run(
            [
                os.path.join(args.oe_binary, "oesign"),
                "dump",
                "-e",
                infra.path.build_lib_path(args.package, args.enclave_type),
            ],
            capture_output=True,
            check=True,
        )
        lines = [
            line
            for line in oed.stdout.decode().split(os.linesep)
            if line.startswith("mrenclave=")
        ]
        expected_mrenclave = lines[0].strip().split("=")[1]

        r = c.get("/node/quotes/self")
        primary_quote_info = r.body.json()
        assert primary_quote_info["node_id"] == 0
        primary_mrenclave = primary_quote_info["mrenclave"]
        assert primary_mrenclave == expected_mrenclave, (
            primary_mrenclave,
            expected_mrenclave,
        )

        # All joined nodes must report the same measurement
        r = c.get("/node/quotes")
        quotes = r.body.json()["quotes"]
        assert len(quotes) == len(network.get_joined_nodes())

        for quote in quotes:
            mrenclave = quote["mrenclave"]
            assert mrenclave == expected_mrenclave, (mrenclave, expected_mrenclave)

            # Dump raw quote and endorsements to disk for verify_quote.sh
            quote_path = os.path.join(network.common_dir, f"quote{quote['node_id']}")
            endorsements_path = os.path.join(
                network.common_dir, f"endorsements{quote['node_id']}"
            )
            with open(quote_path, "wb") as q:
                q.write(bytes.fromhex(quote["raw"]))
            with open(endorsements_path, "wb") as e:
                e.write(bytes.fromhex(quote["endorsements"]))

            cafile = os.path.join(network.common_dir, "networkcert.pem")
            assert (
                infra.proc.ccall(
                    "verify_quote.sh",
                    f"https://{primary.pubhost}:{primary.pubport}",
                    "--cacert",
                    f"{cafile}",
                    log_output=True,
                ).returncode
                == 0
            ), f"Quote verification for node {quote['node_id']} failed"

    return network
def test_app_bundle(network, args):
    """Deploy a js app from a zipped bundle, exercise its endpoints, then remove it."""
    primary, _ = network.find_nodes()

    LOG.info("Deploying js app bundle archive")
    # Testing the bundle archive support of the Python client here.
    # Plain bundle folders are tested in the npm-based app tests.
    bundle_dir = os.path.join(PARENT_DIR, "js-app-bundle")
    with tempfile.TemporaryDirectory(prefix="ccf") as tmp_dir:
        bundle_path = shutil.make_archive(
            os.path.join(tmp_dir, "bundle"), "zip", bundle_dir
        )
        network.consortium.deploy_js_app(primary, bundle_path)

    LOG.info("Verifying that modules and endpoints were added")
    with primary.client(
        f"member{network.consortium.get_any_active_member().member_id}"
    ) as c:
        r = c.post(
            "/gov/read", {"table": "public:ccf.gov.modules", "key": "/math.js"}
        )
        assert r.status_code == http.HTTPStatus.OK, r.status_code

    with primary.client("user0") as c:
        valid_body = {"op": "sub", "left": 82, "right": 40}
        r = c.post("/app/compute", valid_body)
        assert r.status_code == http.HTTPStatus.OK, r.status_code
        assert r.headers["content-type"] == "application/json"
        assert r.body.json() == {"result": 42}, r.body

        invalid_body = {"op": "add", "left": "1", "right": 2}
        r = c.post("/app/compute", invalid_body)
        assert r.status_code == http.HTTPStatus.BAD_REQUEST, r.status_code
        assert r.headers["content-type"] == "application/json"
        assert r.body.json() == {"error": "invalid operand type"}, r.body

        validate_openapi(c)

    LOG.info("Removing js app")
    proposal_body, careful_vote = ccf.proposal_generator.remove_js_app()
    proposal = network.consortium.get_any_active_member().propose(
        primary, proposal_body
    )
    network.consortium.vote_using_majority(primary, proposal, careful_vote)

    LOG.info("Verifying that modules and endpoints were removed")
    with primary.client("user0") as c:
        r = c.post("/app/compute", valid_body)
        assert r.status_code == http.HTTPStatus.NOT_FOUND, r.status_code

    with primary.client(
        f"member{network.consortium.get_any_active_member().member_id}"
    ) as c:
        r = c.post(
            "/gov/read", {"table": "public:ccf.gov.modules", "key": "/math.js"}
        )
        assert r.status_code == http.HTTPStatus.BAD_REQUEST, r.status_code
    return network