Example #1
def test_slashing(cluster):
    "stop node2, wait for non-live slashing"
    addr = cluster.address("validator", i=2)
    val_addr = cluster.address("validator", i=2, bech="val")
    tokens1 = int((cluster.validator(val_addr))["tokens"])

    print("tokens before slashing", tokens1)
    print("stop and wait for 10 blocks")
    cluster.supervisor.stopProcess(f"{cluster.chain_id}-node2")
    wait_for_new_blocks(cluster, 10)
    cluster.supervisor.startProcess(f"{cluster.chain_id}-node2")
    wait_for_port(rpc_port(cluster.base_port(2)))

    val = cluster.validator(val_addr)
    tokens2 = int(val["tokens"])
    print("tokens after slashing", tokens2)
    assert tokens2 == int(tokens1 * 0.99), "slash amount is not correct"

    assert val["jailed"], "validator is jailed"

    # try to unjail
    rsp = cluster.unjail(addr, i=2)
    assert rsp["code"] == 4, "still jailed, can't be unjailed"

    # wait until 60s after the unbonding time, then unjail again
    wait_for_block_time(
        cluster,
        isoparse(val["unbonding_time"]) + datetime.timedelta(seconds=60))
    rsp = cluster.unjail(addr, i=2)
    assert rsp["code"] == 0, f"unjail should success {rsp}"

    wait_for_new_blocks(cluster, 3)
    assert len(cluster.validators()) == 3
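The test above relies on wait_for_new_blocks to let enough blocks pass while node2 is offline. A minimal sketch of such a polling helper, assuming the CLI object exposes a status() call that reports sync_info.latest_block_height (names and return shape are assumptions, not necessarily the project's exact implementation):

import time


def wait_for_new_blocks(cli, n, sleep=0.5):
    """Poll until `n` new blocks have been produced since the call started (assumed API)."""
    begin_height = int(cli.status()["sync_info"]["latest_block_height"])
    while True:
        time.sleep(sleep)
        cur_height = int(cli.status()["sync_info"]["latest_block_height"])
        if cur_height - begin_height >= n:
            return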
Example #2
def cluster_fixture_aux(config_path, data_folder, cmd):
    base_port = gen_base_port()
    print("init cluster at", data_folder, ", base port:", base_port)
    cluster.init_cluster(data_folder, config_path, base_port, cmd=cmd)

    config = yaml.safe_load(open(config_path))
    clis = {}
    for key in config:
        chain_id = key
        clis[chain_id] = cluster.ClusterCLI(data_folder, chain_id=chain_id, cmd=cmd)

    supervisord = cluster.start_cluster(data_folder)

    try:
        for cli in clis.values():
            # wait for the first node's rpc port to be available before starting tests
            wait_for_port(rpc_port(cli.config["validators"][0]["base_port"]))
            # wait for the first node's grpc port to be available before starting tests
            wait_for_port(grpc_port(cli.config["validators"][0]["base_port"]))
            # wait for the first blocks to be generated before starting tests
            wait_for_block(cli, 2)

        if len(clis) == 1:
            yield list(clis.values())[0]
        else:
            yield clis
    finally:
        supervisord.terminate()
        supervisord.wait()
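This fixture gates on wait_for_port and wait_for_block before yielding the CLI objects. Minimal sketches of both helpers, assuming a localhost TCP probe and the same status() call as above (illustrative, not the project's exact code):

import socket
import time


def wait_for_port(port, host="127.0.0.1", timeout=40.0):
    """Poll until a TCP connection to host:port succeeds or the timeout expires."""
    start = time.perf_counter()
    while True:
        try:
            with socket.create_connection((host, port), timeout=5.0):
                return
        except OSError:
            if time.perf_counter() - start >= timeout:
                raise TimeoutError(f"port {port} not available after {timeout}s")
            time.sleep(0.1)


def wait_for_block(cli, height, timeout=240):
    """Poll until the node reports a block height >= `height` (assumed status() API)."""
    for _ in range(timeout):
        try:
            current = int(cli.status()["sync_info"]["latest_block_height"])
        except Exception:
            current = -1  # node may not be serving rpc yet
        if current >= height:
            return
        time.sleep(1)
    raise TimeoutError(f"block {height} not reached after {timeout}s")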
Example #3
def cluster_fixture(
    config_path,
    base_port,
    tmp_path_factory,
    quiet=False,
    post_init=None,
    enable_cov=None,
):
    if enable_cov is None:
        enable_cov = os.environ.get("GITHUB_ACTIONS") == "true"
    config = yaml.safe_load(open(config_path))
    data = tmp_path_factory.mktemp(config["chain_id"])
    print("init cluster at", data, ", base port:", base_port)
    cluster.init_cluster(data, config, base_port)

    if post_init:
        post_init(config, data)

    if enable_cov:
        # replace the first node with the instrumented binary
        ini = data / cluster.SUPERVISOR_CONFIG_FILE
        ini.write_text(
            re.sub(
                r"^command = (.*/)?chain-maind",
                "command = chain-maind-inst -test.coverprofile=%(here)s/coverage.txt",
                ini.read_text(),
                count=1,
                flags=re.M,
            )
        )
        begin = time.time()

    supervisord = cluster.start_cluster(data)
    if not quiet:
        tailer = cluster.TailLogsThread([str(data / "node*.log")])
        tailer.start()
    # wait for the first node's rpc port to be available before starting tests
    wait_for_port(rpc_port(config["validators"][0]["base_port"]))
    cli = cluster.ClusterCLI(data)
    # wait for the first block to be generated before starting tests
    wait_for_block(cli, 1)

    yield cli

    if enable_cov:
        # make sure the server had time to fully start, so it can write the coverage report on exit
        duration = time.time() - begin
        if duration < 15:
            time.sleep(15 - duration)

    supervisord.terminate()
    supervisord.wait()

    if not quiet:
        tailer.stop()
        tailer.join()

    if enable_cov:
        # collect the coverage results
        shutil.move(str(data / "coverage.txt"), f"coverage.{uuid.uuid1()}.txt")
Example #4
def test_manual_export(cosmovisor_cluster):
    """
    - export the chain state, override the genesis time in the genesis file,
      and reset the data
    - see https://github.com/crypto-org-chain/chain-main/issues/289
    """

    cluster = cosmovisor_cluster
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": f"%(here)s/node{i}/cosmovisor/genesis/bin/chain-maind start "
            f"--home %(here)s/node{i}"
        },
    )

    cluster.reload_supervisor()
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    # wait for a new block to make sure chain started up
    wait_for_new_blocks(cluster, 1)
    cluster.supervisor.stopAllProcesses()

    # check that all nodes are stopped
    for info in cluster.supervisor.getAllProcessInfo():
        assert info["statename"] == "STOPPED"

    # export the state
    cluster.cmd = (
        cluster.data_root
        / cluster.chain_id
        / "node0/cosmovisor/genesis/bin/chain-maind"
    )
    cluster.cosmos_cli(0).export()

    # set the genesis time to current time + 5 secs
    newtime = datetime.utcnow() + timedelta(seconds=5)
    cluster.config["genesis-time"] = newtime.replace(tzinfo=None).isoformat("T") + "Z"

    for i in range(cluster.nodes_len()):
        migrate_genesis_time(cluster, i)
        cluster.validate_genesis(i)
        cluster.cosmos_cli(i).unsaferesetall()

    cluster.supervisor.startAllProcesses()

    wait_for_new_blocks(cluster, 1)

    cluster.supervisor.stopAllProcesses()

    # check that all nodes are stopped
    for info in cluster.supervisor.getAllProcessInfo():
        assert info["statename"] == "STOPPED"
Example #5
def test_join_validator(cluster):
    i = cluster.create_node(moniker="new joined")
    addr = cluster.address("validator", i)
    # transfer 1cro from ecosystem account
    assert cluster.transfer(cluster.address("ecosystem"), addr,
                            "1cro")["code"] == 0
    assert cluster.balance(addr) == 10**8

    # start the node
    cluster.supervisor.startProcess(f"{cluster.chain_id}-node{i}")
    wait_for_port(rpc_port(cluster.base_port(i)))

    count1 = len(cluster.validators())

    # wait for the new node to catch up with node0
    wait_for_block(cluster.cosmos_cli(i), cluster.block_height(0))
    # create validator tx
    assert cluster.create_validator("1cro", i)["code"] == 0
    time.sleep(2)

    count2 = len(cluster.validators())
    assert count2 == count1 + 1, "new validator should joined successfully"

    val_addr = cluster.address("validator", i, bech="val")
    val = cluster.validator(val_addr)
    assert not val["jailed"]
    assert val["status"] == "BOND_STATUS_BONDED"
    assert val["tokens"] == str(10**8)
    assert val["description"]["moniker"] == "new joined"
    assert val["commission"]["commission_rates"] == {
        "rate": "0.100000000000000000",
        "max_rate": "0.200000000000000000",
        "max_change_rate": "0.010000000000000000",
    }
    assert (cluster.edit_validator(i, commission_rate="0.2")["code"] == 12
            ), "commission cannot be changed more than once in 24h"
    assert cluster.edit_validator(i, moniker="awesome node")["code"] == 0
    assert cluster.validator(
        val_addr)["description"]["moniker"] == "awesome node"
Example #6
def test_cancel_upgrade(cluster):
    """
    use default cluster
    - propose upgrade and pass it
    - cancel the upgrade before execution
    """
    plan_name = "upgrade-test"
    # FIXME the port seems to remain open for a while after the process stops
    time.sleep(5)
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    upgrade_height = cluster.block_height() + 30
    print("propose upgrade plan")
    print("upgrade height", upgrade_height)
    propose_and_pass(
        cluster,
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": upgrade_height,
            "deposit": "0.1cro",
        },
    )

    print("cancel upgrade plan")
    propose_and_pass(
        cluster,
        "cancel-software-upgrade",
        {
            "title": "there is bug, cancel upgrade",
            "description": "there is bug, cancel upgrade",
            "deposit": "0.1cro",
        },
    )

    # wait for blocks past the upgrade height; should succeed since the upgrade is canceled
    wait_for_block(cluster, upgrade_height + 2)
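propose_and_pass wraps the governance round-trip used by both proposals in this test. A rough sketch of that flow; the gov_propose / gov_vote / query_proposals / query_proposal calls and their return shapes are assumptions, not necessarily the project's exact CLI API:

import time


def propose_and_pass(cluster, kind, proposal, timeout=60):
    """Submit a governance proposal, have every validator vote yes, then wait for it to pass."""
    rsp = cluster.gov_propose("community", kind, proposal)  # hypothetical signature
    assert rsp["code"] == 0, rsp

    # take the newest proposal; a real helper would parse the id from the tx events
    proposal_id = cluster.query_proposals()["proposals"][-1]["proposal_id"]

    # every validator votes yes so the proposal can pass within the voting period
    for i in range(len(cluster.config["validators"])):
        rsp = cluster.gov_vote("validator", proposal_id, "yes", i=i)
        assert rsp["code"] == 0, rsp

    # poll until the proposal is marked as passed, or give up after `timeout` seconds
    for _ in range(timeout):
        if cluster.query_proposal(proposal_id)["status"] == "PROPOSAL_STATUS_PASSED":
            return
        time.sleep(1)
    raise TimeoutError(f"proposal {proposal_id} did not pass within {timeout}s")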
Example #7
def test_manual_upgrade(cosmovisor_cluster):
    """
    - do the upgrade test by replacing the binary manually
    - check that the panic does happen
    """
    cluster = cosmovisor_cluster
    # use the normal binary first
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command": f"%(here)s/node{i}/cosmovisor/genesis/bin/chain-maind start "
            f"--home %(here)s/node{i}"
        },
    )
    cluster.reload_supervisor()
    # FIXME the port seems to remain open for a while after the process stops
    time.sleep(5)
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    # wait for a new block to make sure chain started up
    wait_for_new_blocks(cluster, 1)
    target_height = cluster.block_height() + 15

    upgrade(cluster, "v2.0.0", target_height)
Example #8
def cluster_fixture(config_path, base_port, tmp_path_factory, quiet=False):
    config = yaml.safe_load(open(config_path))
    data = tmp_path_factory.mktemp(config["chain_id"])
    print("init cluster at", data, ", base port:", base_port)
    cluster.init_cluster(data, config, base_port)

    # replace the first node with the instrumented binary
    ini = data / cluster.SUPERVISOR_CONFIG_FILE
    ini.write_text(
        re.sub(
            r"^command = (.*/)?chain-maind",
            "command = chain-maind-inst -test.coverprofile=%(here)s/coverage.txt",
            ini.read_text(),
            count=1,
            flags=re.M,
        ))
    begin = time.time()

    supervisord = cluster.start_cluster(data, quiet=quiet)
    # wait for the first node's rpc port to be available before starting tests
    wait_for_port(rpc_port(config["validators"][0]["base_port"]))
    cli = cluster.ClusterCLI(data)
    # wait for the first block to be generated before starting tests
    wait_for_block(cli, 1)

    yield cli

    duration = time.time() - begin
    # make sure the server had time to fully start, so it can write the coverage report on exit
    if duration < 15:
        time.sleep(15 - duration)

    supervisord.terminate()
    supervisord.wait()

    # collect the coverage results
    shutil.move(str(data / "coverage.txt"), f"coverage.{uuid.uuid1()}.txt")
Example #9
def cluster_fixture(
    config_path,
    base_port,
    tmp_path_factory,
    quiet=False,
    post_init=None,
    enable_cov=None,
):
    """
    init a single devnet
    """
    if enable_cov is None:
        enable_cov = os.environ.get("GITHUB_ACTIONS") == "true"
    data = tmp_path_factory.mktemp("data")
    print("init cluster at", data, ", base port:", base_port)
    cluster.init_cluster(data, config_path, base_port)

    config = yaml.safe_load(open(config_path))
    clis = {}
    for key in config:
        if key == "relayer":
            continue

        chain_id = key
        chain_data = data / chain_id

        if post_init:
            post_init(chain_id, chain_data)

        if enable_cov:
            # replace the first node with the instrumented binary
            ini = chain_data / cluster.SUPERVISOR_CONFIG_FILE
            ini.write_text(
                re.sub(
                    r"^command = (.*/)?chain-maind",
                    "command = chain-maind-inst "
                    "-test.coverprofile=%(here)s/coverage.txt",
                    ini.read_text(),
                    count=1,
                    flags=re.M,
                )
            )
        clis[chain_id] = cluster.ClusterCLI(data, chain_id)

    supervisord = cluster.start_cluster(data)
    if not quiet:
        tailer = cluster.start_tail_logs_thread(data)

    try:
        begin = time.time()
        for cli in clis.values():
            # wait for the first node's rpc port to be available before starting tests
            wait_for_port(rpc_port(cli.config["validators"][0]["base_port"]))
            # wait for the first block to be generated before starting tests
            wait_for_block(cli, 1)

        if len(clis) == 1:
            yield list(clis.values())[0]
        else:
            yield clis

        if enable_cov:
            # make sure the server had time to fully start, so it can write the coverage report on exit
            duration = time.time() - begin
            if duration < 15:
                time.sleep(15 - duration)
    finally:
        supervisord.terminate()
        supervisord.wait()
        if not quiet:
            tailer.stop()
            tailer.join()

    if enable_cov:
        # collect the coverage results
        shutil.move(str(chain_data / "coverage.txt"), f"coverage.{uuid.uuid1()}.txt")
Example #10
def test_manual_upgrade(cosmovisor_cluster):
    """
    - do the upgrade test by replacing the binary manually
    - check that the panic does happen
    """
    cluster = cosmovisor_cluster
    # use the normal binary first
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command":
            f"%(here)s/node{i}/cosmovisor/genesis/bin/chain-maind start "
            f"--home %(here)s/node{i}"
        },
    )
    cluster.reload_supervisor()
    # FIXME the port seems to remain open for a while after the process stops
    time.sleep(5)
    wait_for_port(rpc_port(cluster.config["validators"][0]["base_port"]))
    # wait for a new block to make sure chain started up
    wait_for_new_blocks(cluster, 1)
    target_height = cluster.block_height() + 15
    print("upgrade height", target_height)

    plan_name = "v2.0.0"
    propose_and_pass(
        cluster,
        "software-upgrade",
        {
            "name": plan_name,
            "title": "upgrade test",
            "description": "ditto",
            "upgrade-height": target_height,
            "deposit": "0.1cro",
        },
    )

    # wait for the upgrade plan to activate
    wait_for_block(cluster, target_height)
    # wait a little bit
    time.sleep(0.5)

    # check nodes are all stopped
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node0")["state"]
        != "RUNNING")
    assert (
        cluster.supervisor.getProcessInfo(f"{cluster.chain_id}-node1")["state"]
        != "RUNNING")

    # check upgrade-info.json file is written
    assert (json.load(
        (cluster.home(0) / "data/upgrade-info.json").open()) == json.load(
            (cluster.home(1) / "data/upgrade-info.json").open()) == {
                "name": plan_name,
                "height": target_height,
            })

    # use the upgrade-test binary
    edit_chain_program(
        cluster.chain_id,
        cluster.data_dir / SUPERVISOR_CONFIG_FILE,
        lambda i, _: {
            "command":
            (f"%(here)s/node{i}/cosmovisor/upgrades/v2.0.0/bin/chain-maind "
             f"start --home %(here)s/node{i}")
        },
    )
    cluster.reload_supervisor()

    # wait for it to generate new blocks
    wait_for_block(cluster, target_height + 2)
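edit_chain_program rewrites the per-node program sections of the supervisor config before reload_supervisor is called. A minimal sketch using configparser, assuming section names of the form program:<chain_id>-node<i> (an assumption inferred from the process names used above); RawConfigParser is used so the %(here)s placeholders are left untouched:

import configparser
import re


def edit_chain_program(chain_id, ini_path, callback):
    """Apply `callback(i, old_section_dict)` to every node's supervisor program section."""
    ini = configparser.RawConfigParser()
    ini.read(ini_path)
    pattern = re.compile(rf"^program:{chain_id}-node(\d+)$")
    for section in ini.sections():
        m = pattern.match(section)
        if m is None:
            continue
        i = int(m.group(1))
        overrides = callback(i, dict(ini.items(section)))
        for key, value in overrides.items():
            ini[section][key] = value
    with open(ini_path, "w") as fp:
        ini.write(fp)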
Example #11
def cluster_fixture(
    config_path,
    worker_index,
    data,
    post_init=None,
    enable_cov=None,
    cmd=None,
):
    """
    init a single devnet
    """
    if enable_cov is None:
        enable_cov = os.environ.get("GITHUB_ACTIONS") == "true"
    base_port = gen_base_port(worker_index)
    print("init cluster at", data, ", base port:", base_port)
    cluster.init_cluster(data, config_path, base_port, cmd=cmd)

    config = yaml.safe_load(open(config_path))
    clis = {}
    for key in config:
        if key == "relayer":
            continue

        chain_id = key
        chain_data = data / chain_id

        if post_init:
            post_init(chain_id, chain_data)

        if enable_cov:
            # replace the first node with the instrumented binary
            ini = chain_data / cluster.SUPERVISOR_CONFIG_FILE
            ini.write_text(
                re.sub(
                    r"^command = (.*/)?chain-maind",
                    "command = chain-maind-inst "
                    "-test.coverprofile=%(here)s/coverage.txt",
                    ini.read_text(),
                    count=1,
                    flags=re.M,
                ))
        clis[chain_id] = cluster.ClusterCLI(data, chain_id=chain_id)

    supervisord = cluster.start_cluster(data)

    try:
        begin = time.time()
        for cli in clis.values():
            # wait for the first node's rpc port to be available before starting tests
            wait_for_port(rpc_port(cli.config["validators"][0]["base_port"]))
            # wait for the first blocks to be generated before starting tests
            wait_for_block(cli, 2)

        if len(clis) == 1:
            yield list(clis.values())[0]
        else:
            yield clis

        if enable_cov:
            # make sure the server had time to fully start, so it can write the coverage report on exit
            duration = time.time() - begin
            if duration < 15:
                time.sleep(15 - duration)
    finally:
        supervisord.terminate()
        supervisord.wait()

    if enable_cov:
        # collect the coverage results
        try:
            shutil.move(str(chain_data / "coverage.txt"),
                        f"coverage.{uuid.uuid1()}.txt")
        except FileNotFoundError:
            ts = time.time()
            st = datetime.datetime.fromtimestamp(ts).strftime(
                "%Y-%m-%d %H:%M:%S")
            print(st + " FAILED TO FIND COVERAGE")
            print(os.listdir(chain_data))
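            # dump running processes to help debug why coverage.txt is missing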
            data = [(int(p), c) for p, c in [
                x.rstrip("\n").split(" ", 1)
                for x in os.popen("ps h -eo pid:1,command")
            ]]
            print(data)
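gen_base_port(worker_index) is not shown above; a minimal sketch of the idea, assuming each pytest-xdist worker gets a disjoint block of ports (the base value and block size here are illustrative):

def gen_base_port(worker_index):
    # illustrative scheme: give every worker its own 100-port block
    return 10000 + (worker_index + 1) * 100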