예제 #1
0
def parse_args():
    """Parse balance-query CLI flags and build the runtime environment.

    Returns:
        (env, args): configured environment copy and the parsed namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--full_shard_id", type=int, help="full shard id to operate"
    )
    parser.add_argument(
        "--all_shards",
        action="store_true",
        default=False,
        help="query balances in all shards",
    )
    parser.add_argument(
        "--recipient", default=None, type=str, help="query a specific recipient"
    )
    parser.add_argument(
        "--minor_block_height",
        default=None,
        type=int,
        help="query balance at specific minor block height",
    )
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)

    # initialize database: a persistent master DB is only needed when the
    # cluster is not configured to run fully in memory
    if not env.cluster_config.use_mem_db():
        env.db = PersistentDb(
            "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
            clean=env.cluster_config.CLEAN,
        )

    return env, args
예제 #2
0
 def test_unknown_structure(self):
     """Logging a payload with a non-serializable member should only warn."""
     parser = argparse.ArgumentParser()
     ClusterConfig.attach_arguments(parser)
     args = parser.parse_args(["--monitoring_kafka_rest_address=x"])
     cluster_config = ClusterConfig.create_from_args(args)
     # a set is not JSON-serializable — should trigger warning log
     bad_sample = {"a": 1, "b": 2, "c": {"x", "y"}}
     cluster_config.kafka_logger.log_kafka_sample("topic", bad_sample)
예제 #3
0
 def test_kafka_log(self):
     """Smoke-test logging a serializable sample through the kafka logger."""
     parser = argparse.ArgumentParser()
     ClusterConfig.attach_arguments(parser)
     # set --kafka_rest_address correctly to see real actions
     args = parser.parse_args([])
     cluster_config = ClusterConfig.create_from_args(args)
     payload = {"a": 1, "b": 2, "c": ["x", "y"]}
     cluster_config.kafka_logger.log_kafka_sample("dlltest", payload)
예제 #4
0
async def main():
    """Launch ``--num_clusters`` local clusters, optionally mining on one.

    One cluster is picked at random to run simulated mining (when enabled);
    port numbers are bumped per cluster so they do not collide.
    """
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--num_clusters", default=2, type=int)
    args = parser.parse_args()
    clusters = []
    # pick one cluster at random to do the (simulated) mining
    mine_i = random.randint(0, args.num_clusters - 1)
    mine = args.start_simulated_mining
    if mine:
        print("cluster {} will be mining".format(mine_i))
    else:
        print("No one will be mining")

    db_path_root = args.db_path_root
    p2p_port = args.p2p_port
    for i in range(args.num_clusters):
        args.start_simulated_mining = mine and i == mine_i
        args.db_path_root = "{}_C{}".format(db_path_root, i)

        # set up p2p bootstrapping, with fixed bootstrap key for now
        if args.p2p:
            if i == 0:
                args.privkey = (
                    "31552f186bf90908ce386fb547dd0410bf443309125cc43fd0ffd642959bf6d9"
                )
            else:
                args.privkey = ""

            args.bootnodes = "enode://c571e0db93d17cc405cb57640826b70588a6a28785f38b21be471c609ca12fcb06cb306ac44872908f5bed99046031a5af82072d484e3ef9029560c1707193a0@127.0.0.1:{}".format(
                p2p_port)

        config = ClusterConfig.create_from_args(args)
        print("Cluster {} config file: {}".format(i, config.json_filepath))
        print(config.to_json())

        clusters.append(
            cl.Cluster(
                config, "{}C{}{}_".format(colors[i % len(colors)], i,
                                          COLOR_END)))

        # bump ports so the next cluster does not collide with this one
        args.p2p_port += 1
        args.port_start += 100
        args.json_rpc_port += 1
        args.json_rpc_private_port += 1

    tasks = list()
    tasks.append(asyncio.ensure_future(clusters[0].run()))
    # give the first cluster a head start before the others bootstrap off it
    await asyncio.sleep(3)
    for cluster in clusters[1:]:
        tasks.append(asyncio.ensure_future(cluster.run()))
    try:
        await asyncio.gather(*tasks)
    except KeyboardInterrupt:
        # BUGFIX: this coroutine runs inside an already-running event loop, so
        # asyncio.get_event_loop().run_until_complete() raised RuntimeError
        # ("This event loop is already running"); await the shutdowns directly.
        # Per-cluster suppression keeps teardown best-effort, as before.
        for cluster in clusters:
            try:
                await cluster.shutdown()
            except Exception:
                pass
예제 #5
0
    def test_cluster_dict(self):
        """Round-trip a generated config through its JSON file representation."""
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        cluster_config = ClusterConfig.create_from_args(
            parser.parse_args(["--num_shards=4", "--genesis_dir="])
        )

        # reload the same config purely from the file it wrote out
        reload_args = parser.parse_args(
            ["--cluster_config=" + cluster_config.json_filepath]
        )
        deserialized = ClusterConfig.create_from_args(reload_args)

        self.assertTrue(cluster_config == deserialized)
예제 #6
0
def parse_args():
    """Parse slave-node CLI arguments and build its environment."""
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    # Unique Id identifying the node in the cluster
    parser.add_argument("--node_id", default="", type=str)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    cluster_config = ClusterConfig.create_from_args(args)
    env.cluster_config = cluster_config
    env.slave_config = cluster_config.get_slave_config(args.node_id)

    return env
예제 #7
0
def main():
    """Create a cluster from CLI args and run it until interrupted."""
    # run relative to this script's directory so relative paths resolve
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    config = ClusterConfig.create_from_args(parser.parse_args())

    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    Cluster(config).start_and_loop()
예제 #8
0
def parse_args():
    """Parse master CLI flags (including offline DB-check options) into an env."""
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--profile", default="", type=str)
    # root-block range and batch size used by the offline DB checker
    parser.add_argument("--check_db_rblock_from", default=-1, type=int)
    parser.add_argument("--check_db_rblock_to", default=0, type=int)
    parser.add_argument("--check_db_rblock_batch", default=1, type=int)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)
    env.arguments = args
    return env
예제 #9
0
    def test_cluster_slave_config_legacy(self):
        """Legacy CHAIN_MASK_LIST slave configs deserialize to the same config."""
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        args = parser.parse_args(
            ["--num_chains=8", "--num_shards_per_chain=1", "--num_slaves=4"])
        cluster_config = ClusterConfig.create_from_args(args)
        # remove full shard list but use chain mask
        d = cluster_config.to_dict()
        for idx, slave in enumerate(d["SLAVE_LIST"]):
            slave["CHAIN_MASK_LIST"] = [idx + 4]
            del slave["FULL_SHARD_ID_LIST"]

        deserialized = ClusterConfig.from_dict(d)
        # chain mask translated config should equal previous full shard ID config
        self.assertTrue(cluster_config == deserialized)
예제 #10
0
    def __init__(self, db=None, evm_config=None):
        """Set up environment state: database, cluster config, and EVM config.

        Args:
            db: backing key-value store; any falsy value falls back to a
                fresh InMemoryDb.
            evm_config: EVM configuration dict; falls back to the default.
        """
        self.db = db or InMemoryDb()
        self.__cluster_config = ClusterConfig()

        self.evm_config = evm_config or get_default_evm_config()
        # keep the EVM network id in sync with the quarkchain config
        # (self.quark_chain_config is presumably a property defined elsewhere
        # on this class — TODO confirm)
        self.evm_config["NETWORK_ID"] = self.quark_chain_config.NETWORK_ID
        self.evm_env = EvmEnv(db=self.db, config=self.evm_config)
예제 #11
0
def get_test_env(
    genesis_account=Address.create_empty_account(),
    genesis_quarkash=0,
    genesis_minor_quarkash=0,
    shard_size=2,
    genesis_root_heights=None,
):
    """Build an in-memory test environment with the genesis account funded
    on every shard.

    Args:
        genesis_account: account funded at genesis on each shard.
        genesis_quarkash: unused here; kept for caller compatibility.
        genesis_minor_quarkash: per-shard genesis balance for the account.
        shard_size: number of shards.
        genesis_root_heights: optional per-shard genesis root heights.
    """
    env = DEFAULT_ENV.copy()

    env.db = InMemoryDb()
    env.set_network_id(1234567890)

    env.cluster_config = ClusterConfig()
    env.quark_chain_config.update(shard_size, 1, 1)
    env.quark_chain_config.TESTNET_MASTER_ADDRESS = (
        genesis_account.serialize().hex()
    )

    if genesis_root_heights:
        check(len(genesis_root_heights) == shard_size)
        for shard_id in range(shard_size):
            shard_cfg = env.quark_chain_config.SHARD_LIST[shard_id]
            shard_cfg.GENESIS.ROOT_HEIGHT = genesis_root_heights[shard_id]

    # fund genesis account in all shards
    for shard_id, shard_cfg in enumerate(env.quark_chain_config.SHARD_LIST):
        key = genesis_account.address_in_shard(shard_id).serialize().hex()
        shard_cfg.GENESIS.ALLOC[key] = genesis_minor_quarkash

    # difficulty checks get in the way of deterministic tests
    env.quark_chain_config.SKIP_MINOR_DIFFICULTY_CHECK = True
    env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK = True
    env.cluster_config.ENABLE_TRANSACTION_HISTORY = True
    env.cluster_config.DB_PATH_ROOT = ""
    check(env.cluster_config.use_mem_db())

    return env
예제 #12
0
def jrpc_server_context(master):
    """Yield a JSON-RPC test server on port 38391, always shutting it down.

    BUGFIX: the yield is now wrapped in try/finally so the server is shut
    down even when the body using this context raises — previously a failing
    test leaked the server. This matches the sibling fixture that already
    uses try/finally.
    """
    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig()
    env.cluster_config.JSON_RPC_PORT = 38391
    server = JSONRPCServer.start_test_server(env, master)
    try:
        yield server
    finally:
        server.shutdown()
예제 #13
0
def get_test_env(
    genesis_account=Address.create_empty_account(),
    genesis_minor_quarkash=0,
    chain_size=2,
    shard_size=2,
    genesis_root_heights=None,  # dict(full_shard_id, genesis_root_height)
    remote_mining=False,
    genesis_minor_token_balances=None,
):
    """Build an in-memory multi-chain test environment.

    Args:
        genesis_account: account funded on every shard at genesis.
        genesis_minor_quarkash: balance used when no token-balance dict is given.
        chain_size: number of chains.
        shard_size: shards per chain; must be a power of two.
        genesis_root_heights: optional dict full_shard_id -> genesis root height.
        remote_mining: enable remote-mining consensus on root and shards.
        genesis_minor_token_balances: optional token->balance mapping used as
            the genesis allocation for every shard (defaults to empty).

    Returns:
        A configured copy of DEFAULT_ENV backed by an in-memory DB.
    """
    # BUGFIX: `genesis_minor_token_balances={}` was a shared mutable default
    # argument; use None as the sentinel so each call gets its own dict.
    if genesis_minor_token_balances is None:
        genesis_minor_token_balances = {}
    check(is_p2(shard_size))
    env = DEFAULT_ENV.copy()

    env.db = InMemoryDb()
    env.set_network_id(1234567890)

    env.cluster_config = ClusterConfig()
    env.quark_chain_config.update(chain_size, shard_size, 10, 1,
                                  env.quark_chain_config.GENESIS_TOKEN)

    if remote_mining:
        env.quark_chain_config.ROOT.CONSENSUS_CONFIG.REMOTE_MINE = True
        env.quark_chain_config.ROOT.CONSENSUS_TYPE = ConsensusType.POW_DOUBLESHA256
        env.quark_chain_config.ROOT.GENESIS.DIFFICULTY = 10

    env.quark_chain_config.ROOT.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME = 40
    env.quark_chain_config.ROOT.DIFFICULTY_ADJUSTMENT_FACTOR = 1024

    if genesis_root_heights:
        check(len(genesis_root_heights) == shard_size * chain_size)
        for chain_id in range(chain_size):
            for shard_id in range(shard_size):
                # full shard id encodes chain id, shard size, and shard id
                full_shard_id = chain_id << 16 | shard_size | shard_id
                shard = env.quark_chain_config.shards[full_shard_id]
                shard.GENESIS.ROOT_HEIGHT = genesis_root_heights[full_shard_id]

    # fund genesis account in all shards
    for full_shard_id, shard in env.quark_chain_config.shards.items():
        addr = genesis_account.address_in_shard(
            full_shard_id).serialize().hex()
        if len(genesis_minor_token_balances) != 0:
            shard.GENESIS.ALLOC[addr] = genesis_minor_token_balances
        else:
            shard.GENESIS.ALLOC[addr] = genesis_minor_quarkash
        shard.CONSENSUS_CONFIG.REMOTE_MINE = remote_mining
        shard.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME = 7
        shard.DIFFICULTY_ADJUSTMENT_FACTOR = 512
        if remote_mining:
            shard.CONSENSUS_TYPE = ConsensusType.POW_DOUBLESHA256
            shard.GENESIS.DIFFICULTY = 10
        # small PoSW window keeps tests fast
        shard.POSW_CONFIG.WINDOW_SIZE = 2

    env.quark_chain_config.SKIP_MINOR_DIFFICULTY_CHECK = True
    env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK = True
    env.cluster_config.ENABLE_TRANSACTION_HISTORY = True
    env.cluster_config.DB_PATH_ROOT = ""

    check(env.cluster_config.use_mem_db())

    return env
async def main():
    """Launch ``--num_clusters`` local clusters, optionally mining on one."""
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--num_clusters", default=2, type=int)
    args = parser.parse_args()
    clusters = []
    # pick one cluster at random to do the mining (when enabled)
    mine_i = random.randint(0, args.num_clusters - 1)
    if args.mine:
        print("cluster {} will be mining".format(mine_i))
    else:
        print("No one will be mining")
    mine = args.mine
    db_path_root = args.db_path_root
    for i in range(args.num_clusters):
        args.mine = mine and i == mine_i
        args.db_path_root = "{}_C{}".format(db_path_root, i)

        config = ClusterConfig.create_from_args(args)
        print("Cluster {} config file: {}".format(i, config.json_filepath))
        print(config.to_json())

        clusters.append(
            cl.Cluster(
                config, "{}C{}{}_".format(colors[i % len(colors)], i,
                                          COLOR_END)))

        # bump ports so the next cluster does not collide with this one
        args.p2p_port += 1
        args.port_start += 100
        args.json_rpc_port += 1
        args.json_rpc_private_port += 1
        args.devp2p_port += 1

    tasks = list()
    tasks.append(asyncio.ensure_future(clusters[0].run()))
    # give the first cluster a head start before the others join
    await asyncio.sleep(3)
    for cluster in clusters[1:]:
        tasks.append(asyncio.ensure_future(cluster.run()))
    try:
        await asyncio.gather(*tasks)
    except KeyboardInterrupt:
        # BUGFIX: this coroutine runs inside an already-running event loop, so
        # asyncio.get_event_loop().run_until_complete() raised RuntimeError;
        # await the shutdowns directly, best-effort per cluster.
        for cluster in clusters:
            try:
                await cluster.shutdown()
            except Exception:
                pass
예제 #15
0
 def test_special_contract_enable_ts(self):
     """Setting env.cluster_config should propagate ENABLE_EVM_TIMESTAMP into
     the precompiled contracts' enable timestamps (index 1 of `specials`).
     """
     env = Env()
     # before a cluster config is attached, each enable timestamp is 0
     for addr in PRECOMPILED_CONTRACTS_AFTER_EVM_ENABLED:
         self.assertEqual(specials[addr][1], 0)
     cluster_config = ClusterConfig()
     cluster_config.QUARKCHAIN.ENABLE_EVM_TIMESTAMP = 123
     # NOTE(review): assigning cluster_config is what updates `specials` —
     # presumably via a property setter on Env; confirm in Env's definition
     env.cluster_config = cluster_config
     for addr in PRECOMPILED_CONTRACTS_AFTER_EVM_ENABLED:
         self.assertEqual(specials[addr][1], 123)
예제 #16
0
def parse_args():
    """Parse block-tool CLI arguments and build the runtime environment.

    Returns:
        (env, args): configured environment copy and the parsed namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("action", type=str, help="action to take")
    parser.add_argument("--height", type=int, help="block height to operate")
    parser.add_argument("--hash", type=str, help="block hash to operate")
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)

    # initialize database — nothing to do in mem-db mode
    if env.cluster_config.use_mem_db():
        return env, args

    env.db = PersistentDb(
        "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
        clean=env.cluster_config.CLEAN,
    )
    return env, args
예제 #17
0
def jrpc_server_context(master):
    """Yield a JSON-RPC test server bound to loopback; always shut it down."""
    cfg = ClusterConfig()
    cfg.JSON_RPC_PORT = 38391
    # bind explicitly to loopback to pass the circleCi
    cfg.JSON_RPC_HOST = "127.0.0.1"

    env = DEFAULT_ENV.copy()
    env.cluster_config = cfg

    server = JSONRPCServer.start_test_server(env, master)
    try:
        yield server
    finally:
        server.shutdown()
예제 #18
0
    def test_cluster_dict_wloadtest(self):
        """convert to dict and back to check if the content changed, requires `__eq__`
        removing --loadtest will make the test faster
        passing more num_shards will increase runtime linearly
        """
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        genesis_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "../../genesis_data"
        )
        cluster_config = ClusterConfig.create_from_args(
            parser.parse_args(["--num_shards=4", "--genesis_dir=" + genesis_dir])
        )

        # reload purely from the JSON file the config wrote out
        reloaded = ClusterConfig.create_from_args(
            parser.parse_args(["--cluster_config=" + cluster_config.json_filepath])
        )

        self.assertTrue(cluster_config == reloaded)
        # loadtest genesis data funds a large number of accounts on shard 0
        self.assertTrue(
            len(cluster_config.QUARKCHAIN.SHARD_LIST[0].GENESIS.ALLOC) > 12000
        )
예제 #19
0
def main():
    """Start a cluster, or run the offline DB checker, from CLI args."""
    # silence noisy asyncio logging
    logging.getLogger("asyncio").setLevel(logging.ERROR)
    # run relative to this script's directory so relative paths resolve
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--profile", default="", type=str)
    # root-block range and batch size used when --check_db is set
    parser.add_argument("--check_db_rblock_from", default=-1, type=int)
    parser.add_argument("--check_db_rblock_to", default=0, type=int)
    parser.add_argument("--check_db_rblock_batch", default=1, type=int)
    args = parser.parse_args()

    config = ClusterConfig.create_from_args(args)
    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    cluster = Cluster(config, args=args)
    if args.check_db:
        cluster.check_db()
    else:
        cluster.start_and_loop()
def main():
    """Health-check a cluster, rotating through its bootstrap nodes forever."""
    # the email credentials are required by the health-check notifier
    if "EMAIL_FROM_ADDRESS" not in os.environ:
        raise ValueError(
            "EMAIL_FROM_ADDRESS not found in environment variables")
    if "PASSWORD" not in os.environ:
        raise ValueError("PASSWORD not found in environment variables")
    if "EMAIL_TO_ADDRESS" not in os.environ:
        raise ValueError("EMAIL_TO_ADDRESS not found in environment variables")

    os.chdir(os.path.dirname("../../quarkchain/cluster/"))
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    config = ClusterConfig.create_from_args(parser.parse_args())

    # create a config tempfile for health check — a copy of the original
    # config file, so the original is never modified
    tempfile.tempdir = "../../mainnet/singularity/"
    fd, config.json_filepath = tempfile.mkstemp()
    with os.fdopen(fd, "w") as tmp:
        tmp.write(config.to_json())
    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    cluster = HealthCheckCluster(config)
    bootstrap_nodes = config.P2P.BOOT_NODES.split(",")
    count = 0

    while True:
        # rewrite the temp config to point at the next bootstrap node
        revise_cmd = (
            "QKC_CONFIG=" + config.json_filepath +
            " python3 ../../quarkchain/tools/config_p2p.py --bootnodes " +
            bootstrap_nodes[count])
        os.system(revise_cmd)
        print("Start Bootstrap With " + bootstrap_nodes[count])
        cluster.start_and_loop()
        time.sleep(100)
        count = (count + 1) % len(bootstrap_nodes)
예제 #21
0
def main():
    """Count shard-block coinbase recipients below a root tip, per algorithm."""
    args = parse_args()
    config = ClusterConfig.create_from_args(args)
    root_db = RootDb(PersistentDb(args.db), config.QUARKCHAIN, 0)
    tip = root_db.get_tip_header()
    if not tip:
        raise RuntimeError("Not a valid RootDb")
    # start counting at the DB tip unless a positive height was requested
    from_height = tip.height if args.root_height <= 0 else args.root_height
    tip_header = None
    block = root_db.get_root_block_by_hash(tip.get_hash(), False)
    shard_to_address_count = dict()  # shard -> (recipient -> count)
    while block.header.height > 0:
        if block.header.height > from_height:
            # still above the starting height: keep walking back
            block = root_db.get_root_block_by_hash(block.header.hash_prev_block, False)
            continue
        if block.header.height == from_height:
            tip_header = block.header
        for minor_header in block.minor_block_header_list:
            shard = minor_header.branch.get_full_shard_id()
            recipient_hex = minor_header.coinbase_address.recipient.hex()
            per_shard = shard_to_address_count.setdefault(shard, dict())
            per_shard[recipient_hex] = per_shard.get(recipient_hex, 0) + 1
        block = root_db.get_root_block_by_hash(block.header.hash_prev_block, False)

    # fold per-shard counts into per-mining-algorithm counts
    algo_to_address_count = dict()  # algorithm -> (recipient -> count)
    for shard_id, per_shard in shard_to_address_count.items():
        algo_counts = algo_to_address_count.setdefault(
            shard_id_to_algorithm(shard_id), dict())
        for address, count in per_shard.items():
            algo_counts[address] = algo_counts.get(address, 0) + count

    print(
        "Counting shard blocks from root block {} {}".format(
            tip_header.height, tip_header.get_hash().hex()
        )
    )

    for algo, address_count in algo_to_address_count.items():
        total = sum(address_count.values())

        print()
        print("{} has {} blocks".format(algo, total))
        for address, count in sorted(
            address_count.items(), key=operator.itemgetter(1), reverse=True
        ):
            print("{} {} {:.2f}%".format(address, count, count / total * 100))
예제 #22
0
def get_test_env(
    genesis_account=Address.create_empty_account(),
    genesis_minor_quarkash=0,
    shard_size=2,
    genesis_root_heights=None,
    remote_mining=False,
):
    """Build an in-memory test environment with the genesis account funded
    on every shard, optionally using remote-mining consensus.
    """
    env = DEFAULT_ENV.copy()

    env.db = InMemoryDb()
    env.set_network_id(1234567890)

    env.cluster_config = ClusterConfig()
    qkc_config = env.quark_chain_config
    qkc_config.update(shard_size, 10, 1)

    root = qkc_config.ROOT
    if remote_mining:
        root.CONSENSUS_CONFIG.REMOTE_MINE = True
        root.CONSENSUS_TYPE = ConsensusType.POW_SHA3SHA3
        root.GENESIS.DIFFICULTY = 10

    root.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME = 40
    root.DIFFICULTY_ADJUSTMENT_FACTOR = 1024

    if genesis_root_heights:
        check(len(genesis_root_heights) == shard_size)
        for shard_id in range(shard_size):
            qkc_config.SHARD_LIST[shard_id].GENESIS.ROOT_HEIGHT = (
                genesis_root_heights[shard_id]
            )

    # fund genesis account in all shards
    for shard_id, shard in enumerate(qkc_config.SHARD_LIST):
        key = genesis_account.address_in_shard(shard_id).serialize().hex()
        shard.GENESIS.ALLOC[key] = genesis_minor_quarkash
        shard.CONSENSUS_CONFIG.REMOTE_MINE = remote_mining
        shard.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME = 7
        shard.DIFFICULTY_ADJUSTMENT_FACTOR = 512
        if remote_mining:
            shard.CONSENSUS_TYPE = ConsensusType.POW_SHA3SHA3
            shard.GENESIS.DIFFICULTY = 10

    qkc_config.SKIP_MINOR_DIFFICULTY_CHECK = True
    qkc_config.SKIP_ROOT_DIFFICULTY_CHECK = True
    env.cluster_config.ENABLE_TRANSACTION_HISTORY = True
    env.cluster_config.DB_PATH_ROOT = ""

    check(env.cluster_config.use_mem_db())

    return env
예제 #23
0
def main():
    """Entry point: start external miner threads for the selected shards.

    Reads the cluster config JSON, maps each requested shard (or the root
    chain, via "R"/non-numeric) onto a worker thread round-robin, then starts
    one ExternalMiner per worker and installs a SIGINT handler to stop them.
    NOTE(review): this def ends at the last visible line of the chunk and may
    continue past it; only comments were added here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        required=True,
        type=str,
        help=
        "<Required> path to config json file, same as the config running cluster",
    )
    parser.add_argument(
        "-s",
        "--shards",
        required=True,
        nargs="+",
        help=
        '<Required> specify shards (identified by full_shard_key) to mine, use "R" to indicate root chain',
    )
    parser.add_argument("--worker",
                        type=int,
                        help="number of worker threads",
                        default=1)
    parser.add_argument("--host",
                        type=str,
                        help="host address of the cluster",
                        default="localhost")
    args = parser.parse_args()

    with open(args.config) as f:
        cluster_config = ClusterConfig.from_json(f.read())
        qkc_config = cluster_config.QUARKCHAIN

    # override the module-level cluster host when --host is given
    global cluster_host
    if args.host:
        cluster_host = args.host

    # 1 worker config <-> 1 mining thread <-> 1 or more shards
    worker_configs = [[] for _ in range(min(args.worker, len(args.shards)))
                      ]  # type: List[List[Dict]]

    # distribute the requested shards over the workers round-robin
    for worker_i, shard_str in zip(cycle(range(args.worker)), args.shards):
        if shard_str.isnumeric():
            full_shard_key = int(shard_str)
            full_shard_id = qkc_config.get_full_shard_id_by_full_shard_key(
                full_shard_key)
            c = qkc_config.shards[full_shard_id]
        else:
            # non-numeric (e.g. "R") selects the root chain
            full_shard_id = None
            c = qkc_config.ROOT
        worker_configs[worker_i].append({
            "full_shard_id":
            full_shard_id,
            "consensus_type":
            c.CONSENSUS_TYPE,
            "target_block_time":
            c.CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
        })

    miners = []
    # shared stop flag: the signal handler sets it to stop all miners
    stopper = threading.Event()
    for config_list in worker_configs:
        ext_miner = ExternalMiner(config_list, stopper)
        ext_miner.start()
        miners.append(ext_miner)

    sig_handler = SigHandler(stopper, miners)
    signal.signal(signal.SIGINT, sig_handler)