Example 1
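A round-trip test: build a ClusterConfig from command-line arguments, then reload it from the JSON file it produced via --cluster_config and check that the two configs compare equal.

Note: all snippets on this page are excerpts and omit their imports. A minimal preamble that would cover most of them might look like the following; the module paths are an assumption based on QuarkChain's package layout, not something the snippets themselves show:

    import argparse

    # assumed import paths; adjust to the actual package layout
    from quarkchain.cluster.cluster_config import ClusterConfig
    from quarkchain.env import DEFAULT_ENV
    from quarkchain.db import PersistentDb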
    def test_cluster_dict(self):
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        args = parser.parse_args(["--num_shards=4", "--genesis_dir="])
        cluster_config = ClusterConfig.create_from_args(args)

        args = parser.parse_args(["--cluster_config=" + cluster_config.json_filepath])
        deserialized = ClusterConfig.create_from_args(args)

        self.assertTrue(cluster_config == deserialized)
Example 2
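Argument parsing for a balance-query tool: on top of the standard cluster arguments it adds --full_shard_id, --all_shards, --recipient, and --minor_block_height, then builds the environment and, unless the config uses an in-memory DB, opens the persistent master database.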
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--full_shard_id",
                        type=int,
                        help="full shard id to operate")
    parser.add_argument(
        "--all_shards",
        action="store_true",
        default=False,
        help="query balances in all shards",
    )
    parser.add_argument("--recipient",
                        default=None,
                        type=str,
                        help="query a specific recipient")
    parser.add_argument(
        "--minor_block_height",
        default=None,
        type=int,
        help="query balance at specific minor block height",
    )
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)

    # initialize database
    if not env.cluster_config.use_mem_db():
        env.db = PersistentDb(
            "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
            clean=env.cluster_config.CLEAN,
        )

    return env, args
Example 3
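A smoke test for the Kafka sample logger. Parsed with no arguments the logger has nothing real to talk to; as the inline comment notes, set --kafka_rest_address to see actual traffic.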
    def test_kafka_log(self):
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        # set --kafka_rest_address correctly to see real actions
        args = parser.parse_args([])
        cluster_config = ClusterConfig.create_from_args(args)
        sample = dict(a=1, b=2, c=["x", "y"])
        cluster_config.kafka_logger.log_kafka_sample("dlltest", sample)
Example 4
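Feeds the Kafka logger a sample containing a Python set, which is not JSON-serializable; the expected outcome is a warning log rather than an exception.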
    def test_unknown_structure(self):
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        args = parser.parse_args(["--monitoring_kafka_rest_address=x"])
        cluster_config = ClusterConfig.create_from_args(args)
        sample = dict(a=1, b=2, c={"x", "y"})
        # a set is not JSON-serializable, so this should trigger a warning log
        cluster_config.kafka_logger.log_kafka_sample("topic", sample)
Example 5
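Launches --num_clusters clusters in a single process. One randomly chosen cluster mines (when simulated mining is enabled), each cluster gets its own DB path root and port range, and with --p2p the first cluster acts as the bootnode with a fixed private key while the others bootstrap from it.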
async def main():
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--num_clusters", default=2, type=int)
    args = parser.parse_args()
    clusters = []
    mine_i = random.randint(0, args.num_clusters - 1)
    mine = args.start_simulated_mining
    if mine:
        print("cluster {} will be mining".format(mine_i))
    else:
        print("No one will be mining")

    db_path_root = args.db_path_root
    p2p_port = args.p2p_port
    for i in range(args.num_clusters):
        args.start_simulated_mining = mine and i == mine_i
        args.db_path_root = "{}_C{}".format(db_path_root, i)

        # set up p2p bootstrapping, with fixed bootstrap key for now
        if args.p2p:
            if i == 0:
                args.privkey = (
                    "31552f186bf90908ce386fb547dd0410bf443309125cc43fd0ffd642959bf6d9"
                )
            else:
                args.privkey = ""

            args.bootnodes = "enode://c571e0db93d17cc405cb57640826b70588a6a28785f38b21be471c609ca12fcb06cb306ac44872908f5bed99046031a5af82072d484e3ef9029560c1707193a0@127.0.0.1:{}".format(
                p2p_port)

        config = ClusterConfig.create_from_args(args)
        print("Cluster {} config file: {}".format(i, config.json_filepath))
        print(config.to_json())

        clusters.append(
            cl.Cluster(config, "{}C{}{}_".format(colors[i % len(colors)], i, COLOR_END))
        )

        args.p2p_port += 1
        args.port_start += 100
        args.json_rpc_port += 1
        args.json_rpc_private_port += 1

    tasks = list()
    tasks.append(asyncio.ensure_future(clusters[0].run()))
    await asyncio.sleep(3)
    for cluster in clusters[1:]:
        tasks.append(asyncio.ensure_future(cluster.run()))
    try:
        await asyncio.gather(*tasks)
    except KeyboardInterrupt:
        # the event loop is already running inside this coroutine, so
        # run_until_complete() would fail here; await the shutdowns directly
        for cluster in clusters:
            try:
                await cluster.shutdown()
            except Exception:
                pass
Example 6
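Slave-side setup: --node_id identifies this node within the cluster, and the matching slave configuration is looked up from the cluster config.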
def parse_args():
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    # Unique Id identifying the node in the cluster
    parser.add_argument("--node_id", default="", type=str)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)
    env.slave_config = env.cluster_config.get_slave_config(args.node_id)

    return env
Example 7
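The same round-trip check as Example 1, but pointed at the real genesis data directory; the extra assertion verifies that the deserialized config kept a genesis allocation of more than 12,000 entries.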
    def test_cluster_dict_wloadtest(self):
        """convert to dict and back to check if the content changed, requires `__eq__`
        removing --loadtest will make the test faster
        passing more num_shards will increase runtime linearly
        """
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        pwd = os.path.dirname(os.path.abspath(__file__))
        default_genesis_dir = os.path.join(pwd, "../../genesis_data")
        args = parser.parse_args(
            ["--num_shards=4", "--genesis_dir=" + default_genesis_dir]
        )
        cluster_config = ClusterConfig.create_from_args(args)

        args = parser.parse_args(["--cluster_config=" + cluster_config.json_filepath])
        deserialized = ClusterConfig.create_from_args(args)

        self.assertTrue(cluster_config == deserialized)
        self.assertTrue(
            len(cluster_config.QUARKCHAIN.SHARD_LIST[0].GENESIS.ALLOC) > 12000
        )
Example 8
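The minimal cluster launcher: attach the standard arguments, build and print the config, and run the cluster loop.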
def main():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()

    config = ClusterConfig.create_from_args(args)
    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    cluster = Cluster(config)

    cluster.start_and_loop()
Example 9
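Argument parsing for an entry point with profiling and root-block DB-check options; the parsed arguments are kept on the environment for later use.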
def parse_args():
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--profile", default="", type=str)
    parser.add_argument("--check_db_rblock_from", default=-1, type=int)
    parser.add_argument("--check_db_rblock_to", default=0, type=int)
    parser.add_argument("--check_db_rblock_batch", default=1, type=int)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)
    env.arguments = args

    return env
Example 10
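A block-counting tool: it walks root blocks backward from a chosen height, tallies minor block headers per coinbase recipient within each shard, merges the tallies per mining algorithm via shard_id_to_algorithm, and prints each recipient's block count and percentage share.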
def main():
    args = parse_args()
    config = ClusterConfig.create_from_args(args)
    db = RootDb(PersistentDb(args.db), config.QUARKCHAIN, 0)
    header = db.get_tip_header()
    if not header:
        raise RuntimeError("Not a valid RootDb")
    from_height = header.height if args.root_height <= 0 else args.root_height
    tip_header = None
    block = db.get_root_block_by_hash(header.get_hash(), False)
    shard_to_address_count = dict()  # shard -> (recipient -> count)
    while block.header.height > 0:
        if block.header.height > from_height:
            block = db.get_root_block_by_hash(block.header.hash_prev_block, False)
            continue
        if block.header.height == from_height:
            tip_header = block.header
        for minor_header in block.minor_block_header_list:
            shard = minor_header.branch.get_full_shard_id()
            address_hex = minor_header.coinbase_address.recipient.hex()
            address_to_count = shard_to_address_count.setdefault(shard, dict())
            current = address_to_count.setdefault(address_hex, 0)
            address_to_count[address_hex] = current + 1
        block = db.get_root_block_by_hash(block.header.hash_prev_block, False)

    algo_to_address_count = dict()  # algorithm -> (recipient -> count)
    for shard_id, address_to_count in shard_to_address_count.items():
        algo = shard_id_to_algorithm(shard_id)
        addr_to_count = algo_to_address_count.setdefault(algo, dict())
        for address, count in address_to_count.items():
            current = addr_to_count.setdefault(address, 0)
            addr_to_count[address] = current + count

    print(
        "Counting shard blocks from root block {} {}".format(
            tip_header.height, tip_header.get_hash().hex()
        )
    )

    for algo, address_count in algo_to_address_count.items():
        total = sum(address_count.values())

        print()
        print("{} has {} blocks".format(algo, total))
        sorted_by_count = sorted(
            address_count.items(), key=operator.itemgetter(1), reverse=True
        )
        for address, count in sorted_by_count:
            print("{} {} {:.2f}%".format(address, count, count / total * 100))
Example 11
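A legacy-format test: each slave's FULL_SHARD_ID_LIST is replaced with an equivalent CHAIN_MASK_LIST, and deserializing the modified dict must still produce a config equal to the original.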
    def test_cluster_slave_config_legacy(self):
        parser = argparse.ArgumentParser()
        ClusterConfig.attach_arguments(parser)
        args = parser.parse_args(
            ["--num_chains=8", "--num_shards_per_chain=1", "--num_slaves=4"])
        cluster_config = ClusterConfig.create_from_args(args)
        # remove full shard list but use chain mask
        d = cluster_config.to_dict()
        for i, s in enumerate(d["SLAVE_LIST"]):
            s["CHAIN_MASK_LIST"] = [i + 4]
            del s["FULL_SHARD_ID_LIST"]

        deserialized = ClusterConfig.from_dict(d)
        # chain mask translated config should equal previous full shard ID config
        self.assertTrue(cluster_config == deserialized)
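Example 12
An earlier variant of the multi-cluster launcher in Example 5: mining is keyed off args.mine instead of args.start_simulated_mining, there is no p2p bootstrapping block, and a per-cluster --devp2p_port is bumped as well.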
async def main():
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--num_clusters", default=2, type=int)
    args = parser.parse_args()
    clusters = []
    mine_i = random.randint(0, args.num_clusters - 1)
    if args.mine:
        print("cluster {} will be mining".format(mine_i))
    else:
        print("No one will be mining")
    mine = args.mine
    db_path_root = args.db_path_root
    for i in range(args.num_clusters):
        args.mine = mine and i == mine_i
        args.db_path_root = "{}_C{}".format(db_path_root, i)

        config = ClusterConfig.create_from_args(args)
        print("Cluster {} config file: {}".format(i, config.json_filepath))
        print(config.to_json())

        clusters.append(
            cl.Cluster(config, "{}C{}{}_".format(colors[i % len(colors)], i, COLOR_END))
        )

        args.p2p_port += 1
        args.port_start += 100
        args.json_rpc_port += 1
        args.json_rpc_private_port += 1
        args.devp2p_port += 1

    tasks = list()
    tasks.append(asyncio.ensure_future(clusters[0].run()))
    await asyncio.sleep(3)
    for cluster in clusters[1:]:
        tasks.append(asyncio.ensure_future(cluster.run()))
    try:
        await asyncio.gather(*tasks)
    except KeyboardInterrupt:
        # the event loop is already running inside this coroutine, so
        # run_until_complete() would fail here; await the shutdowns directly
        for cluster in clusters:
            try:
                await cluster.shutdown()
            except Exception:
                pass
Example 13
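Argument parsing for a block-inspection tool: a positional action plus optional --height and --hash selectors, with the same persistent-database setup as Example 2.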
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("action", type=str, help="action to take")
    parser.add_argument("--height", type=int, help="block height to operate")
    parser.add_argument("--hash", type=str, help="block hash to operate")
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)

    # initialize database
    if not env.cluster_config.use_mem_db():
        env.db = PersistentDb(
            "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
            clean=env.cluster_config.CLEAN,
        )

    return env, args
Example 14
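A launcher with database verification: when args.check_db is set, the cluster checks a range of root blocks (controlled by the --check_db_rblock_* options) instead of starting normally.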
def main():
    logging.getLogger("asyncio").setLevel(logging.ERROR)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parser.add_argument("--profile", default="", type=str)
    parser.add_argument("--check_db_rblock_from", default=-1, type=int)
    parser.add_argument("--check_db_rblock_to", default=0, type=int)
    parser.add_argument("--check_db_rblock_batch", default=1, type=int)
    args = parser.parse_args()

    config = ClusterConfig.create_from_args(args)
    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    cluster = Cluster(config, args=args)

    if args.check_db:
        cluster.check_db()
    else:
        cluster.start_and_loop()
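Example 15
A health-check driver. It requires EMAIL_FROM_ADDRESS, PASSWORD, and EMAIL_TO_ADDRESS in the environment (presumably for failure notifications), copies the config into a tempfile, and then loops forever: rewrite the boot nodes with config_p2p.py, restart the cluster against the next bootstrap node, and repeat.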
def main():
    if "EMAIL_FROM_ADDRESS" not in os.environ:
        raise ValueError(
            "EMAIL_FROM_ADDRESS not found in environment variables")
    if "PASSWORD" not in os.environ:
        raise ValueError("PASSWORD not found in environment variables")
    if "EMAIL_TO_ADDRESS" not in os.environ:
        raise ValueError("EMAIL_TO_ADDRESS not found in environment variables")

    os.chdir(os.path.dirname("../../quarkchain/cluster/"))
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()
    config = ClusterConfig.create_from_args(args)

    # create a temporary config file for the health check: a copy of the original config
    tempfile.tempdir = "../../mainnet/singularity/"
    fd, config.json_filepath = tempfile.mkstemp()
    with os.fdopen(fd, "w") as tmp:
        tmp.write(config.to_json())
    print("Cluster config file: {}".format(config.json_filepath))
    print(config.to_json())

    cluster = HealthCheckCluster(config)
    bootstrap_nodes = config.P2P.BOOT_NODES.split(",")
    count = 0

    while True:
        bash_command_revised_config = (
            "QKC_CONFIG=" + config.json_filepath +
            " python3 ../../quarkchain/tools/config_p2p.py --bootnodes " +
            bootstrap_nodes[count])
        os.system(bash_command_revised_config)
        print("Start Bootstrap With " + bootstrap_nodes[count])
        cluster.start_and_loop()
        time.sleep(100)
        count = (count + 1) % len(bootstrap_nodes)