def test_reward_tax_rate(self):
    """reward_tax_rate must expose REWARD_TAX_RATE as an exact Fraction.

    The default (0.5) and values expressible with a small denominator
    round-trip cleanly; a value such as 0.123 is rejected by the
    property's internal assertion.
    """
    config = QuarkChainConfig()
    # Default tax rate is 50%.
    self.assertEqual(config.reward_tax_rate, Fraction(1, 2))
    # Representable rates convert to their reduced-fraction form.
    for raw_rate, as_fraction in ((0.33, Fraction(33, 100)), (0.8, Fraction(4, 5))):
        config.REWARD_TAX_RATE = raw_rate
        self.assertEqual(config.reward_tax_rate, as_fraction)
    # 0.123 does not satisfy the property's precision check.
    config.REWARD_TAX_RATE = 0.123
    with self.assertRaises(AssertionError):
        _ = config.reward_tax_rate
def __init__(self):
    """Create a default single-slave cluster config and write it to a temp JSON file."""
    self.QUARKCHAIN = QuarkChainConfig()
    self.MASTER = MasterConfig()
    self.SIMPLE_NETWORK = SimpleNetworkConfig()
    self.MONITORING = MonitoringConfig()
    self._json_filepath = None
    self.kafka_logger = KafkaSampleLogger(self)

    # Default cluster: a single slave "S0" owning full shard id 1.
    default_slave = SlaveConfig()
    default_slave.PORT = 38000
    default_slave.ID = "S0"
    default_slave.FULL_SHARD_ID_LIST = [1]
    self.SLAVE_LIST = [default_slave]  # type: List[SlaveConfig]

    # Persist the serialized config so other processes can read it.
    fd, self.json_filepath = tempfile.mkstemp()
    with os.fdopen(fd, "w") as out:
        out.write(self.to_json())
def from_dict(cls, d):
    """Rebuild a cluster config object from a plain dict.

    The base-class deserialization leaves nested sections as raw dicts;
    each one is promoted to its dedicated config class here.
    """
    config = super().from_dict(d)
    config.QUARKCHAIN = QuarkChainConfig.from_dict(config.QUARKCHAIN)
    config.MONITORING = MonitoringConfig.from_dict(config.MONITORING)
    config.MASTER = MasterConfig.from_dict(config.MASTER)
    config.SLAVE_LIST = [SlaveConfig.from_dict(slave) for slave in config.SLAVE_LIST]
    # Exactly one network section is expected; P2P wins when present.
    if "P2P" not in d:
        config.SIMPLE_NETWORK = SimpleNetworkConfig.from_dict(d["SIMPLE_NETWORK"])
    else:
        config.P2P = P2PConfig.from_dict(d["P2P"])
    return config
def testBasic(self):
    """Round-trip a QuarkChainConfig through to_json()/from_json().

    Builds a config with a POW_SIMULATE root chain plus five shards
    (2x SHA3SHA3 at default block time, 2x ETHASH at 15s, 1 shard left
    entirely at defaults, which serializes as CONSENSUS_TYPE "NONE"),
    pins the exact JSON serialization, and verifies that deserializing
    that JSON reproduces the same serialization.
    """
    config = QuarkChainConfig()
    config.ROOT = RootConfig()
    config.ROOT.CONSENSUS_TYPE = ConsensusType.POW_SIMULATE
    config.ROOT.CONSENSUS_CONFIG = POWConfig()
    config.ROOT.CONSENSUS_CONFIG.TARGET_BLOCK_TIME = 60
    config.SHARD_LIST = []
    # Two SHA3SHA3 shards with the default POW target block time (10s).
    for i in range(2):
        s = ShardConfig()
        s.CONSENSUS_TYPE = ConsensusType.POW_SHA3SHA3
        s.CONSENSUS_CONFIG = POWConfig()
        config.SHARD_LIST.append(s)
    # Two ETHASH shards with an explicit 15s target block time.
    for i in range(2):
        s = ShardConfig()
        s.CONSENSUS_TYPE = ConsensusType.POW_ETHASH
        s.CONSENSUS_CONFIG = POWConfig()
        s.CONSENSUS_CONFIG.TARGET_BLOCK_TIME = 15
        config.SHARD_LIST.append(s)
    # One shard left completely at defaults.
    for i in range(1):
        s = ShardConfig()
        config.SHARD_LIST.append(s)
    # Golden serialization, compared byte-for-byte below.
    # NOTE(review): the EXTRA_DATA hex decodes to the Dickens quote used
    # as the genesis extra data; it must not be reformatted.
    expected_json = """{ "SHARD_SIZE": 8, "MAX_NEIGHBORS": 32, "MINOR_BLOCK_DEFAULT_REWARD": 100000000000000000000, "NETWORK_ID": 3, "TRANSACTION_QUEUE_SIZE_LIMIT_PER_SHARD": 10000, "BLOCK_EXTRA_DATA_SIZE_LIMIT": 1024, "PROOF_OF_PROGRESS_BLOCKS": 1, "TESTNET_MASTER_ADDRESS": "199bcc2ebf71a851e388bd926595376a49bdaa329c6485f3", "P2P_PROTOCOL_VERSION": 0, "P2P_COMMAND_SIZE_LIMIT": 4294967295, "SKIP_ROOT_DIFFICULTY_CHECK": false, "SKIP_MINOR_DIFFICULTY_CHECK": false, "ROOT": { "MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF": 60, "CONSENSUS_TYPE": "POW_SIMULATE", "CONSENSUS_CONFIG": { "TARGET_BLOCK_TIME": 60 }, "GENESIS": { "VERSION": 0, "HEIGHT": 0, "SHARD_SIZE": 32, "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000", "COINBASE_AMOUNT": 5, "HASH_PREV_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000", "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000", "TIMESTAMP": 1519147489, "DIFFICULTY": 1000000, "NONCE": 0 } }, "SHARD_LIST": [ { "CONSENSUS_TYPE": "POW_SHA3SHA3", "CONSENSUS_CONFIG": { "TARGET_BLOCK_TIME": 10 }, "GENESIS": { "ROOT_HEIGHT": 0, "VERSION": 0, "HEIGHT": 0, "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000", "COINBASE_AMOUNT": 5, "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000", "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000", "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73", "TIMESTAMP": 1519147489, "DIFFICULTY": 10000, "NONCE": 0, "ALLOC": {} } }, { "CONSENSUS_TYPE": "POW_SHA3SHA3", "CONSENSUS_CONFIG": { "TARGET_BLOCK_TIME": 10 }, "GENESIS": { "ROOT_HEIGHT": 0, "VERSION": 0, "HEIGHT": 0, "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000", "COINBASE_AMOUNT": 5, "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000", "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000", "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73", "TIMESTAMP": 1519147489, "DIFFICULTY": 10000, "NONCE": 0, "ALLOC": {} } }, { "CONSENSUS_TYPE": "POW_ETHASH", "CONSENSUS_CONFIG": { "TARGET_BLOCK_TIME": 15 }, "GENESIS": { "ROOT_HEIGHT": 0, "VERSION": 0, "HEIGHT": 0, "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000", "COINBASE_AMOUNT": 5, "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000", "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000", "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73", "TIMESTAMP": 1519147489, "DIFFICULTY": 10000, "NONCE": 0, "ALLOC": {} } }, { "CONSENSUS_TYPE": "POW_ETHASH", "CONSENSUS_CONFIG": { "TARGET_BLOCK_TIME": 15 }, "GENESIS": { "ROOT_HEIGHT": 0, "VERSION": 0, "HEIGHT": 0, "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000", "COINBASE_AMOUNT": 5, "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000", "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000", "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73", "TIMESTAMP": 1519147489, "DIFFICULTY": 10000, "NONCE": 0, "ALLOC": {} } }, { "CONSENSUS_TYPE": "NONE" } ] }"""
    self.assertEqual(config.to_json(), expected_json)
    # The golden JSON must also deserialize back to an equivalent config.
    deserialized_config = QuarkChainConfig.from_json(expected_json)
    self.assertEqual(deserialized_config.to_json(), expected_json)
class ClusterConfig(BaseConfig):
    """Top-level configuration for a whole cluster: master, slaves,
    chain parameters, networking (simple network or p2p), monitoring,
    and optional Prometheus exporting.

    Instances can be built from CLI flags (create_from_args), from a
    JSON file, or from env vars layered on top (apply_env).
    """

    # Service ports / hosts.
    P2P_PORT = 38291
    JSON_RPC_PORT = 38391
    PRIVATE_JSON_RPC_PORT = 38491
    JSON_RPC_HOST = "localhost"
    PRIVATE_JSON_RPC_HOST = "localhost"
    ENABLE_PUBLIC_JSON_RPC = True
    ENABLE_PRIVATE_JSON_RPC = True
    ENABLE_TRANSACTION_HISTORY = False
    # Empty DB_PATH_ROOT means in-memory DB (see use_mem_db).
    DB_PATH_ROOT = "./db"
    LOG_LEVEL = "info"
    START_SIMULATED_MINING = False
    CLEAN = False
    GENESIS_DIR = None
    # Sub-config placeholders; real instances are assigned in __init__.
    QUARKCHAIN = None
    MASTER = None
    SLAVE_LIST = None
    SIMPLE_NETWORK = None
    P2P = None
    PROMETHEUS = None
    MONITORING = None

    def __init__(self):
        """Build a default single-slave config and persist it to a temp JSON file."""
        self.QUARKCHAIN = QuarkChainConfig()
        self.MASTER = MasterConfig()
        self.SLAVE_LIST = []  # type: List[SlaveConfig]
        self.SIMPLE_NETWORK = SimpleNetworkConfig()
        self._json_filepath = None
        self.MONITORING = MonitoringConfig()
        self.kafka_logger = KafkaSampleLogger(self)

        # Default: one slave "S0" owning full shard id 1.
        slave_config = SlaveConfig()
        slave_config.PORT = 38000
        slave_config.ID = "S0"
        slave_config.FULL_SHARD_ID_LIST = [1]
        self.SLAVE_LIST.append(slave_config)

        # Persist the serialized config so other processes can pick it up.
        fd, self.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(self.to_json())

    def get_slave_info_list(self):
        """Return a SlaveInfo per configured slave (id, host, port, shard ids)."""
        results = []
        for slave in self.SLAVE_LIST:
            results.append(
                SlaveInfo(slave.ID, slave.HOST, slave.PORT, slave.FULL_SHARD_ID_LIST))
        return results

    def get_slave_config(self, id):
        """Return the SlaveConfig with the given ID; raise RuntimeError if absent."""
        for slave in self.SLAVE_LIST:
            if slave.ID == id:
                return slave
        raise RuntimeError(
            "Slave id {0} does not exist in cluster config".format(id))

    @property
    def json_filepath(self):
        # Path of the JSON file this config was written to / loaded from.
        return self._json_filepath

    @json_filepath.setter
    def json_filepath(self, value):
        self._json_filepath = value

    def use_p2p(self):
        """True when the p2p module (rather than the simple network) is configured."""
        return self.P2P is not None

    def use_mem_db(self):
        """True when no DB path is set, i.e. the cluster runs on an in-memory DB."""
        return not self.DB_PATH_ROOT

    def apply_env(self):
        """Overlay config values from env vars of the form QKC__A__B__FIELD.

        Path segments are separated by double underscores; the first must
        be "QKC". Intermediate segments must resolve to BaseConfig
        sub-objects; the final segment names the attribute to set.
        """
        for k, v in os.environ.items():
            key_path = k.split("__")
            if key_path[0] != "QKC":
                continue
            print("Applying env {0}: {1}".format(k, v))
            config = self
            # Walk intermediate segments down the config tree.
            for i in range(1, len(key_path) - 1):
                name = key_path[i]
                if not hasattr(config, name):
                    raise ValueError(
                        "Cannot apply env {}: key not found".format(k))
                config = getattr(config, name)
                if not isinstance(config, BaseConfig):
                    raise ValueError(
                        "Cannot apply env {}: config not found".format(k))
            if not hasattr(config, key_path[-1]):
                raise ValueError(
                    "Cannot apply env {}: key not found".format(k))
            # NOTE(review): eval() executes arbitrary Python from the
            # environment — acceptable only if the environment is trusted.
            setattr(config, key_path[-1], eval(v))

    @classmethod
    def attach_arguments(cls, parser):
        """Register every cluster CLI flag on the given argparse parser."""
        parser.add_argument("--cluster_config", default="", type=str)
        parser.add_argument("--log_level",
                            default=ClusterConfig.LOG_LEVEL,
                            type=str)
        parser.add_argument("--clean",
                            action="store_true",
                            default=ClusterConfig.CLEAN,
                            dest="clean")
        parser.add_argument(
            "--start_simulated_mining",
            action="store_true",
            default=ClusterConfig.START_SIMULATED_MINING,
            dest="start_simulated_mining",
        )
        # Genesis data ships next to this package by default.
        pwd = os.path.dirname(os.path.abspath(__file__))
        default_genesis_dir = os.path.join(pwd, "../genesis_data")
        parser.add_argument("--genesis_dir",
                            default=default_genesis_dir,
                            type=str)
        # Chain topology.
        parser.add_argument("--num_chains",
                            default=QuarkChainConfig.CHAIN_SIZE,
                            type=int)
        parser.add_argument("--num_shards_per_chain",
                            default=ChainConfig.SHARD_SIZE,
                            type=int)
        parser.add_argument("--root_block_interval_sec", default=10, type=int)
        parser.add_argument("--minor_block_interval_sec", default=3, type=int)
        parser.add_argument("--network_id",
                            default=QuarkChainConfig.NETWORK_ID,
                            type=int)
        parser.add_argument(
            "--default_token",
            default=QuarkChainConfig.GENESIS_TOKEN,
            type=str,
            help="sets GENESIS_TOKEN and DEFAULT_CHAIN_TOKEN",
        )
        # Slaves and ports.
        parser.add_argument("--num_slaves", default=4, type=int)
        parser.add_argument("--port_start", default=38000, type=int)
        parser.add_argument("--db_path_root",
                            default=ClusterConfig.DB_PATH_ROOT,
                            type=str)
        parser.add_argument("--p2p_port",
                            default=ClusterConfig.P2P_PORT,
                            type=int)
        parser.add_argument("--json_rpc_port",
                            default=ClusterConfig.JSON_RPC_PORT,
                            type=int)
        parser.add_argument(
            "--json_rpc_private_port",
            default=ClusterConfig.PRIVATE_JSON_RPC_PORT,
            type=int,
        )
        parser.add_argument("--json_rpc_host",
                            default=ClusterConfig.JSON_RPC_HOST,
                            type=str)
        parser.add_argument(
            "--json_rpc_private_host",
            default=ClusterConfig.PRIVATE_JSON_RPC_HOST,
            type=str,
        )
        # NOTE(review): type=bool on the next two flags means any
        # non-empty string (including "False") parses as True — confirm
        # whether callers ever pass these explicitly.
        parser.add_argument(
            "--enable_public_json_rpc",
            default=ClusterConfig.ENABLE_PUBLIC_JSON_RPC,
            type=bool,
        )
        parser.add_argument(
            "--enable_private_json_rpc",
            default=ClusterConfig.ENABLE_PRIVATE_JSON_RPC,
            type=bool,
        )
        parser.add_argument(
            "--enable_transaction_history",
            action="store_true",
            default=False,
            dest="enable_transaction_history",
        )
        # Simple-network bootstrap (used when --p2p is absent).
        parser.add_argument(
            "--simple_network_bootstrap_host",
            default=SimpleNetworkConfig.BOOTSTRAP_HOST,
        )
        parser.add_argument(
            "--simple_network_bootstrap_port",
            default=SimpleNetworkConfig.BOOTSTRAP_PORT,
        )
        # p2p module
        parser.add_argument(
            "--p2p",
            action="store_true",
            default=False,
            dest="p2p",
            help="enables new p2p module",
        )
        parser.add_argument(
            "--max_peers",
            default=P2PConfig.MAX_PEERS,
            type=int,
            help="max peer for new p2p module",
        )
        parser.add_argument(
            "--bootnodes",
            default="",
            type=str,
            help="comma seperated enodes in the format: enode://PUBKEY@IP:PORT",
        )
        parser.add_argument(
            "--upnp",
            action="store_true",
            default=False,
            dest="upnp",
            help=
            "if true, automatically runs a upnp service that sets port mapping on upnp-enabled devices",
        )
        parser.add_argument(
            "--privkey",
            default="",
            type=str,
            help=
            "if empty, will be automatically generated; but note that it will be lost upon node reboot",
        )
        # NOTE(review): same type=bool caveat as above applies here.
        parser.add_argument(
            "--check_db",
            default=False,
            type=bool,
            help="if true, will perform integrity check on db only",
        )
        # Prometheus monitoring.
        parser.add_argument(
            "--enable_prometheus",
            action="store_true",
            default=False,
            dest="prom",
            help="enable prometheus client for monitoring",
        )
        parser.add_argument(
            "--prom_interval",
            default=PrometheusConfig.INTERVAL,
            type=int,
            help="intervals between prometheus queries",
        )
        parser.add_argument(
            "--prom_tokens",
            default=PrometheusConfig.TOKENS,
            type=str,
            help="tokens to be monitored by prometheus, separated by comma",
        )
        parser.add_argument(
            "--prom_port",
            default=PrometheusConfig.PORT,
            type=int,
            help="port for prometheus exposing",
        )
        parser.add_argument(
            "--enable_count_balance",
            action="store_true",
            default=False,
            dest="bal",
            help="use prometheus to monitoring total balance",
        )
        parser.add_argument("--monitoring_kafka_rest_address",
                            default="",
                            type=str)

    @classmethod
    def create_from_args(cls, args):
        """ Create ClusterConfig either from the JSON file or cmd flags. """

        def __create_from_args_internal():
            # Shard/slave counts must be powers of two so shards can be
            # distributed evenly across slaves.
            check(
                is_p2(args.num_shards_per_chain),
                "--num_shards_per_chain must be power of 2",
            )
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root

            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
            config.JSON_RPC_HOST = args.json_rpc_host
            config.PRIVATE_JSON_RPC_HOST = args.json_rpc_private_host

            config.CLEAN = args.clean
            config.START_SIMULATED_MINING = args.start_simulated_mining
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_chains,
                args.num_shards_per_chain,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
                args.default_token,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id
            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            # Exactly one of P2P / SIMPLE_NETWORK is active.
            if args.p2p:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                # p2p module
                config.P2P.BOOT_NODES = args.bootnodes
                config.P2P.PRIV_KEY = args.privkey
                config.P2P.MAX_PEERS = args.max_peers
                config.P2P.UPNP = args.upnp
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host)
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port)

            if args.prom:
                config.PROMETHEUS = PrometheusConfig()
                config.PROMETHEUS.INTERVAL = args.prom_interval
                config.PROMETHEUS.TOKENS = args.prom_tokens
                config.PROMETHEUS.PORT = args.prom_port

            # One slave per index, consecutive ports starting at port_start.
            config.SLAVE_LIST = []
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.FULL_SHARD_ID_LIST = []
                config.SLAVE_LIST.append(slave_config)

            # assign full shard IDs to each slave, using hex strings to write into JSON
            full_shard_ids = [(i << 16) + args.num_shards_per_chain + j
                              for i in range(args.num_chains)
                              for j in range(args.num_shards_per_chain)]
            # Round-robin the shard ids over the slaves.
            for i, full_shard_id in enumerate(full_shard_ids):
                slave = config.SLAVE_LIST[i % args.num_slaves]
                slave.FULL_SHARD_ID_LIST.append(full_shard_id)

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            # JSON file takes precedence over individual flags.
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        # Env vars overlay whichever source was used.
        config.apply_env()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config

    def to_dict(self):
        """Serialize to a plain dict, emitting only the active network section."""
        ret = super().to_dict()
        ret["QUARKCHAIN"] = self.QUARKCHAIN.to_dict()
        ret["MONITORING"] = self.MONITORING.to_dict()
        ret["MASTER"] = self.MASTER.to_dict()
        ret["SLAVE_LIST"] = [s.to_dict() for s in self.SLAVE_LIST]
        if self.PROMETHEUS:
            ret["PROMETHEUS"] = self.PROMETHEUS.to_dict()
        # Keep exactly one of P2P / SIMPLE_NETWORK in the output.
        if self.P2P:
            ret["P2P"] = self.P2P.to_dict()
            del ret["SIMPLE_NETWORK"]
        else:
            ret["SIMPLE_NETWORK"] = self.SIMPLE_NETWORK.to_dict()
            del ret["P2P"]
        return ret

    @classmethod
    def from_dict(cls, d):
        """Deserialize from a plain dict, promoting nested sections to config objects."""
        config = super().from_dict(d)
        config.QUARKCHAIN = QuarkChainConfig.from_dict(config.QUARKCHAIN)
        config.MONITORING = MonitoringConfig.from_dict(config.MONITORING)
        config.MASTER = MasterConfig.from_dict(config.MASTER)
        config.SLAVE_LIST = [
            SlaveConfig.from_dict(s, config.QUARKCHAIN.CHAINS)
            for s in config.SLAVE_LIST
        ]
        if d.get("PROMETHEUS"):
            config.PROMETHEUS = PrometheusConfig.from_dict(d["PROMETHEUS"])
        if "P2P" in d:
            config.P2P = P2PConfig.from_dict(d["P2P"])
        else:
            config.SIMPLE_NETWORK = SimpleNetworkConfig.from_dict(
                d["SIMPLE_NETWORK"])
        return config
class ClusterConfig(BaseConfig):
    """Cluster configuration (shard-mask variant): master, slaves,
    chain parameters, networking (simple network or p2p) and monitoring.

    Slaves are assigned responsibilities via SHARD_MASK_LIST rather
    than explicit full shard id lists.
    """

    # Service ports.
    P2P_PORT = 38291
    JSON_RPC_PORT = 38391
    PRIVATE_JSON_RPC_PORT = 38491
    ENABLE_TRANSACTION_HISTORY = False
    # Empty DB_PATH_ROOT means in-memory DB (see use_mem_db).
    DB_PATH_ROOT = "./db"
    LOG_LEVEL = "info"
    START_SIMULATED_MINING = False
    CLEAN = False
    GENESIS_DIR = None
    # Sub-config placeholders; real instances are assigned in __init__.
    QUARKCHAIN = None
    MASTER = None
    SLAVE_LIST = None
    SIMPLE_NETWORK = None
    P2P = None
    MONITORING = None

    def __init__(self):
        """Build a default single-slave config and persist it to a temp JSON file."""
        self.QUARKCHAIN = QuarkChainConfig()
        self.MASTER = MasterConfig()
        self.SLAVE_LIST = []  # type: List[SlaveConfig]
        self.SIMPLE_NETWORK = SimpleNetworkConfig()
        self._json_filepath = None
        self.MONITORING = MonitoringConfig()
        self.kafka_logger = KafkaSampleLogger(self)

        # Default: one slave "S0" with shard mask 1.
        slave_config = SlaveConfig()
        slave_config.PORT = 38000
        slave_config.ID = "S0"
        slave_config.SHARD_MASK_LIST = [ShardMask(1)]
        self.SLAVE_LIST.append(slave_config)

        # Persist the serialized config so other processes can pick it up.
        fd, self.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(self.to_json())

    def get_slave_info_list(self):
        """Return a SlaveInfo per configured slave (id, host, port, masks)."""
        results = []
        for slave in self.SLAVE_LIST:
            results.append(
                SlaveInfo(slave.ID, slave.HOST, slave.PORT, slave.SHARD_MASK_LIST)
            )
        return results

    def get_slave_config(self, id):
        """Return the SlaveConfig with the given ID; raise RuntimeError if absent."""
        for slave in self.SLAVE_LIST:
            if slave.ID == id:
                return slave
        raise RuntimeError("Slave id {} does not exist in cluster config".format(id))

    @property
    def json_filepath(self):
        # Path of the JSON file this config was written to / loaded from.
        return self._json_filepath

    @json_filepath.setter
    def json_filepath(self, value):
        self._json_filepath = value

    def use_p2p(self):
        """True when the p2p module (rather than the simple network) is configured."""
        return self.P2P is not None

    def use_mem_db(self):
        """True when no DB path is set, i.e. the cluster runs on an in-memory DB."""
        return not self.DB_PATH_ROOT

    @classmethod
    def attach_arguments(cls, parser):
        """Register every cluster CLI flag on the given argparse parser."""
        parser.add_argument("--cluster_config", default="", type=str)
        parser.add_argument("--log_level", default=ClusterConfig.LOG_LEVEL, type=str)
        parser.add_argument(
            "--clean", action="store_true", default=ClusterConfig.CLEAN, dest="clean"
        )
        parser.add_argument(
            "--start_simulated_mining",
            action="store_true",
            default=ClusterConfig.START_SIMULATED_MINING,
            dest="start_simulated_mining",
        )
        # Genesis data ships next to this package by default.
        pwd = os.path.dirname(os.path.abspath(__file__))
        default_genesis_dir = os.path.join(pwd, "../genesis_data")
        parser.add_argument("--genesis_dir", default=default_genesis_dir, type=str)
        # Chain topology.
        parser.add_argument(
            "--num_shards", default=QuarkChainConfig.SHARD_SIZE, type=int
        )
        parser.add_argument("--root_block_interval_sec", default=10, type=int)
        parser.add_argument("--minor_block_interval_sec", default=3, type=int)
        parser.add_argument(
            "--network_id", default=QuarkChainConfig.NETWORK_ID, type=int
        )
        # Slaves and ports.
        parser.add_argument("--num_slaves", default=4, type=int)
        parser.add_argument("--port_start", default=38000, type=int)
        parser.add_argument(
            "--db_path_root", default=ClusterConfig.DB_PATH_ROOT, type=str
        )
        parser.add_argument("--p2p_port", default=ClusterConfig.P2P_PORT, type=int)
        parser.add_argument(
            "--json_rpc_port", default=ClusterConfig.JSON_RPC_PORT, type=int
        )
        parser.add_argument(
            "--json_rpc_private_port",
            default=ClusterConfig.PRIVATE_JSON_RPC_PORT,
            type=int,
        )
        parser.add_argument(
            "--enable_transaction_history",
            action="store_true",
            default=False,
            dest="enable_transaction_history",
        )
        # Simple-network bootstrap (used when --p2p is absent).
        parser.add_argument(
            "--simple_network_bootstrap_host",
            default=SimpleNetworkConfig.BOOTSTRAP_HOST,
        )
        parser.add_argument(
            "--simple_network_bootstrap_port",
            default=SimpleNetworkConfig.BOOTSTRAP_PORT,
        )
        # p2p module
        parser.add_argument(
            "--p2p",
            action="store_true",
            default=False,
            dest="p2p",
            help="enables new p2p module",
        )
        parser.add_argument(
            "--max_peers",
            default=P2PConfig.MAX_PEERS,
            type=int,
            help="max peer for new p2p module",
        )
        parser.add_argument(
            "--bootnodes",
            default="",
            type=str,
            help="comma seperated enodes in the format: enode://PUBKEY@IP:PORT",
        )
        parser.add_argument(
            "--upnp",
            action="store_true",
            default=False,
            dest="upnp",
            help="if true, automatically runs a upnp service that sets port mapping on upnp-enabled devices",
        )
        parser.add_argument(
            "--privkey",
            default="",
            type=str,
            help="if empty, will be automatically generated; but note that it will be lost upon node reboot",
        )
        parser.add_argument("--monitoring_kafka_rest_address", default="", type=str)

    @classmethod
    def create_from_args(cls, args):
        """ Create ClusterConfig either from the JSON file or cmd flags. """

        def __create_from_args_internal():
            # Counts must be powers of two so masks distribute shards evenly.
            check(is_p2(args.num_shards), "--num_shards must be power of 2")
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root
            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
            config.CLEAN = args.clean
            config.START_SIMULATED_MINING = args.start_simulated_mining
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_shards,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id
            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            # Exactly one of P2P / SIMPLE_NETWORK is active.
            if args.p2p:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                # p2p module
                config.P2P.BOOT_NODES = args.bootnodes
                config.P2P.PRIV_KEY = args.privkey
                config.P2P.MAX_PEERS = args.max_peers
                config.P2P.UPNP = args.upnp
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host
                )
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port
                )

            # One slave per index; mask (i | num_slaves) partitions the
            # shard space across the power-of-two slave count.
            config.SLAVE_LIST = []
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.SHARD_MASK_LIST = [ShardMask(i | args.num_slaves)]
                config.SLAVE_LIST.append(slave_config)

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            # JSON file takes precedence over individual flags.
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config

    def to_dict(self):
        """Serialize to a plain dict, emitting only the active network section."""
        ret = super().to_dict()
        ret["QUARKCHAIN"] = self.QUARKCHAIN.to_dict()
        ret["MONITORING"] = self.MONITORING.to_dict()
        ret["MASTER"] = self.MASTER.to_dict()
        ret["SLAVE_LIST"] = [s.to_dict() for s in self.SLAVE_LIST]
        # Keep exactly one of P2P / SIMPLE_NETWORK in the output.
        if self.P2P:
            ret["P2P"] = self.P2P.to_dict()
            del ret["SIMPLE_NETWORK"]
        else:
            ret["SIMPLE_NETWORK"] = self.SIMPLE_NETWORK.to_dict()
            del ret["P2P"]
        return ret

    @classmethod
    def from_dict(cls, d):
        """Deserialize from a plain dict, promoting nested sections to config objects."""
        config = super().from_dict(d)
        config.QUARKCHAIN = QuarkChainConfig.from_dict(config.QUARKCHAIN)
        config.MONITORING = MonitoringConfig.from_dict(config.MONITORING)
        config.MASTER = MasterConfig.from_dict(config.MASTER)
        config.SLAVE_LIST = [SlaveConfig.from_dict(s) for s in config.SLAVE_LIST]
        if "P2P" in d:
            config.P2P = P2PConfig.from_dict(d["P2P"])
        else:
            config.SIMPLE_NETWORK = SimpleNetworkConfig.from_dict(d["SIMPLE_NETWORK"])
        return config
class ClusterConfig(BaseConfig):
    """Cluster configuration (devp2p variant): master, slaves, chain
    parameters, networking (simple network or devp2p) and monitoring.

    Slaves are addressed by IP (see get_slave_info_list) and assigned
    responsibilities via SHARD_MASK_LIST.
    """

    # Service ports.
    P2P_PORT = 38291
    JSON_RPC_PORT = 38391
    PRIVATE_JSON_RPC_PORT = 38491
    ENABLE_TRANSACTION_HISTORY = False
    # Empty DB_PATH_ROOT means in-memory DB (see use_mem_db).
    DB_PATH_ROOT = "./db"
    LOG_LEVEL = "info"
    MINE = False
    CLEAN = False
    GENESIS_DIR = None
    # Sub-config placeholders; real instances are assigned in __init__.
    QUARKCHAIN = None
    MASTER = None
    SLAVE_LIST = None
    SIMPLE_NETWORK = None
    P2P = None
    MONITORING = None

    def __init__(self):
        """Build a default single-slave config and persist it to a temp JSON file."""
        self.QUARKCHAIN = QuarkChainConfig()
        self.MASTER = MasterConfig()
        self.SLAVE_LIST = []
        self.SIMPLE_NETWORK = SimpleNetworkConfig()
        self._json_filepath = None
        self.MONITORING = MonitoringConfig()
        self.kafka_logger = KafkaSampleLogger(self)

        # Default: one slave "S0" with shard mask 1.
        slave_config = SlaveConfig()
        slave_config.PORT = 38000
        slave_config.ID = "S0"
        slave_config.SHARD_MASK_LIST = [ShardMask(1)]
        self.SLAVE_LIST.append(slave_config)

        # Persist the serialized config so other processes can pick it up.
        fd, self.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(self.to_json())

    def get_slave_info_list(self):
        """Return a SlaveInfo per slave, converting the IP string to an int."""
        results = []
        for slave in self.SLAVE_LIST:
            ip = int(ipaddress.ip_address(slave.IP))
            results.append(
                SlaveInfo(slave.ID, ip, slave.PORT, slave.SHARD_MASK_LIST))
        return results

    def get_slave_config(self, id):
        """Return the SlaveConfig with the given ID; raise RuntimeError if absent."""
        for slave in self.SLAVE_LIST:
            if slave.ID == id:
                return slave
        raise RuntimeError(
            "Slave id {} does not exist in cluster config".format(id))

    @property
    def json_filepath(self):
        # Path of the JSON file this config was written to / loaded from.
        return self._json_filepath

    @json_filepath.setter
    def json_filepath(self, value):
        self._json_filepath = value

    def use_p2p(self):
        """True when devp2p (rather than the simple network) is configured."""
        return self.P2P is not None

    def use_mem_db(self):
        """True when no DB path is set, i.e. the cluster runs on an in-memory DB."""
        return not self.DB_PATH_ROOT

    @classmethod
    def attach_arguments(cls, parser):
        """Register every cluster CLI flag on the given argparse parser."""
        parser.add_argument("--cluster_config", default="", type=str)
        parser.add_argument("--log_level",
                            default=ClusterConfig.LOG_LEVEL,
                            type=str)
        parser.add_argument("--clean",
                            action="store_true",
                            default=ClusterConfig.CLEAN,
                            dest="clean")
        parser.add_argument("--mine",
                            action="store_true",
                            default=ClusterConfig.MINE,
                            dest="mine")
        # Genesis data ships next to this package by default.
        pwd = os.path.dirname(os.path.abspath(__file__))
        default_genesis_dir = os.path.join(pwd, "../genesis_data")
        parser.add_argument("--genesis_dir",
                            default=default_genesis_dir,
                            type=str)
        # Chain topology.
        parser.add_argument("--num_shards",
                            default=QuarkChainConfig.SHARD_SIZE,
                            type=int)
        parser.add_argument("--root_block_interval_sec", default=10, type=int)
        parser.add_argument("--minor_block_interval_sec", default=3, type=int)
        parser.add_argument("--network_id",
                            default=QuarkChainConfig.NETWORK_ID,
                            type=int)
        # Slaves and ports.
        parser.add_argument("--num_slaves", default=4, type=int)
        parser.add_argument("--port_start", default=38000, type=int)
        parser.add_argument("--db_path_root",
                            default=ClusterConfig.DB_PATH_ROOT,
                            type=str)
        # NOTE(review): unlike the other port flags this one has no
        # type=int, so an explicitly supplied value stays a string —
        # confirm whether downstream code tolerates that.
        parser.add_argument("--p2p_port", default=ClusterConfig.P2P_PORT)
        parser.add_argument("--json_rpc_port",
                            default=ClusterConfig.JSON_RPC_PORT,
                            type=int)
        parser.add_argument(
            "--json_rpc_private_port",
            default=ClusterConfig.PRIVATE_JSON_RPC_PORT,
            type=int,
        )
        parser.add_argument(
            "--enable_transaction_history",
            action="store_true",
            default=False,
            dest="enable_transaction_history",
        )
        # Simple-network bootstrap (used when --devp2p_enable is absent).
        parser.add_argument(
            "--simple_network_bootstrap_host",
            default=SimpleNetworkConfig.BOOTSTRAP_HOST,
        )
        parser.add_argument(
            "--simple_network_bootstrap_port",
            default=SimpleNetworkConfig.BOOTSTRAP_PORT,
        )
        # devp2p networking flags.
        parser.add_argument("--devp2p_enable",
                            action="store_true",
                            default=False,
                            dest="devp2p_enable")
        """
        set devp2p_ip so that peers can connect to this cluster
        leave empty if you want to use `socket.gethostbyname()`,
        but it may cause this cluster to be unreachable by peers
        """
        parser.add_argument("--devp2p_ip", default=P2PConfig.IP, type=str)
        parser.add_argument("--devp2p_port",
                            default=P2PConfig.DISCOVERY_PORT,
                            type=int)
        parser.add_argument("--devp2p_bootstrap_host",
                            default=P2PConfig.BOOTSTRAP_HOST,
                            type=str)
        parser.add_argument("--devp2p_bootstrap_port",
                            default=P2PConfig.BOOTSTRAP_PORT,
                            type=int)
        parser.add_argument("--devp2p_min_peers",
                            default=P2PConfig.MIN_PEERS,
                            type=int)
        parser.add_argument("--devp2p_max_peers",
                            default=P2PConfig.MAX_PEERS,
                            type=int)
        parser.add_argument("--devp2p_additional_bootstraps",
                            default="",
                            type=str)
        parser.add_argument("--monitoring_kafka_rest_address",
                            default="",
                            type=str)

    @classmethod
    def create_from_args(cls, args):
        """ Create ClusterConfig either from the JSON file or cmd flags. """

        def __create_from_args_internal():
            # Counts must be powers of two so masks distribute shards evenly.
            check(is_p2(args.num_shards), "--num_shards must be power of 2")
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root
            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
            config.CLEAN = args.clean
            config.MINE = args.mine
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_shards,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id
            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            # Exactly one of P2P / SIMPLE_NETWORK is active.
            if args.devp2p_enable:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                config.P2P.IP = args.devp2p_ip
                config.P2P.DISCOVERY_PORT = args.devp2p_port
                config.P2P.BOOTSTRAP_HOST = args.devp2p_bootstrap_host
                config.P2P.BOOTSTRAP_PORT = args.devp2p_bootstrap_port
                config.P2P.MIN_PEERS = args.devp2p_min_peers
                config.P2P.MAX_PEERS = args.devp2p_max_peers
                config.P2P.ADDITIONAL_BOOTSTRAPS = args.devp2p_additional_bootstraps
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host)
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port)

            # One slave per index; mask (i | num_slaves) partitions the
            # shard space across the power-of-two slave count.
            config.SLAVE_LIST = []
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.SHARD_MASK_LIST = [ShardMask(i | args.num_slaves)]
                config.SLAVE_LIST.append(slave_config)

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            # JSON file takes precedence over individual flags.
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config

    def to_dict(self):
        """Serialize to a plain dict, emitting only the active network section."""
        ret = super().to_dict()
        ret["QUARKCHAIN"] = self.QUARKCHAIN.to_dict()
        ret["MONITORING"] = self.MONITORING.to_dict()
        ret["MASTER"] = self.MASTER.to_dict()
        ret["SLAVE_LIST"] = [s.to_dict() for s in self.SLAVE_LIST]
        # Keep exactly one of P2P / SIMPLE_NETWORK in the output.
        if self.P2P:
            ret["P2P"] = self.P2P.to_dict()
            del ret["SIMPLE_NETWORK"]
        else:
            ret["SIMPLE_NETWORK"] = self.SIMPLE_NETWORK.to_dict()
            del ret["P2P"]
        return ret

    @classmethod
    def from_dict(cls, d):
        """Deserialize from a plain dict, promoting nested sections to config objects."""
        config = super().from_dict(d)
        config.QUARKCHAIN = QuarkChainConfig.from_dict(config.QUARKCHAIN)
        config.MONITORING = MonitoringConfig.from_dict(config.MONITORING)
        config.MASTER = MasterConfig.from_dict(config.MASTER)
        config.SLAVE_LIST = [
            SlaveConfig.from_dict(s) for s in config.SLAVE_LIST
        ]
        if "P2P" in d:
            config.P2P = P2PConfig.from_dict(d["P2P"])
        else:
            config.SIMPLE_NETWORK = SimpleNetworkConfig.from_dict(
                d["SIMPLE_NETWORK"])
        return config
def test_basic(self):
    """Round-trip QuarkChainConfig through its JSON serialization.

    Builds a config with a POW_SIMULATE root chain and five shards
    (2x POW_SHA3SHA3 at the default 10s block time, 2x POW_ETHASH at 15s,
    and one left entirely at defaults), then asserts that:
      1. to_json() matches the golden string below, and
      2. from_json(golden) re-serializes to the identical string.
    """
    config = QuarkChainConfig()
    config.ROOT = RootConfig()
    config.ROOT.CONSENSUS_TYPE = ConsensusType.POW_SIMULATE
    config.ROOT.CONSENSUS_CONFIG = POWConfig()
    config.ROOT.CONSENSUS_CONFIG.TARGET_BLOCK_TIME = 60
    config.SHARD_LIST = []
    # Two SHA3SHA3 shards with the default POWConfig block time (10s).
    for i in range(2):
        s = ShardConfig()
        s.CONSENSUS_TYPE = ConsensusType.POW_SHA3SHA3
        s.CONSENSUS_CONFIG = POWConfig()
        config.SHARD_LIST.append(s)
    # Two ETHASH shards with an explicit 15s block time.
    for i in range(2):
        s = ShardConfig()
        s.CONSENSUS_TYPE = ConsensusType.POW_ETHASH
        s.CONSENSUS_CONFIG = POWConfig()
        s.CONSENSUS_CONFIG.TARGET_BLOCK_TIME = 15
        config.SHARD_LIST.append(s)
    # One shard left at defaults (serializes with CONSENSUS_TYPE "NONE"
    # and no CONSENSUS_CONFIG/GENESIS section).
    for i in range(1):
        s = ShardConfig()
        config.SHARD_LIST.append(s)
    expected_json = """{
    "SHARD_SIZE": 8,
    "MAX_NEIGHBORS": 32,
    "NETWORK_ID": 3,
    "TRANSACTION_QUEUE_SIZE_LIMIT_PER_SHARD": 10000,
    "BLOCK_EXTRA_DATA_SIZE_LIMIT": 1024,
    "PROOF_OF_PROGRESS_BLOCKS": 1,
    "GUARDIAN_PUBLIC_KEY": "ab856abd0983a82972021e454fcf66ed5940ed595b0898bcd75cbe2d0a51a00f5358b566df22395a2a8bf6c022c1d51a2c3defe654e91a8d244947783029694d",
    "GUARDIAN_PRIVATE_KEY": null,
    "P2P_PROTOCOL_VERSION": 0,
    "P2P_COMMAND_SIZE_LIMIT": 4294967295,
    "SKIP_ROOT_DIFFICULTY_CHECK": false,
    "SKIP_MINOR_DIFFICULTY_CHECK": false,
    "ROOT": {
        "MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF": 60,
        "CONSENSUS_TYPE": "POW_SIMULATE",
        "CONSENSUS_CONFIG": {
            "TARGET_BLOCK_TIME": 60,
            "REMOTE_MINE": false
        },
        "GENESIS": {
            "VERSION": 0,
            "HEIGHT": 0,
            "SHARD_SIZE": 32,
            "HASH_PREV_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000",
            "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000",
            "TIMESTAMP": 1519147489,
            "DIFFICULTY": 1000000,
            "NONCE": 0
        },
        "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
        "COINBASE_AMOUNT": 120000000000000000000,
        "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 40,
        "DIFFICULTY_ADJUSTMENT_FACTOR": 1024
    },
    "SHARD_LIST": [
        {
            "CONSENSUS_TYPE": "POW_SHA3SHA3",
            "CONSENSUS_CONFIG": {
                "TARGET_BLOCK_TIME": 10,
                "REMOTE_MINE": false
            },
            "GENESIS": {
                "ROOT_HEIGHT": 0,
                "VERSION": 0,
                "HEIGHT": 0,
                "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000",
                "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000",
                "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73",
                "TIMESTAMP": 1519147489,
                "DIFFICULTY": 10000,
                "GAS_LIMIT": 12000000,
                "NONCE": 0,
                "ALLOC": {}
            },
            "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
            "COINBASE_AMOUNT": 5000000000000000000,
            "GAS_LIMIT_EMA_DENOMINATOR": 1024,
            "GAS_LIMIT_ADJUSTMENT_FACTOR": 1024,
            "GAS_LIMIT_MINIMUM": 5000,
            "GAS_LIMIT_MAXIMUM": 9223372036854775807,
            "GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR": 3,
            "GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR": 2,
            "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 7,
            "DIFFICULTY_ADJUSTMENT_FACTOR": 512,
            "EXTRA_SHARD_BLOCKS_IN_ROOT_BLOCK": 3
        },
        {
            "CONSENSUS_TYPE": "POW_SHA3SHA3",
            "CONSENSUS_CONFIG": {
                "TARGET_BLOCK_TIME": 10,
                "REMOTE_MINE": false
            },
            "GENESIS": {
                "ROOT_HEIGHT": 0,
                "VERSION": 0,
                "HEIGHT": 0,
                "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000",
                "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000",
                "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73",
                "TIMESTAMP": 1519147489,
                "DIFFICULTY": 10000,
                "GAS_LIMIT": 12000000,
                "NONCE": 0,
                "ALLOC": {}
            },
            "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
            "COINBASE_AMOUNT": 5000000000000000000,
            "GAS_LIMIT_EMA_DENOMINATOR": 1024,
            "GAS_LIMIT_ADJUSTMENT_FACTOR": 1024,
            "GAS_LIMIT_MINIMUM": 5000,
            "GAS_LIMIT_MAXIMUM": 9223372036854775807,
            "GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR": 3,
            "GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR": 2,
            "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 7,
            "DIFFICULTY_ADJUSTMENT_FACTOR": 512,
            "EXTRA_SHARD_BLOCKS_IN_ROOT_BLOCK": 3
        },
        {
            "CONSENSUS_TYPE": "POW_ETHASH",
            "CONSENSUS_CONFIG": {
                "TARGET_BLOCK_TIME": 15,
                "REMOTE_MINE": false
            },
            "GENESIS": {
                "ROOT_HEIGHT": 0,
                "VERSION": 0,
                "HEIGHT": 0,
                "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000",
                "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000",
                "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73",
                "TIMESTAMP": 1519147489,
                "DIFFICULTY": 10000,
                "GAS_LIMIT": 12000000,
                "NONCE": 0,
                "ALLOC": {}
            },
            "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
            "COINBASE_AMOUNT": 5000000000000000000,
            "GAS_LIMIT_EMA_DENOMINATOR": 1024,
            "GAS_LIMIT_ADJUSTMENT_FACTOR": 1024,
            "GAS_LIMIT_MINIMUM": 5000,
            "GAS_LIMIT_MAXIMUM": 9223372036854775807,
            "GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR": 3,
            "GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR": 2,
            "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 7,
            "DIFFICULTY_ADJUSTMENT_FACTOR": 512,
            "EXTRA_SHARD_BLOCKS_IN_ROOT_BLOCK": 3
        },
        {
            "CONSENSUS_TYPE": "POW_ETHASH",
            "CONSENSUS_CONFIG": {
                "TARGET_BLOCK_TIME": 15,
                "REMOTE_MINE": false
            },
            "GENESIS": {
                "ROOT_HEIGHT": 0,
                "VERSION": 0,
                "HEIGHT": 0,
                "HASH_PREV_MINOR_BLOCK": "0000000000000000000000000000000000000000000000000000000000000000",
                "HASH_MERKLE_ROOT": "0000000000000000000000000000000000000000000000000000000000000000",
                "EXTRA_DATA": "497420776173207468652062657374206f662074696d65732c206974207761732074686520776f727374206f662074696d65732c202e2e2e202d20436861726c6573204469636b656e73",
                "TIMESTAMP": 1519147489,
                "DIFFICULTY": 10000,
                "GAS_LIMIT": 12000000,
                "NONCE": 0,
                "ALLOC": {}
            },
            "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
            "COINBASE_AMOUNT": 5000000000000000000,
            "GAS_LIMIT_EMA_DENOMINATOR": 1024,
            "GAS_LIMIT_ADJUSTMENT_FACTOR": 1024,
            "GAS_LIMIT_MINIMUM": 5000,
            "GAS_LIMIT_MAXIMUM": 9223372036854775807,
            "GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR": 3,
            "GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR": 2,
            "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 7,
            "DIFFICULTY_ADJUSTMENT_FACTOR": 512,
            "EXTRA_SHARD_BLOCKS_IN_ROOT_BLOCK": 3
        },
        {
            "CONSENSUS_TYPE": "NONE",
            "COINBASE_ADDRESS": "000000000000000000000000000000000000000000000000",
            "COINBASE_AMOUNT": 5000000000000000000,
            "GAS_LIMIT_EMA_DENOMINATOR": 1024,
            "GAS_LIMIT_ADJUSTMENT_FACTOR": 1024,
            "GAS_LIMIT_MINIMUM": 5000,
            "GAS_LIMIT_MAXIMUM": 9223372036854775807,
            "GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR": 3,
            "GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR": 2,
            "DIFFICULTY_ADJUSTMENT_CUTOFF_TIME": 7,
            "DIFFICULTY_ADJUSTMENT_FACTOR": 512,
            "EXTRA_SHARD_BLOCKS_IN_ROOT_BLOCK": 3
        }
    ],
    "REWARD_TAX_RATE": 0.5
}"""
    # Removed a leftover debug print(config.to_json()) that polluted test
    # output; the assertions below already pin the serialized form.
    self.assertEqual(config.to_json(), expected_json)
    # Deserializing the golden string and re-serializing must be lossless.
    deserialized_config = QuarkChainConfig.from_json(expected_json)
    self.assertEqual(deserialized_config.to_json(), expected_json)