def create_from_args(cls, args):
    """Build a ClusterConfig, either by loading the JSON file named in
    ``args.cluster_config`` or by assembling one from the cmd flags.

    Either way the resulting config has ``json_filepath`` pointing at a
    JSON copy of itself, logging is configured from it, and the genesis
    allocation is applied before it is returned.
    """

    def _config_from_flags():
        # Shard/slave counts must be powers of two so shard masks line up.
        check(is_p2(args.num_shards), "--num_shards must be power of 2")
        check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

        config = ClusterConfig()
        config.LOG_LEVEL = args.log_level
        config.DB_PATH_ROOT = args.db_path_root
        config.P2P_PORT = args.p2p_port
        config.JSON_RPC_PORT = args.json_rpc_port
        config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
        config.CLEAN = args.clean
        config.START_SIMULATED_MINING = args.start_simulated_mining
        config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

        config.QUARKCHAIN.update(
            args.num_shards,
            args.root_block_interval_sec,
            args.minor_block_interval_sec,
        )
        config.QUARKCHAIN.NETWORK_ID = args.network_id
        config.GENESIS_DIR = args.genesis_dir
        config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

        # Exactly one of P2P / SIMPLE_NETWORK is configured; the other is None.
        if not args.p2p:
            config.P2P = None
            config.SIMPLE_NETWORK = SimpleNetworkConfig()
            config.SIMPLE_NETWORK.BOOTSTRAP_HOST = args.simple_network_bootstrap_host
            config.SIMPLE_NETWORK.BOOTSTRAP_PORT = args.simple_network_bootstrap_port
        else:
            config.SIMPLE_NETWORK = None
            config.P2P = P2PConfig()  # p2p module
            config.P2P.BOOT_NODES = args.bootnodes
            config.P2P.PRIV_KEY = args.privkey
            config.P2P.MAX_PEERS = args.max_peers
            config.P2P.UPNP = args.upnp

        # One slave per index, consecutive ports starting at port_start.
        config.SLAVE_LIST = []
        for idx in range(args.num_slaves):
            slave = SlaveConfig()
            slave.PORT = args.port_start + idx
            slave.ID = "S{}".format(idx)
            slave.SHARD_MASK_LIST = [ShardMask(idx | args.num_slaves)]
            config.SLAVE_LIST.append(slave)

        # Persist the generated config so json_filepath is always valid.
        fd, config.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(config.to_json())
        return config

    if args.cluster_config:
        with open(args.cluster_config) as f:
            config = cls.from_json(f.read())
        config.json_filepath = args.cluster_config
    else:
        config = _config_from_flags()

    Logger.set_logging_level(config.LOG_LEVEL)
    Logger.set_kafka_logger(config.kafka_logger)
    update_genesis_alloc(config)
    return config
def create_from_args(cls, args):
    """ Create ClusterConfig either from the JSON file or cmd flags. """

    def __create_from_args_internal():
        # Both counts must be powers of 2 so full shard IDs pack cleanly.
        check(
            is_p2(args.num_shards_per_chain),
            "--num_shards_per_chain must be power of 2",
        )
        check(is_p2(args.num_slaves), "--num_slaves must be power of 2")
        config = ClusterConfig()
        config.LOG_LEVEL = args.log_level
        config.DB_PATH_ROOT = args.db_path_root
        config.P2P_PORT = args.p2p_port
        config.JSON_RPC_PORT = args.json_rpc_port
        config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
        config.JSON_RPC_HOST = args.json_rpc_host
        config.PRIVATE_JSON_RPC_HOST = args.json_rpc_private_host
        config.CLEAN = args.clean
        config.START_SIMULATED_MINING = args.start_simulated_mining
        config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history
        config.QUARKCHAIN.update(
            args.num_chains,
            args.num_shards_per_chain,
            args.root_block_interval_sec,
            args.minor_block_interval_sec,
            args.default_token,
        )
        config.QUARKCHAIN.NETWORK_ID = args.network_id
        config.GENESIS_DIR = args.genesis_dir
        config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address
        # Exactly one transport is active: P2P or the simple test network.
        if args.p2p:
            config.SIMPLE_NETWORK = None
            config.P2P = P2PConfig()
            # p2p module
            config.P2P.BOOT_NODES = args.bootnodes
            config.P2P.PRIV_KEY = args.privkey
            config.P2P.MAX_PEERS = args.max_peers
            config.P2P.UPNP = args.upnp
        else:
            config.P2P = None
            config.SIMPLE_NETWORK = SimpleNetworkConfig()
            config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                args.simple_network_bootstrap_host)
            config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                args.simple_network_bootstrap_port)
        # Optional Prometheus metrics exporter.
        if args.prom:
            config.PROMETHEUS = PrometheusConfig()
            config.PROMETHEUS.INTERVAL = args.prom_interval
            config.PROMETHEUS.TOKENS = args.prom_tokens
            config.PROMETHEUS.PORT = args.prom_port
        # One slave per index on consecutive ports; shard lists filled below.
        config.SLAVE_LIST = []
        for i in range(args.num_slaves):
            slave_config = SlaveConfig()
            slave_config.PORT = args.port_start + i
            slave_config.ID = "S{}".format(i)
            slave_config.FULL_SHARD_ID_LIST = []
            config.SLAVE_LIST.append(slave_config)
        # Assign full shard IDs round-robin across slaves. Each ID packs the
        # chain index into the high 16 bits and adds the per-chain shard
        # count plus the shard index in the low bits — presumably the
        # chain_id << 16 | shard_size | shard_id encoding; confirm against
        # the QuarkChain config module.
        full_shard_ids = [
            (i << 16) + args.num_shards_per_chain + j
            for i in range(args.num_chains)
            for j in range(args.num_shards_per_chain)
        ]
        for i, full_shard_id in enumerate(full_shard_ids):
            slave = config.SLAVE_LIST[i % args.num_slaves]
            slave.FULL_SHARD_ID_LIST.append(full_shard_id)
        # Persist the generated config so json_filepath is always valid.
        fd, config.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(config.to_json())
        return config

    if args.cluster_config:
        # Load an existing JSON config; remember where it came from.
        with open(args.cluster_config) as f:
            config = cls.from_json(f.read())
        config.json_filepath = args.cluster_config
    else:
        config = __create_from_args_internal()
    # Environment variables may override file/flag values.
    config.apply_env()
    Logger.set_logging_level(config.LOG_LEVEL)
    Logger.set_kafka_logger(config.kafka_logger)
    update_genesis_alloc(config)
    return config
def create_from_args(cls, args):
    """Create ClusterConfig either from the JSON file or cmd flags.

    When ``args.cluster_config`` names a JSON file it is loaded verbatim;
    otherwise a config is assembled from the parsed command-line flags and
    written to a temp file. In both cases ``config.json_filepath`` points
    at the JSON on disk, logging is configured from the result, and the
    genesis allocation is applied before returning.
    """

    def __create_from_args_internal():
        # Shard/slave counts must be powers of 2 so shard masks line up.
        check(is_p2(args.num_shards), "--num_shards must be power of 2")
        check(is_p2(args.num_slaves), "--num_slaves must be power of 2")
        config = ClusterConfig()
        config.LOG_LEVEL = args.log_level
        config.DB_PATH_ROOT = args.db_path_root
        config.P2P_PORT = args.p2p_port
        config.JSON_RPC_PORT = args.json_rpc_port
        config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
        config.CLEAN = args.clean
        config.MINE = args.mine
        config.SLAVE_IDS = args.slave_ids
        config.IS_MASTER = args.is_master
        config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history
        config.QUARKCHAIN.update(
            args.num_shards,
            args.root_block_interval_sec,
            args.minor_block_interval_sec,
        )
        config.QUARKCHAIN.NETWORK_ID = args.network_id
        config.GENESIS_DIR = args.genesis_dir
        config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address
        # Exactly one transport is configured: devp2p or the simple network.
        if args.devp2p_enable:
            config.SIMPLE_NETWORK = None
            config.P2P = P2PConfig()
            config.P2P.IP = args.devp2p_ip
            config.P2P.DISCOVERY_PORT = args.devp2p_port
            config.P2P.BOOTSTRAP_HOST = args.devp2p_bootstrap_host
            config.P2P.BOOTSTRAP_PORT = args.devp2p_bootstrap_port
            config.P2P.MIN_PEERS = args.devp2p_min_peers
            config.P2P.MAX_PEERS = args.devp2p_max_peers
            config.P2P.ADDITIONAL_BOOTSTRAPS = args.devp2p_additional_bootstraps
        else:
            config.P2P = None
            config.SIMPLE_NETWORK = SimpleNetworkConfig()
            config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                args.simple_network_bootstrap_host)
            config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                args.simple_network_bootstrap_port)
        # Spread the slaves round-robin over the comma-separated host list;
        # ports are consecutive starting at port_start.
        config.SLAVE_LIST = []
        slave_ip_list = args.slave_ips.split(",")
        slave_ip_len = len(slave_ip_list)
        for i in range(args.num_slaves):
            slave_config = SlaveConfig()
            slave_config.IP = slave_ip_list[(i % slave_ip_len)]
            slave_config.PORT = args.port_start + i
            slave_config.ID = "S{}".format(i)
            slave_config.SHARD_MASK_LIST = [ShardMask(i | args.num_slaves)]
            config.SLAVE_LIST.append(slave_config)
        # Persist the generated config so json_filepath is always valid.
        fd, config.json_filepath = tempfile.mkstemp()
        with os.fdopen(fd, "w") as tmp:
            tmp.write(config.to_json())
        return config

    if args.cluster_config:
        with open(args.cluster_config) as f:
            config = cls.from_json(f.read())
        config.json_filepath = args.cluster_config
    else:
        config = __create_from_args_internal()
    Logger.set_logging_level(config.LOG_LEVEL)
    Logger.set_kafka_logger(config.kafka_logger)
    update_genesis_alloc(config)
    return config