# Assumed imports for these standalone examples: argparse/asyncio/signal from
# the stdlib, plus project-specific names (keys, ecies, kademlia, CancelToken,
# OperationCancelled, ParagonServer, DiscoveryProtocol, Logger, NETWORK_ID)
# from the surrounding codebase.
import argparse
import asyncio
import signal


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bootnode",
        default="enode://c571e0db93d17cc405cb57640826b70588a6a28785f38b21be471c609ca12fcb06cb306ac44872908f5bed99046031a5af82072d484e3ef9029560c1707193a0@127.0.0.1:29000",
        type=str,
    )
    parser.add_argument(
        "--privkey",
        default="31552f186bf90908ce386fb547dd0410bf443309125cc43fd0ffd642959bf6d9",
        help="hex string of private key; if empty, will be auto-generated",
        type=str,
    )
    parser.add_argument(
        "--listen_port",
        default=29000,
        help="port for discovery UDP and P2P TCP connection",
        type=int,
    )
    parser.add_argument("--max_peers", default=10, type=int)
    parser.add_argument("--logging_level", default="info", type=str)
    parser.add_argument(
        "--upnp",
        action="store_true",  # store_true already defaults to False
        help="if set, will automatically set up port forwarding if UPnP devices that support port forwarding can be found",
    )
    args = parser.parse_args()

    Logger.set_logging_level(args.logging_level)

    if args.privkey:
        privkey = keys.PrivateKey(bytes.fromhex(args.privkey))
    else:
        privkey = ecies.generate_privkey()

    cancel_token = CancelToken("server")
    server = ParagonServer(
        privkey=privkey,
        port=args.listen_port,
        network_id=NETWORK_ID,
        bootstrap_nodes=tuple([kademlia.Node.from_uri(args.bootnode)]),
        token=cancel_token,
        upnp=args.upnp,
    )

    loop = asyncio.get_event_loop()
    # loop.set_debug(True)

    # Trigger the cancel token on SIGINT/SIGTERM so the server shuts down.
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, cancel_token.trigger)

    loop.run_until_complete(server.run())
    loop.run_until_complete(server.cancel())
    loop.close()
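

# A minimal sketch (an addition to the example, using the eth_keys-style
# PrivateKey API from above): derive the enode URI that other nodes can pass
# as --bootnode to reach this process. Note that the default --bootnode above
# embeds the public key derived from the default --privkey.
def enode_uri(privkey, host, port):
    pubkey_hex = privkey.public_key.to_hex()[2:]  # drop the "0x" prefix
    return "enode://{}@{}:{}".format(pubkey_hex, host, port)
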
def main():
    Logger.set_logging_level("debug")
    loop = asyncio.get_event_loop()
    loop.set_debug(True)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bootnode",
        default="enode://c571e0db93d17cc405cb57640826b70588a6a28785f38b21be471c609ca12fcb06cb306ac44872908f5bed99046031a5af82072d484e3ef9029560c1707193a0@127.0.0.1:29000",
        type=str,
    )
    parser.add_argument("--listen_host", default="127.0.0.1", type=str)
    parser.add_argument(
        "--listen_port",
        default=29000,
        help="port for discovery UDP and P2P TCP connection",
        type=int,
    )
    parser.add_argument("--max_peers", default=10, type=int)
    # private key of the bootnode above is 31552f186bf90908ce386fb547dd0410bf443309125cc43fd0ffd642959bf6d9
    parser.add_argument(
        "--privkey",
        default="",
        help="hex string of private key; if empty, will be auto-generated",
        type=str,
    )

    args = parser.parse_args()

    if args.privkey:
        privkey = keys.PrivateKey(bytes.fromhex(args.privkey))
    else:
        privkey = ecies.generate_privkey()
    addr = kademlia.Address(args.listen_host, args.listen_port, args.listen_port)
    bootstrap_nodes = tuple([kademlia.Node.from_uri(args.bootnode)])

    cancel_token = CancelToken("discovery")
    discovery = DiscoveryProtocol(privkey, addr, bootstrap_nodes, cancel_token)

    async def run() -> None:
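        # Bind the discovery UDP endpoint on all interfaces; the advertised
        # address (addr above) uses --listen_host instead.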
        await loop.create_datagram_endpoint(
            lambda: discovery, local_addr=("0.0.0.0", args.listen_port)
        )
        try:
            await discovery.bootstrap()
            while True:
                Logger.info("Routing table size={}".format(len(discovery.routing)))
                await cancel_token.cancellable_wait(asyncio.sleep(5))
        except OperationCancelled:
            pass
        finally:
            await discovery.stop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, cancel_token.trigger)

    loop.run_until_complete(run())
    loop.close()
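

# For intuition, a simplified sketch of the cancellable_wait pattern used
# above (assumed semantics, not the library's actual implementation): race
# the awaited coroutine against the token, cancel the loser, and raise
# OperationCancelled if the token triggered first.
async def cancellable_wait_sketch(token, coro):
    coro_task = asyncio.ensure_future(coro)
    token_task = asyncio.ensure_future(token.wait())
    done, pending = await asyncio.wait(
        {coro_task, token_task}, return_when=asyncio.FIRST_COMPLETED
    )
    for task in pending:
        task.cancel()
    if token_task in done:
        raise OperationCancelled()
    return coro_task.result()
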
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--privkey",
        default="",
        help="hex string of private key; if empty, will be auto-generated",
        type=str,
    )
    parser.add_argument(
        "--listen_port",
        default=29000,
        help="port for discovery UDP and P2P TCP connection",
        type=int,
    )
    parser.add_argument("--logging_level", default="info", type=str)
    args = parser.parse_args()

    Logger.set_logging_level(args.logging_level)

    if args.privkey:
        privkey = keys.PrivateKey(bytes.fromhex(args.privkey))
    else:
        privkey = ecies.generate_privkey()

    cancel_token = CancelToken("server")
    server = ParagonServer(
        privkey=privkey,
        port=args.listen_port,
        network_id=NETWORK_ID,
        token=cancel_token,
    )

    loop = asyncio.get_event_loop()
    loop.set_debug(True)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, cancel_token.trigger)

    loop.run_until_complete(server.run())
    loop.run_until_complete(server.cancel())
    loop.close()


    @classmethod
    def create_from_args(cls, args):
        """Create ClusterConfig either from the JSON file or cmd flags."""
        def __create_from_args_internal():
            check(
                is_p2(args.num_shards_per_chain),
                "--num_shards_per_chain must be power of 2",
            )
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root

            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port
            config.JSON_RPC_HOST = args.json_rpc_host
            config.PRIVATE_JSON_RPC_HOST = args.json_rpc_private_host

            config.CLEAN = args.clean
            config.START_SIMULATED_MINING = args.start_simulated_mining
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_chains,
                args.num_shards_per_chain,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
                args.default_token,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id

            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            if args.p2p:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                # p2p module
                config.P2P.BOOT_NODES = args.bootnodes
                config.P2P.PRIV_KEY = args.privkey
                config.P2P.MAX_PEERS = args.max_peers
                config.P2P.UPNP = args.upnp
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host
                )
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port
                )

            if args.prom:
                config.PROMETHEUS = PrometheusConfig()
                config.PROMETHEUS.INTERVAL = args.prom_interval
                config.PROMETHEUS.TOKENS = args.prom_tokens
                config.PROMETHEUS.PORT = args.prom_port

            config.SLAVE_LIST = []
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.FULL_SHARD_ID_LIST = []
                config.SLAVE_LIST.append(slave_config)

            # Assign full shard IDs to slaves round-robin. A full shard ID is
            # CHAIN_ID << 16 | SHARD_SIZE | SHARD_ID; since SHARD_SIZE is a
            # power of 2 and j < SHARD_SIZE, "+" and "|" coincide here. The
            # IDs are serialized as hex strings in the JSON config.
            full_shard_ids = [
                (i << 16) + args.num_shards_per_chain + j
                for i in range(args.num_chains)
                for j in range(args.num_shards_per_chain)
            ]
            for i, full_shard_id in enumerate(full_shard_ids):
                slave = config.SLAVE_LIST[i % args.num_slaves]
                slave.FULL_SHARD_ID_LIST.append(full_shard_id)
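            # Worked example (illustrative values): num_chains=2,
            # num_shards_per_chain=2, num_slaves=2 gives IDs
            # [0x00002, 0x00003, 0x10002, 0x10003], so S0 serves
            # [0x00002, 0x10002] and S1 serves [0x00003, 0x10003].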

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        config.apply_env()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config


    @classmethod
    def create_from_args(cls, args):
        """Create ClusterConfig either from the JSON file or cmd flags."""

        def __create_from_args_internal():
            check(is_p2(args.num_shards), "--num_shards must be power of 2")
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root

            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port

            config.CLEAN = args.clean
            config.START_SIMULATED_MINING = args.start_simulated_mining
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_shards,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id

            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            if args.p2p:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                # p2p module
                config.P2P.BOOT_NODES = args.bootnodes
                config.P2P.PRIV_KEY = args.privkey
                config.P2P.MAX_PEERS = args.max_peers
                config.P2P.UPNP = args.upnp
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host
                )
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port
                )

            config.SLAVE_LIST = []
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.SHARD_MASK_LIST = [ShardMask(i | args.num_slaves)]

                config.SLAVE_LIST.append(slave_config)

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config


    @classmethod
    def create_from_args(cls, args):
        """Create ClusterConfig either from the JSON file or cmd flags."""
        def __create_from_args_internal():
            check(is_p2(args.num_shards), "--num_shards must be power of 2")
            check(is_p2(args.num_slaves), "--num_slaves must be power of 2")

            config = ClusterConfig()
            config.LOG_LEVEL = args.log_level
            config.DB_PATH_ROOT = args.db_path_root

            config.P2P_PORT = args.p2p_port
            config.JSON_RPC_PORT = args.json_rpc_port
            config.PRIVATE_JSON_RPC_PORT = args.json_rpc_private_port

            config.CLEAN = args.clean
            config.MINE = args.mine
            config.SLAVE_IDS = args.slave_ids
            config.IS_MASTER = args.is_master
            config.ENABLE_TRANSACTION_HISTORY = args.enable_transaction_history

            config.QUARKCHAIN.update(
                args.num_shards,
                args.root_block_interval_sec,
                args.minor_block_interval_sec,
            )
            config.QUARKCHAIN.NETWORK_ID = args.network_id

            config.GENESIS_DIR = args.genesis_dir

            config.MONITORING.KAFKA_REST_ADDRESS = args.monitoring_kafka_rest_address

            if args.devp2p_enable:
                config.SIMPLE_NETWORK = None
                config.P2P = P2PConfig()
                config.P2P.IP = args.devp2p_ip
                config.P2P.DISCOVERY_PORT = args.devp2p_port
                config.P2P.BOOTSTRAP_HOST = args.devp2p_bootstrap_host
                config.P2P.BOOTSTRAP_PORT = args.devp2p_bootstrap_port
                config.P2P.MIN_PEERS = args.devp2p_min_peers
                config.P2P.MAX_PEERS = args.devp2p_max_peers
                config.P2P.ADDITIONAL_BOOTSTRAPS = args.devp2p_additional_bootstraps
            else:
                config.P2P = None
                config.SIMPLE_NETWORK = SimpleNetworkConfig()
                config.SIMPLE_NETWORK.BOOTSTRAP_HOST = (
                    args.simple_network_bootstrap_host
                )
                config.SIMPLE_NETWORK.BOOTSTRAP_PORT = (
                    args.simple_network_bootstrap_port
                )

            config.SLAVE_LIST = []
            slave_ip_list = args.slave_ips.split(",")
            slave_ip_len = len(slave_ip_list)
            # if len(slave_ip_list) > 1:
            #     args.num_slaves = len(slave_ip_list)
            for i in range(args.num_slaves):
                slave_config = SlaveConfig()
                slave_config.IP = slave_ip_list[(i % slave_ip_len)]
                slave_config.PORT = args.port_start + i
                slave_config.ID = "S{}".format(i)
                slave_config.SHARD_MASK_LIST = [ShardMask(i | args.num_slaves)]

                config.SLAVE_LIST.append(slave_config)

            fd, config.json_filepath = tempfile.mkstemp()
            with os.fdopen(fd, "w") as tmp:
                tmp.write(config.to_json())
            return config

        if args.cluster_config:
            with open(args.cluster_config) as f:
                config = cls.from_json(f.read())
                config.json_filepath = args.cluster_config
        else:
            config = __create_from_args_internal()
        Logger.set_logging_level(config.LOG_LEVEL)
        Logger.set_kafka_logger(config.kafka_logger)
        update_genesis_alloc(config)
        return config
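

# Usage sketch (an addition; build_parser() is hypothetical, since these
# excerpts read many flags they do not define):
if __name__ == "__main__":
    cli_args = build_parser().parse_args()  # hypothetical parser factory
    config = ClusterConfig.create_from_args(cli_args)
    print("wrote cluster config to", config.json_filepath)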