def main():
    """Bootstrap the node: load the IP map from Postgres, configure settings,
    start the blockchain persistence loop, and run the reactor (blocking)."""
    # Fetch the IP address map before anything network-related starts.
    db_conn = psycopg2.connect(connection_str)
    db_cursor = db_conn.cursor()
    ip_map = getIpAddressMap(db_cursor)
    db_cursor.close()
    db_conn.close()

    # Use Custom config
    settings.setup("./config.json")
    settings.set_max_peers(500)

    # Bring up the blockchain and persist incoming blocks periodically.
    chain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(chain)
    persist_loop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    persist_loop.start(.1)

    NodeLeader.Instance().Start()

    # Run the custom background code on a daemon thread so it dies
    # together with the main thread.
    worker = threading.Thread(target=custom_background_code,
                              args=(connection_str, ip_map))
    worker.setDaemon(True)
    worker.start()

    # Run all the things (blocking call)
    reactor.run()
    logger.info("Shutting down.")
def execute(self, arguments):
    """Handle the maxpeers command: cap the number of connected peers.

    Disconnects the most recently added peers when the new cap is below
    the current connection count. Returns the new cap on success,
    otherwise None.
    """
    c1 = get_arg(arguments)
    if c1 is None:
        print(f"Maintaining maxpeers at {settings.CONNECTED_PEER_MAX}")
        return
    try:
        # BUG FIX: parse the value *before* mutating settings. The old code
        # called settings.set_max_peers(c1) with the raw string and only then
        # did int(c1), so an invalid value could change settings before the
        # ValueError was raised.
        c1 = int(c1)
        current_max = settings.CONNECTED_PEER_MAX
        settings.set_max_peers(c1)
    except ValueError:
        print("Please supply a positive integer for maxpeers")
        return

    p_len = len(NodeLeader.Instance().Peers)
    if c1 < current_max and c1 < p_len:
        # Trim down to the new cap, newest connections first.
        to_remove = p_len - c1
        peers = NodeLeader.Instance().Peers
        for _ in range(to_remove):
            peer = peers[-1]  # disconnect last peer added first
            # isDead=False: the peer is fine, we just have too many.
            peer.Disconnect("Max connected peers reached", isDead=False)
            peers.pop()
    print(f"Maxpeers set to {c1}")
    return c1
def test_getpeer_list_vs_maxpeer_list(self):
    """https://github.com/CityOfZion/neo-python/issues/678"""
    settings.set_max_peers(1)
    api_server = JsonRpcApi(None, None)

    # Sanity check: we must start with zero connected peers.
    peers = api_server.get_peers()
    self.assertEqual(len(peers['connected']), 0)

    # Attempt to connect more nodes than the max peers setting allows.
    node_a = self._add_new_node('127.0.0.1', 1111)
    node_b = self._add_new_node('127.0.0.2', 2222)

    # Only the first connection should have been accepted.
    peers = api_server.get_peers()
    self.assertEqual(1, len(peers['connected']))
    self.assertEqual('127.0.0.1', peers['connected'][0]['address'])
    self.assertEqual(1111, peers['connected'][0]['port'])

    # Drop the accepted node, then connect a fresh one.
    self.factory.clientConnectionLost(node_a, reason="unittest")
    node_b = self._add_new_node('127.0.0.2', 2222)

    # The dropped node's slot should be reusable after the check loop runs.
    self.leader.PeerCheckLoop()
    peers = api_server.get_peers()
    self.assertEqual(1, len(peers['connected']))
    self.assertEqual('127.0.0.2', peers['connected'][0]['address'])
    self.assertEqual(2222, peers['connected'][0]['port'])

    # restore default settings
    settings.set_max_peers(5)
def set_max_peers(num_peers) -> bool:
    """Apply a new maximum peer count via settings.

    Returns True when the value was accepted, False when settings
    rejected it with a ValueError.
    """
    try:
        settings.set_max_peers(num_peers)
    except ValueError:
        print("Please supply a positive integer for maxpeers")
        return False
    print("Maxpeers set to ", num_peers)
    return True
def execute(self, arguments):
    """Handle the maxpeers command (NodeManager variant).

    Caps the peer count at 10, keeps min peers <= max peers, and
    disconnects the newest connections when over the new cap.
    Returns the new cap on success, otherwise None.
    """
    c1 = get_arg(arguments)
    if c1 is not None:
        try:
            c1 = int(c1)
        except ValueError:
            print("Invalid argument")
            return
        # Hard upper bound enforced by this command.
        if c1 > 10:
            print("Max peers is limited to 10")
            return
        try:
            settings.set_max_peers(c1)
            # Keep the minimum consistent when the new max drops below it.
            if c1 < settings.CONNECTED_PEER_MIN:
                settings.set_min_peers(c1)
                print(f"Minpeers set to {c1}")
        except ValueError:
            print("Please supply a positive integer for maxpeers")
            return
        nodemgr = NodeManager()
        nodemgr.max_clients = c1
        current_max = settings.CONNECTED_PEER_MAX
        connected_count = len(nodemgr.nodes)
        if current_max < connected_count:
            to_remove = connected_count - c1
            for _ in range(to_remove):
                # NOTE(review): this assumes disconnect() removes the node
                # from nodemgr.nodes, otherwise every iteration targets the
                # same trailing node — confirm against NodeManager.
                last_connected_node = nodemgr.nodes[-1]
                wait_for(last_connected_node.disconnect())  # need to avoid it being labelled as dead/bad
        print(f"Maxpeers set to {c1}")
        return c1
    else:
        print(f"Maintaining maxpeers at {settings.CONNECTED_PEER_MAX}")
        return
def run(self):
    """Start the node: blockchain, peer networking, optional wallet and a
    background whitelist thread, then block on the twisted reactor until
    shutdown."""
    # bl: changing to 8 as recommended in the 8-10 range by localhuman (previously had this at 150)
    settings.set_max_peers(8)

    # Setup the blockchain
    self.blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(self.blockchain)
    NodeLeader.Instance().Start()
    # Persist incoming blocks every 100ms, plus one immediate pass.
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)
    Blockchain.Default().PersistBlocks()

    # Disable smart contract events for external smart contracts
    settings.set_log_smart_contract_events(False)

    # if the wallet was set up (by setting a path and loading the password), then open it!
    if self.wallet_path:
        self.wallet_open()

    # Start a thread with custom code
    d = threading.Thread(target=self.whitelist_addresses)
    d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
    d.start()

    # invoke any pre-start action that needs to occur before we start the reactor.
    # optional for subclasses to implement.
    self.pre_start()

    # Run all the things (blocking call)
    self.logger.info("Everything setup and running. Waiting for events...")
    reactor.run()

    # Reactor stopped: tear everything down in reverse order.
    self.logger.info("Shutting down")
    if self.wallet_path:
        self.wallet_close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
    self.logger.info("Shut down.")
def main():
    """Parse CLI arguments, configure network/logging/datadir, then run the
    JSON-RPC and/or REST API servers until the reactor stops."""
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int,
                             help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int,
                             help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument(
        "--syslog", action="store_true",
        help="Log to syslog instead of to log file ('user' is the default facility)")
    # BUG FIX: range(0, 7) excluded 7, but the help text and the
    # LOG_LOCAL0..LOG_LOCAL7 facilities allow values 0-7 inclusive.
    group_logging.add_argument(
        "--syslog-local", action="store", type=int, choices=range(0, 8), metavar="[0-7]",
        help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true",
                               help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")

    # peers. type=int so settings.set_max_peers always receives a number,
    # matching the int default.
    parser.add_argument("--maxpeers", action="store", type=int, default=5,
                        help="Max peers to use for P2P Joining")

    # host
    parser.add_argument("--host", action="store", type=str,
                        help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    # BUG FIX: reworded the previously garbled error message.
    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES,
                            backupCount=LOGFILE_BACKUP_COUNT,
                            disableStderrLogger=args.disable_stderr)
        else:
            print("Logging to stdout and stderr")

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc)
        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        api_server_rest = RestApi()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
def test_peer_adding(self):
    """Exercise NodeLeader peer bookkeeping against the seed list."""
    leader = NodeLeader.Instance()
    Blockchain.Default()._block_cache = {'hello': 1}

    def fake_call_later(delay, method, *args):
        # Execute scheduled calls immediately instead of deferring.
        method(*args)

    def fake_connect_tcp(host, port, factory, timeout=120):
        node = NeoNode()
        node.endpoint = Endpoint(host, port)
        leader.AddConnectedPeer(node)
        return node

    def fake_disconnect(peer):
        return True

    def fake_send_msg(node, message):
        return True

    settings.set_max_peers(len(settings.SEED_LIST))

    with patch('twisted.internet.reactor.connectTCP', fake_connect_tcp), \
         patch('twisted.internet.reactor.callLater', fake_call_later), \
         patch('neo.Network.NeoNode.NeoNode.Disconnect', fake_disconnect), \
         patch('neo.Network.NeoNode.NeoNode.SendSerializedMessage', fake_send_msg):
        leader.Start()
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

        # A received remote peer must be ignored while we are at the cap.
        leader.RemoteNodePeerReceived('hello.com', 1234, 6)
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

        # Directly adding a peer must also respect the cap.
        extra_peer = NeoNode()
        extra_peer.endpoint = Endpoint('hellloo.com', 12344)
        leader.ADDRS.append('hellloo.com:12344')
        leader.AddConnectedPeer(extra_peer)
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

        # Removing a peer frees one slot but keeps its address known.
        dropped = leader.Peers[0]
        leader.RemoveConnectedPeer(dropped)
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST) - 1)
        self.assertEqual(len(leader.ADDRS), len(settings.SEED_LIST))

        # Now a newly received peer can fill the free slot.
        leader.RemoteNodePeerReceived('hello.com', 1234, 6)
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

        # Removing every peer should trigger a restart.
        for current in leader.Peers[:]:
            leader.RemoveConnectedPeer(current)

        # test reset
        leader.ResetBlockRequestsAndCache()
        self.assertEqual(Blockchain.Default()._block_cache, {})
def configure(self, args):
    """Handle the ``config`` command: toggle debug/sc-events/sc-debug-notify/
    vm-log settings or set maxpeers.

    BUG FIXES vs the previous version:
    - ``get_arg(args, 1).lower()`` raised AttributeError when no value was
      given; the None check now happens before ``.lower()``.
    - The on/off tests used two independent ``if``s with an ``else`` bound to
      the second one, so a valid 'on' also fell through to the
      "Please specify on|off" error print; they are now ``if/elif/else``.
    """
    what = get_arg(args)
    if what == 'debug':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Debug logging is now enabled")
                settings.set_loglevel(logging.DEBUG)
            elif c1 == 'off' or c1 == '0':
                print("Debug logging is now disabled")
                settings.set_loglevel(logging.INFO)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'sc-events':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Smart contract event logging is now enabled")
                settings.set_log_smart_contract_events(True)
            elif c1 == 'off' or c1 == '0':
                print("Smart contract event logging is now disabled")
                settings.set_log_smart_contract_events(False)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'sc-debug-notify':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Smart contract emit Notify events on execution failure is now enabled")
                settings.set_emit_notify_events_on_sc_execution_error(True)
            elif c1 == 'off' or c1 == '0':
                print("Smart contract emit Notify events on execution failure is now disabled")
                settings.set_emit_notify_events_on_sc_execution_error(False)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'vm-log':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("VM instruction execution logging is now enabled")
                settings.set_log_vm_instruction(True)
            elif c1 == 'off' or c1 == '0':
                print("VM instruction execution logging is now disabled")
                settings.set_log_vm_instruction(False)
            else:
                print("Cannot configure VM instruction logging. Please specify on|off")
        else:
            print("Cannot configure VM instruction logging. Please specify on|off")
    elif what == 'maxpeers':
        try:
            # int() also raises for None (no argument) or non-numeric input,
            # both handled by the except below.
            num_peers = int(get_arg(args, 1))
            if num_peers > 0:
                old_max_peers = settings.CONNECTED_PEER_MAX
                settings.set_max_peers(num_peers)
                # Let the node leader react to the changed limit.
                NodeLeader.Instance().OnUpdatedMaxPeers(old_max_peers, num_peers)
                print("set max peers to %s " % num_peers)
            else:
                print("Please specify integer greater than zero")
        except Exception:
            print("Cannot configure max peers. Please specify an integer greater than 0")
    else:
        print(
            "Cannot configure %s try 'config sc-events on|off', 'config debug on|off', 'config sc-debug-notify on|off' or 'config vm-log on|off'" % what)
def main():
    """Parse CLI arguments, configure the network/theme/datadir, then run the
    interactive prompt on top of the twisted reactor (blocking)."""
    parser = argparse.ArgumentParser()

    # Network group
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-m", "--mainnet", action="store_true", default=False,
                       help="Use MainNet instead of the default TestNet")
    group.add_argument("-p", "--privnet", nargs="?", metavar="host", const=True, default=False,
                       help="Use a private net instead of the default TestNet, optionally using a custom host (default: 127.0.0.1)")
    group.add_argument("--coznet", action="store_true", default=False,
                       help="Use the CoZ network instead of the default TestNet")
    group.add_argument("-c", "--config", action="store", help="Use a specific config file")

    # Theme
    parser.add_argument("-t", "--set-default-theme", dest="theme", choices=["dark", "light"],
                        help="Set the default theme to be loaded from the config file. Default: 'dark'")

    # Verbose
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        help="Show smart-contract events by default")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")

    # peers. type=int so settings.set_max_peers always receives a number,
    # matching the int default.
    parser.add_argument("--maxpeers", action="store", type=int, default=5,
                        help="Max peers to use for P2P Joining")

    # Show the neo-python version
    parser.add_argument("--version", action="version",
                        version="neo-python v{version}".format(version=__version__))

    args = parser.parse_args()

    # BUG FIX: setting the datadir must come before setting the network,
    # else the wrong path is checked at net setup (it previously ran after).
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.privnet:
        try:
            settings.setup_privnet(args.privnet)
        except PrivnetConnectionError as e:
            logger.error(str(e))
            return
    elif args.coznet:
        settings.setup_coznet()

    if args.theme:
        preferences.set_theme(args.theme)

    if args.verbose:
        settings.set_log_smart_contract_events(True)

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)

    # Try to set up a notification db
    if NotificationDB.instance():
        NotificationDB.instance().start()

    # Start the prompt interface
    cli = PromptInterface()

    # Run things
    reactor.callInThread(cli.run)
    NodeLeader.Instance().Start()

    # reactor.run() is blocking, until `quit()` is called which stops the reactor.
    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
def configure(self, args):
    """Handle the ``config`` command: toggle logging/compiler flags, tune
    node block requests, or set maxpeers.

    BUG FIX (maxpeers branch): the previous code printed "Maxpeers set to"
    *before* calling settings.set_max_peers, passed the raw string through,
    and had no error handling — an invalid value announced success and then
    raised. The value is now parsed and applied first.
    """
    what = get_arg(args)
    if what == 'debug':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Debug logging is now enabled")
                settings.set_loglevel(logging.DEBUG)
            elif c1 == 'off' or c1 == '0':
                print("Debug logging is now disabled")
                settings.set_loglevel(logging.INFO)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'sc-events':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Smart contract event logging is now enabled")
                settings.set_log_smart_contract_events(True)
            elif c1 == 'off' or c1 == '0':
                print("Smart contract event logging is now disabled")
                settings.set_log_smart_contract_events(False)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'sc-debug-notify':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Smart contract emit Notify events on execution failure is now enabled")
                settings.set_emit_notify_events_on_sc_execution_error(True)
            elif c1 == 'off' or c1 == '0':
                print("Smart contract emit Notify events on execution failure is now disabled")
                settings.set_emit_notify_events_on_sc_execution_error(False)
            else:
                print("Cannot configure log. Please specify on|off")
        else:
            print("Cannot configure log. Please specify on|off")
    elif what == 'vm-log':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("VM instruction execution logging is now enabled")
                settings.set_log_vm_instruction(True)
            elif c1 == 'off' or c1 == '0':
                print("VM instruction execution logging is now disabled")
                settings.set_log_vm_instruction(False)
            else:
                print("Cannot configure VM instruction logging. Please specify on|off")
        else:
            print("Cannot configure VM instruction logging. Please specify on|off")
    elif what == 'node-requests':
        # Either (size, max) as two ints or a named preset.
        if len(args) in [2, 3]:
            if len(args) == 3:
                NodeLeader.Instance().setBlockReqSizeAndMax(int(args[1]), int(args[2]))
            elif len(args) == 2:
                NodeLeader.Instance().setBlockReqSizeByName(args[1])
        else:
            print("Invalid number of arguments")
    elif what == 'maxpeers':
        c1 = get_arg(args, 1)
        if c1 is not None:
            try:
                num_peers = int(c1)
                settings.set_max_peers(num_peers)
                print("Maxpeers set to ", num_peers)
            except ValueError:
                print("Please supply a positive integer for maxpeers")
        else:
            print("Maintaining current number of maxpeers")
    elif what == 'compiler-nep8':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
            if c1 == 'on' or c1 == '1':
                print("Compiler NEP8 instructions on")
                settings.COMPILER_NEP_8 = True
            elif c1 == 'off' or c1 == '0':
                print("Compiler NEP8 instructions off")
                settings.COMPILER_NEP_8 = False
            else:
                print("Cannot configure compiler NEP8 instructions. Please specify on|off")
        else:
            print("Cannot configure compiler NEP8 instructions. Please specify on|off")
    else:
        print(
            "Cannot configure %s try 'config sc-events on|off', 'config debug on|off', 'config sc-debug-notify on|off', 'config vm-log on|off', config compiler-nep8 on|off, or 'config maxpeers {num_peers}'" % what)
def main():
    """Parse CLI arguments, configure the network/theme/datadir/logging, then
    run the interactive prompt on top of the twisted reactor (blocking)."""
    parser = argparse.ArgumentParser()

    # Network group
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-m", "--mainnet", action="store_true", default=False,
                       help="Use MainNet instead of the default TestNet")
    group.add_argument(
        "-p", "--privnet", nargs="?", metavar="host", const=True, default=False,
        help="Use a private net instead of the default TestNet, optionally using a custom host (default: 127.0.0.1)")
    group.add_argument(
        "--coznet", action="store_true", default=False,
        help="Use the CoZ network instead of the default TestNet")
    group.add_argument("-c", "--config", action="store", help="Use a specific config file")

    # Theme
    parser.add_argument(
        "-t", "--set-default-theme", dest="theme", choices=["dark", "light"],
        help="Set the default theme to be loaded from the config file. Default: 'dark'")

    # Verbose
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        help="Show smart-contract events by default")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")

    # peers
    # NOTE(review): no type=int, so settings.set_max_peers receives a string
    # when the flag is passed on the command line — confirm it coerces.
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # Show the neo-python version
    parser.add_argument(
        "--version", action="version",
        version="neo-python v{version}".format(version=__version__))

    args = parser.parse_args()

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.privnet:
        try:
            settings.setup_privnet(args.privnet)
        except PrivnetConnectionError as e:
            logger.error(str(e))
            return
    elif args.coznet:
        settings.setup_coznet()

    # Logfile settings & setup
    logfile_fn = os.path.join(settings.DATA_DIR_PATH, 'prompt.log')
    logfile_max_bytes = 5e7  # 50 MB
    logfile_backup_count = 3  # 3 logfiles history
    settings.set_logfile(logfile_fn, logfile_max_bytes, logfile_backup_count)

    if args.theme:
        preferences.set_theme(args.theme)

    if args.verbose:
        settings.set_log_smart_contract_events(True)

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)

    # Try to set up a notification db
    if NotificationDB.instance():
        NotificationDB.instance().start()

    # Start the prompt interface
    fn_prompt_history = os.path.join(settings.DATA_DIR_PATH, '.prompt.py.history')
    cli = PromptInterface(fn_prompt_history)

    # Run things
    reactor.callInThread(cli.run)
    NodeLeader.Instance().Start()

    # reactor.run() is blocking, until `quit()` is called which stops the reactor.
    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
def run(self):
    """Start the node on an asyncio event loop: blockchain, notification DB,
    optional wallet, the P2P network service, and a graceful shutdown path."""
    # bl: changing to 15 so that we can get connections with a high number to improve transaction relayability
    settings.set_max_peers(15)
    loop = asyncio.get_event_loop()
    # because a KeyboardInterrupt is so violent it can shutdown the DB in an unpredictable state.
    # NOTE(review): `quit` here is presumably a module-level shutdown helper,
    # not the builtin — confirm against the enclosing module's imports.
    loop.add_signal_handler(SIGINT, quit)
    loop.add_signal_handler(SIGHUP, quit)
    loop.add_signal_handler(SIGTERM, quit)

    # Disable smart contract events for external smart contracts
    settings.set_log_smart_contract_events(False)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = Blockchain(DBFactory.getBlockchainDB(settings.chain_leveldb_path))
    Blockchain.RegisterBlockchain(blockchain)

    # Try to set up a notification db
    if NotificationDB.instance():
        NotificationDB.instance().start()

    # if the wallet was set up (by setting a path and loading the password), then open it!
    if self.wallet_path:
        self.wallet_open()

    # invoke any pre-start action that needs to occur before we start the loop.
    # optional for subclasses to implement.
    self.pre_start()

    blockchain_main_task = loop.create_task(self.run_loop())
    p2p = NetworkService()
    loop.create_task(p2p.start())

    async def shutdown():
        # Cancel every outstanding task and wait for each to unwind.
        all_tasks = asyncio.all_tasks()
        for task in all_tasks:
            task.cancel()
            with suppress(asyncio.CancelledError):
                await task

    try:
        loop.run_forever()
    except (SystemExit, KeyboardInterrupt):
        # Consume any pending exception from the main task so it is not
        # reported as "never retrieved" during teardown.
        with suppress((SystemExit, Exception)):
            blockchain_main_task.exception()
        loop.run_until_complete(p2p.shutdown())
        loop.run_until_complete(shutdown())
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.stop()
    finally:
        loop.close()

    # Run things
    if self.wallet_path:
        logger.info("Closing wallet file %s" % self.wallet_path)
        # NOTE(review): asyncio.run after loop.close() spins up a fresh event
        # loop just for the wallet coroutine — confirm wallet_close is async.
        asyncio.run(self.wallet_close())

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
def main():
    """Parse CLI arguments, configure network/logging/datadir, then run the
    JSON-RPC and/or REST API servers until the reactor stops.

    BUG FIXES:
    - --syslog-local choices were range(0, 7), excluding the valid facility 7.
    - The logfile/syslog conflict message was garbled.
    - Datadir is now set before the network, matching the sibling entry point
      whose comment notes the wrong path is checked otherwise.
    - The API apps were started with the blocking app.run(), so with both
      ports set the REST server (and the later reactor.run()) never ran;
      both are now registered as twisted endpoints like the sibling variant.
    """
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true",
                               help="Log to syslog instead of to log file ('user' is the default facility)")
    group_logging.add_argument("--syslog-local", action="store", type=int,
                               choices=range(0, 8), metavar="[0-7]",
                               help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store", help="Absolute path to use for database directories")

    # peers. type=int so settings.set_max_peers always receives a number.
    parser.add_argument("--maxpeers", action="store", type=int, default=5,
                        help="Max peers to use for P2P Joining")

    # host
    parser.add_argument("--host", action="store", type=str,
                        help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES,
                            backupCount=LOGFILE_BACKUP_COUNT,
                            disableStderrLogger=args.disable_stderr)
        else:
            print("Logging to stdout and stderr")

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc)
        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        api_server_rest = RestApi()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
def test_peer_adding(self):
    """Exercise NodeLeader peer lifecycle: startup connects to the seed list,
    the max-peers cap is enforced on new peers, removal frees a slot that a
    later peer announcement can fill, and a full drain triggers a restart.

    Twisted's reactor and NeoNode network methods are patched so no real
    connections are made; `mock_connect_tcp` registers the peer with the
    leader synchronously instead.
    """
    leader = NodeLeader.Instance()
    # Seed the block cache so ResetBlockRequestsAndCache() has something to clear.
    Blockchain.Default()._block_cache = {'hello': 1}

    def mock_call_later(delay, method, *args):
        # Run scheduled callbacks immediately (no reactor in unit tests).
        method(*args)

    def mock_connect_tcp(host, port, factory):
        # Simulate a successful TCP connect by handing the leader a connected node.
        node = NeoNode()
        node.endpoint = Endpoint(host, port)
        leader.AddConnectedPeer(node)
        return node

    def mock_disconnect(peer):
        return True

    def mock_send_msg(node, message):
        return True

    # Cap max peers at exactly the seed-list size so the limit checks below are deterministic.
    settings.set_max_peers(len(settings.SEED_LIST))

    with patch('twisted.internet.reactor.connectTCP', mock_connect_tcp):
        with patch('twisted.internet.reactor.callLater', mock_call_later):
            with patch('neo.Network.NeoNode.NeoNode.Disconnect', mock_disconnect):
                with patch('neo.Network.NeoNode.NeoNode.SendSerializedMessage', mock_send_msg):
                    leader.Start()
                    # Start() should have connected to every seed node.
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

                    # now test adding another
                    leader.RemoteNodePeerReceived('hello.com', 1234, 6)

                    # it shouldnt add anything so it doesnt go over max connected peers
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

                    # test adding peer directly: also rejected because the cap is reached
                    peer = NeoNode()
                    peer.endpoint = Endpoint('hellloo.com', 12344)
                    leader.ADDRS.append('hellloo.com:12344')
                    leader.AddConnectedPeer(peer)
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

                    # now get a peer and remove it; both Peers and ADDRS must shrink
                    peer = leader.Peers[0]
                    leader.RemoveConnectedPeer(peer)
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST) - 1)
                    self.assertEqual(len(leader.ADDRS), len(settings.SEED_LIST) - 1)

                    # now test adding another — with a free slot the announcement is accepted
                    leader.RemoteNodePeerReceived('hello.com', 1234, 6)
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

                    # now on updated max peers test: shrink then grow the cap
                    # (smoke test only — no assertions on the resulting peer set)
                    leader.OnUpdatedMaxPeers(settings.CONNECTED_PEER_MAX, settings.CONNECTED_PEER_MAX - 1)
                    leader.OnUpdatedMaxPeers(settings.CONNECTED_PEER_MAX - 1, 10)

                    # now if we remove all peers, it should restart
                    peers = leader.Peers[:]  # iterate a copy; removal mutates leader.Peers
                    for peer in peers:
                        leader.RemoveConnectedPeer(peer)

                    # and peers should be equal to the seed list
                    self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

                    # test reset
                    leader.ResetBlockRequestsAndCache()
                    self.assertEqual(Blockchain.Default()._block_cache, {})
def main():
    """CLI entry point for the JSON-RPC / REST API node.

    Parses command-line arguments, configures network/logging/wallet,
    starts the blockchain, NodeLeader and the requested API server(s),
    then blocks in the twisted reactor until shutdown. Returns early
    (without starting anything) on argument-validation failure.
    """
    parser = argparse.ArgumentParser()

    # Network options (mutually exclusive; exactly one is required)
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true",
                               help="Log to syslog instead of to log file ('user' is the default facility)")
    # FIX: choices was range(0, 7), which rejected 7 even though syslog defines
    # facilities local0..local7 and the help text promises the range 0-7.
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 8), metavar="[0-7]",
                               help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store", help="Absolute path to use for database directories")

    # peers
    parser.add_argument("--maxpeers", action="store", default=5, help="Max peers to use for P2P Joining")

    # If a wallet should be opened
    parser.add_argument("--wallet", action="store", help="Open wallet. Will allow you to use methods that require an open wallet")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()
    # print(args)

    # --- argument validation; bail out before any side effects ---
    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        # FIX: message previously read "Cannot only use logfile or syslog at once"
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        try:
            settings.set_max_peers(args.maxpeers)
            print("Maxpeers set to ", args.maxpeers)
        except ValueError:
            print("Please supply a positive integer for maxpeers")
            return

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES,
                            backupCount=LOGFILE_BACKUP_COUNT,
                            disableStderrLogger=args.disable_stderr)
        else:
            print("Logging to stdout and stderr")

    if args.wallet:
        if not os.path.exists(args.wallet):
            print("Wallet file not found")
            return

        # Allow unattended startup via an environment-supplied password.
        passwd = os.environ.get('NEO_PYTHON_JSONRPC_WALLET_PASSWORD', None)
        if not passwd:
            passwd = prompt("[password]> ", is_password=True)

        password_key = to_aes_key(passwd)
        try:
            wallet = UserWallet.Open(args.wallet, password_key)
        except Exception as e:
            print(f"Could not open wallet {e}")
            return
    else:
        wallet = None

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    def loopingCallErrorHandler(error):
        # Errback for the wallet LoopingCall below.
        logger.info("Error in loop: %s " % error)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    start_block_persisting()

    # If a wallet is open, make sure it processes blocks
    if wallet:
        walletdb_loop = task.LoopingCall(wallet.ProcessBlocks)
        wallet_loop_deferred = walletdb_loop.start(1)
        wallet_loop_deferred.addErrback(loopingCallErrorHandler)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        try:
            rpc_class = load_class_from_path(settings.RPC_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rpc = rpc_class(args.port_rpc, wallet=wallet)
        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        try:
            rest_api = load_class_from_path(settings.REST_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rest = rest_api()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))

    # Stop persisting before the reactor tears down, then block until shutdown.
    reactor.addSystemEventTrigger('before', 'shutdown', stop_block_persisting)
    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
    if wallet:
        wallet.Close()
def test_config_maxpeers(self):
    """Exercise `config maxpeers` end to end: no-arg reporting, a valid
    change, the upper limit of 10, invalid/negative input, and shrinking
    maxpeers below both the connected-peer count and the minpeers value
    (which must disconnect surplus peers and lower minpeers too).

    Relies on singleton NodeManager state; assumes settings start with
    CONNECTED_PEER_MAX == 10 and CONNECTED_PEER_MIN == 4 — TODO confirm
    against the test fixture/defaults.
    """
    nodemgr = NodeManager()
    nodemgr.reset_for_test()

    # test no input and verify output confirming current maxpeers
    with patch('sys.stdout', new=StringIO()) as mock_print:
        args = ['maxpeers']
        res = CommandConfig().execute(args)
        self.assertFalse(res)
        self.assertEqual(settings.CONNECTED_PEER_MAX, 10)
        self.assertIn(f"Maintaining maxpeers at {settings.CONNECTED_PEER_MAX}", mock_print.getvalue())

    # test changing the number of maxpeers
    with patch('sys.stdout', new=StringIO()) as mock_print:
        args = ['maxpeers', "6"]
        res = CommandConfig().execute(args)
        self.assertTrue(res)
        self.assertEqual(settings.CONNECTED_PEER_MAX, 6)
        # execute() returns the new value on success
        self.assertEqual(int(res), settings.CONNECTED_PEER_MAX)
        self.assertIn(f"Maxpeers set to {settings.CONNECTED_PEER_MAX}", mock_print.getvalue())

    # test trying to set maxpeers > 10
    with patch('sys.stdout', new=StringIO()) as mock_print:
        args = ['maxpeers', "12"]
        res = CommandConfig().execute(args)
        self.assertFalse(res)
        self.assertIn("Max peers is limited to 10", mock_print.getvalue())

    # test bad input
    with patch('sys.stdout', new=StringIO()) as mock_print:
        args = ['maxpeers', "blah"]
        res = CommandConfig().execute(args)
        self.assertFalse(res)
        self.assertIn("Invalid argument", mock_print.getvalue())

    # test negative number
    with patch('sys.stdout', new=StringIO()) as mock_print:
        args = ['maxpeers', "-1"]
        res = CommandConfig().execute(args)
        self.assertFalse(res)
        self.assertIn("Please supply a positive integer for maxpeers", mock_print.getvalue())

    # test if the new maxpeers < settings.CONNECTED_PEER_MAX
    # first make sure we have a predictable state: two "connected" nodes
    node1 = NeoNode(object, object)
    node2 = NeoNode(object, object)
    node1.address = "127.0.0.1:20333"
    node2.address = "127.0.0.1:20334"
    nodemgr.nodes = [node1, node2]
    nodemgr.loop = object  # placeholder loop so disconnect scheduling has something to reference

    with patch("neo.Network.node.NeoNode.disconnect") as mock_disconnect:
        # first test if the number of connected peers !< new maxpeers
        self.assertEqual(nodemgr.max_clients, 6)  # verifying the current number of maxpeers
        with patch('sys.stdout', new=StringIO()) as mock_print:
            args = ['maxpeers', "4"]
            res = CommandConfig().execute(args)
            self.assertTrue(res)
            self.assertEqual(nodemgr.max_clients, 4)
            # only 2 nodes connected (< 4), so nothing should be disconnected
            self.assertFalse(mock_disconnect.called)
            self.assertEqual(settings.CONNECTED_PEER_MAX, 4)
            self.assertIn(f"Maxpeers set to {settings.CONNECTED_PEER_MAX}", mock_print.getvalue())

        # now test if the number of connected peers < new maxpeers and < current minpeers
        self.assertEqual(settings.CONNECTED_PEER_MIN, 4)  # verifying the current minpeers value
        with patch('sys.stdout', new=StringIO()) as mock_print:
            with patch('neo.Prompt.Commands.Config.wait_for'):
                args = ['maxpeers', "1"]
                res = CommandConfig().execute(args)
                self.assertTrue(res)
                self.assertEqual(nodemgr.max_clients, 1)
                # 2 connected > new max of 1, so a disconnect must have happened
                self.assertTrue(mock_disconnect.called)
                self.assertEqual(settings.CONNECTED_PEER_MAX, 1)
                self.assertIn(f"Maxpeers set to {settings.CONNECTED_PEER_MAX}", mock_print.getvalue())
                # minpeers is pulled down to the new (lower) maxpeers
                self.assertEqual(settings.CONNECTED_PEER_MIN, 1)
                self.assertIn(f"Minpeers set to {settings.CONNECTED_PEER_MIN}", mock_print.getvalue())

    # reset for future tests
    nodemgr.reset_for_test()
    nodemgr.loop = None
    settings.set_max_peers(10)
def configure(self, args):
    """Handle the `config <what> <value>` prompt command.

    Supported settings: debug logging, smart-contract event logging,
    sc-debug-notify, VM instruction logging (all on|off) and maxpeers
    (positive integer). Prints the outcome; returns None.

    FIX: the original called `get_arg(args, 1).lower()` before the
    None-check, so a missing value raised AttributeError (get_arg returns
    None) and the "Please specify on|off" branches were unreachable.
    The value is now fetched first and lowered only when present; an
    unrecognized value also gets the usage message instead of being
    silently ignored.
    """
    what = get_arg(args)

    if what == 'debug':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
        if c1 == 'on' or c1 == '1':
            print("Debug logging is now enabled")
            settings.set_loglevel(logging.DEBUG)
        elif c1 == 'off' or c1 == '0':
            print("Debug logging is now disabled")
            settings.set_loglevel(logging.INFO)
        else:
            print("Cannot configure log. Please specify on|off")

    elif what == 'sc-events':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
        if c1 == 'on' or c1 == '1':
            print("Smart contract event logging is now enabled")
            settings.set_log_smart_contract_events(True)
        elif c1 == 'off' or c1 == '0':
            print("Smart contract event logging is now disabled")
            settings.set_log_smart_contract_events(False)
        else:
            print("Cannot configure log. Please specify on|off")

    elif what == 'sc-debug-notify':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
        if c1 == 'on' or c1 == '1':
            print("Smart contract emit Notify events on execution failure is now enabled")
            settings.set_emit_notify_events_on_sc_execution_error(True)
        elif c1 == 'off' or c1 == '0':
            print("Smart contract emit Notify events on execution failure is now disabled")
            settings.set_emit_notify_events_on_sc_execution_error(False)
        else:
            print("Cannot configure log. Please specify on|off")

    elif what == 'vm-log':
        c1 = get_arg(args, 1)
        if c1 is not None:
            c1 = c1.lower()
        if c1 == 'on' or c1 == '1':
            print("VM instruction execution logging is now enabled")
            settings.set_log_vm_instruction(True)
        elif c1 == 'off' or c1 == '0':
            print("VM instruction execution logging is now disabled")
            settings.set_log_vm_instruction(False)
        else:
            print("Cannot configure VM instruction logging. Please specify on|off")

    elif what == 'maxpeers':
        try:
            # int() raises TypeError on None (missing arg) and ValueError on
            # non-numeric input; both are handled by the except below.
            num_peers = int(get_arg(args, 1))
            if num_peers > 0:
                old_max_peers = settings.CONNECTED_PEER_MAX
                settings.set_max_peers(num_peers)
                # Let the node leader grow/shrink its peer set to the new cap.
                NodeLeader.Instance().OnUpdatedMaxPeers(old_max_peers, num_peers)
                print("set max peers to %s " % num_peers)
            else:
                print("Please specify integer greater than zero")
        except (TypeError, ValueError):
            print("Cannot configure max peers. Please specify an integer greater than 0")

    else:
        print("Cannot configure %s try 'config sc-events on|off', 'config debug on|off', 'config sc-debug-notify on|off' or 'config vm-log on|off'" % what)