async def _monitor_node_height(self) -> None:
    """Periodically validate height freshness of all connected nodes.

    A node that has not delivered a height update within
    ``MAX_HEIGHT_UPDATE_DURATION`` seconds is disconnected for poor
    performance; every other node is sent a PING carrying our local
    height to solicit a fresh update.
    """
    now = datetime.utcnow().timestamp()
    for node in self.nodes:
        is_stale = now - node.best_height_last_update > self.MAX_HEIGHT_UPDATE_DURATION
        if is_stale:
            logger.debug(
                f"Disconnecting node {node.nodeid} Reason: max height update threshold exceeded."
            )
            # NOTE(review): this disconnect task is not stored anywhere; confirm
            # it cannot be garbage collected before it completes.
            asyncio.create_task(
                node.disconnect(
                    reason=payloads.DisconnectReason.POOR_PERFORMANCE))
            continue
        logger.debug(
            f"Asking node {node.nodeid_human} to send us a height update (PING)"
        )
        # Request latest height from node; without a database there is no
        # local chain, so we advertise height 0.
        local_height = max(0, blockchain.Blockchain().height) if settings.database else 0
        ping_msg = message.Message(
            msg_type=message.MessageType.PING,
            payload=payloads.PingPayload(height=local_height))
        task = asyncio.create_task(node.send_message(ping_msg))
        # Track the task and remove it from the list once it finishes.
        self.tasks.append(task)
        task.add_done_callback(lambda fut: self.tasks.remove(fut))
def setUpClass(cls) -> None:
    """One-time test fixture setup.

    Instantiates the Blockchain singleton first, then configures the
    network as a single-validator committee.
    """
    # this triggers deployment of the native contracts
    blockchain.Blockchain()
    settings.network.standby_committee = [
        '02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765'
    ]
    settings.network.validators_count = 1
async def send_ping(self):
    """Send a PING message carrying our local chain height to the remote node."""
    # Without a database there is no local chain; advertise height 0.
    if settings.database:
        local_height = max(0, blockchain.Blockchain().height)
    else:
        local_height = 0
    await self.send_message(
        message.Message(msg_type=message.MessageType.PING,
                        payload=payloads.PingPayload(local_height)))
def setUpClass(cls) -> None:
    """One-time test fixture setup.

    Configures a single-validator network, derives the validator account
    from the standby committee key, then initialises the Blockchain
    singleton.
    """
    settings.network.standby_committee = ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
    settings.network.validators_count = 1
    # Public key of the single standby committee member.
    cls.validator_public_key = cryptography.ECPoint.deserialize_from_bytes(
        binascii.unhexlify(settings.network.standby_committee[0])
    )
    # Script hash of the 1-of-1 multi-signature contract for that key.
    cls.validator_account = to_script_hash(
        contracts.Contract.create_multisig_redeemscript(1, [cls.validator_public_key]))
    # Instantiate the Blockchain singleton AFTER the network settings are in place.
    blockchain.Blockchain()
def init(self):
    """Initialise manager references and internal bookkeeping state."""
    # Both managers are singletons shared with the rest of the system.
    self.nodemgr = convenience.NodeManager()
    self.ledger = blockchain.Blockchain()
    # Blocks received from the network but not yet persisted.
    self.block_cache = []
    # Outstanding block requests keyed by height.
    self.block_requests = {}  # type: Dict[int, convenience.RequestInfo]
    self.shutting_down = False
    self._is_persisting_blocks = False
    self._tasks = []
    self._service_task = None
    # Subscribe so we get notified whenever a block arrives from the network.
    msgrouter.on_block += self.on_block_received
def handler_ping(self, msg: message.Message) -> None:
    """ Handler for a message with the PING type.

        Args:
            msg:
    """
    # Reply with a PONG carrying our local height (0 when no database).
    local_height = max(0, blockchain.Blockchain().height) if settings.database else 0
    pong = message.Message(msg_type=message.MessageType.PONG,
                           payload=payloads.PingPayload(height=local_height))
    self._create_task_with_cleanup(self.send_message(pong))
def contract_update(engine: contracts.ApplicationEngine, script: bytes, manifest: bytes) -> None:
    """Interop handler: replace the calling contract's script and manifest.

    Charges GAS for storage, validates the new script, migrates any existing
    storage entries to the new contract hash and installs the new manifest.

    Args:
        engine: the application engine executing the update request.
        script: the new contract script (NEF byte code).
        manifest: the new manifest as JSON-encoded bytes.

    Raises:
        ValueError: if the calling contract cannot be found, the script or
            manifest length is out of bounds, the new hash equals the current
            one or already exists, the manifest does not match the script, or
            the new contract drops storage support while storage exists.
    """
    script_len = len(script)
    manifest_len = len(manifest)
    # TODO: In preview 4 revert back to
    # engine.add_gas(engine.STORAGE_PRICE * (script_len + manifest_len))
    # They made a mistake in their storage price calculation logic where manifest size is never taken into account
    engine.add_gas(engine.STORAGE_PRICE * script_len)
    contract = engine.snapshot.contracts.try_get(engine.current_scripthash, read_only=True)
    if contract is None:
        raise ValueError("Can't find contract to update")
    if script_len == 0 or script_len > engine.MAX_CONTRACT_LENGTH:
        raise ValueError(f"Invalid script length: {script_len}")
    hash_ = to_script_hash(script)
    # Updating to the same hash, or to a hash that already has a deployed
    # contract, is a no-op and therefore rejected.
    if hash_ == engine.current_scripthash or engine.snapshot.contracts.try_get(hash_) is not None:
        raise ValueError("Nothing to update")
    # Remember storage support of the old contract before replacing it.
    old_contract_has_storage = contract.has_storage
    contract = storage.ContractState(script, contract.manifest)
    contract.manifest.abi.contract_hash = hash_
    engine.snapshot.contracts.put(contract)
    # migrate storage to new contract hash
    with blockchain.Blockchain().backend.get_snapshotview() as snapshot:
        if old_contract_has_storage:
            for key, value in snapshot.storages.find(engine.current_scripthash, b''):
                # delete the old storage
                snapshot.storages.delete(key)
                # update key to new contract hash
                key.contract = contract.script_hash()
                # now persist all data under new contract key
                snapshot.storages.put(key, value)
            snapshot.commit()
    engine.snapshot.contracts.delete(engine.current_scripthash)
    # NOTE(review): manifest validation happens AFTER the contract/storage have
    # already been mutated above — confirm this ordering is intentional.
    if manifest_len == 0 or manifest_len > contracts.ContractManifest.MAX_LENGTH:
        raise ValueError(f"Invalid manifest length: {manifest_len}")
    contract.manifest = contracts.ContractManifest.from_json(json.loads(manifest.decode()))
    if not contract.manifest.is_valid(contract.script_hash()):
        raise ValueError("Error: manifest does not match with script")
    if (not contract.has_storage and len(list(engine.snapshot.storages.find(contract.script_hash(),
                                                                            key_prefix=b''))) != 0):
        raise ValueError("Error: New contract does not support storage while old contract has existing storage")
def new_engine(
        previous_engine: ApplicationEngine = None) -> ApplicationEngine:
    """Create an ApplicationEngine in test mode.

    When a previous engine is supplied its snapshot is reused; otherwise the
    Blockchain singleton is reset and rebuilt (with genesis block) to obtain
    a clean snapshot.
    """
    tx = payloads.Transaction._serializable_init()
    if previous_engine:
        return ApplicationEngine(contracts.TriggerType.APPLICATION, tx,
                                 previous_engine.snapshot, 0, test_mode=True)
    # Nullify the singleton so a fresh backend/snapshot is created.
    blockchain.Blockchain.__it__ = None
    snapshot = blockchain.Blockchain(
        store_genesis_block=True).currentSnapshot  # blockchain is singleton
    return ApplicationEngine(contracts.TriggerType.APPLICATION, tx, snapshot, 0, test_mode=True)
def test_engine(has_container=False, has_snapshot=False, default_script=True,
                call_flags=contracts.CallFlags.ALL):
    """Build an ApplicationEngine for tests.

    Optionally attaches a transaction container and/or a clean snapshot,
    and loads a single-RET script by default.
    """
    # this little hack basically nullifies the singleton behaviour and ensures we create
    # a new instance every time we call it. This in turn gives us a clean backend/snapshot
    blockchain.Blockchain.__it__ = None
    snapshot = blockchain.Blockchain(store_genesis_block=True).currentSnapshot
    tx = payloads.Transaction._serializable_init()
    engine = contracts.ApplicationEngine(contracts.TriggerType.APPLICATION,
                                         tx if has_container else None,
                                         snapshot if has_snapshot else None,
                                         0,
                                         test_mode=True)
    if default_script:
        engine.load_script_with_callflags(vm.Script(b'\x40'), call_flags)  # OpCode::RET
    return engine
async def main():
    """Connect to the RC2 TestNet and print the local chain height forever."""
    # Configure network to RC2 TestNet
    # Values are taken from config.json on the neo-cli github repo
    settings.network.magic = 844378958
    settings.network.seedlist = ['seed1t.neo.org:20333']
    settings.network.standby_committee = [
        "023e9b32ea89b94d066e649b124fd50e396ee91369e8e2a6ae1b11c170d022256d",
        "03009b7540e10f2562e5fd8fac9eaec25166a58b26e412348ff5a86927bfac22a2",
        "02ba2c70f5996f357a43198705859fae2cfea13e1172962800772b3d588a9d4abd",
        "03408dcd416396f64783ac587ea1e1593c57d9fea880c8a6a1920e92a259477806",
        "02a7834be9b32e2981d157cb5bbd3acb42cfd11ea5c3b10224d7a44e98c5910f1b",
        "0214baf0ceea3a66f17e7e1e839ea25fd8bed6cd82e6bb6e68250189065f44ff01",
        "030205e9cefaea5a1dfc580af20c8d5aa2468bb0148f1a5e4605fc622c80e604ba"
    ]
    settings.network.validators_count = 7

    # Choose the type of storage, uncomment the next line to use leveldb (requires that libleveldb can be found)
    # or use the in-memory DB
    # bc = blockchain.Blockchain(db.LevelDB({'path':'/tmp/neo3/'}))
    bc = blockchain.Blockchain(db.MemoryDB())

    # Uncomment the next line if you're interested in seeing debug information about the network and block syncing process
    # enable_network_logging()

    # Start the helper classes that will connect to the network and sync the chain
    node_mgr = convenience.NodeManager()
    node_mgr.start()
    sync_mgr = convenience.SyncManager()
    await sync_mgr.start()

    async def print_height():
        # Report the local chain height every 2 seconds.
        while True:
            print(f"Local chain height: {bc.height}")
            await asyncio.sleep(2)

    # Start an endless loop informing us about our local chain height
    await print_height()
def handler_inv(self, msg: message.Message) -> None:
    """ Handler for a message with the INV type.

        Args:
            msg:
    """
    payload = cast(payloads.InventoryPayload, msg.payload)
    if payload.type != payloads.InventoryType.BLOCK:
        logger.debug(
            f"Message with type INV received. No processing for payload type "  # type:ignore
            f"{payload.type.name} implemented")
        return
    # neo-cli broadcasts INV messages on a regular interval. We can use those as trigger to request
    # their latest block height
    if not payload.hashes:
        return
    local_height = max(0, blockchain.Blockchain().height) if settings.database else 0
    ping = message.Message(
        msg_type=message.MessageType.PING,
        payload=payloads.PingPayload(height=local_height))
    self._create_task_with_cleanup(self.send_message(ping))
import json
from neo3 import settings, wallet, contracts, vm, blockchain
from neo3.network import payloads, convenience

# First configure the network settings. The information is taken from `config.json` in neo-cli
settings.network.magic = 692804366  # `Network` number
settings.network.standby_committee = [
    "03a60c1deaf147b10691c344c76e5f3dac83b555fdd5a3f8d9e2f623b3d1af8df6"
]
settings.network.validators_count = 1  # set to the same number as the committee size above
settings.network.seedlist = ['127.0.0.1:20333']

# This initialises the local chain with the genesis block and allows us to get a snapshot of the database. This is
# required for calculating network fees automatically. Always call this AFTER setting the network settings otherwise network
# syncing and other parts of the system will fail.
bc = blockchain.Blockchain()
snapshot = bc.currentSnapshot

# We start with adding a wallet with a multi-signature account that we will later need to sign the transaction (as well as
# obtain the address for to specify as source funds).
# There are different means of creating/importing a wallet, choose your flavour.

# This creates a wallet and adds the consensus node account (address) where the key is protected by the password "123"
w = wallet.Wallet()
w.account_add(
    wallet.Account.from_wif(
        "L2aFaQabd35NspvBzC9xPUzKP1if5WgaC2uw4SkviA58DGvccUEy", "123"))
# See also Account.from_* for alternative constructors

# Alternatively import a wallet by uncommenting the 3 lines below
# with open('wallet.json') as f: