def request_nodes(self, node, port):
    """Fetch the peer list from a remote node.

    Args:
        node: host of the remote node.
        port: port the remote node's API listens on.

    Returns:
        The decoded JSON payload (the remote node's peer list) on HTTP 200,
        or None when the node answers with any other status or is
        unreachable (in which case downtime is recorded for that peer).
    """
    url = self.NODES_URL.format(node, port)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.json()
    except requests.exceptions.RequestException:
        # NOTE: original bound the exception as `re`, shadowing the stdlib
        # `re` module name; the exception object was unused, so drop the binding.
        self.peers.record_downtime(node)
        logger.debug('Downtime recorded for host {}'.format(node))
    # Explicit None for both the non-200 and the unreachable case
    # (the original fell off the end implicitly on non-200).
    return None
def push_synchronize(self, node, blocks_inv, current_height, host):
    """Push the local block inventory to a remote node to initiate a sync.

    Args:
        node: host of the remote node to contact.
        blocks_inv: local block inventory to advertise.
        current_height: height of the local chain tip.
        host: our own host, included so the peer can reply.
    """
    data = {
        "host": host,
        "type": MessageType.SYNCHRONIZE.value,
        "data": {"height": current_height, "blocks_inv": blocks_inv}
    }
    logger.debug("sending sync request to peer at: {}".format(node))
    url = self.INBOX_URL.format(node, self.FULL_NODE_PORT)
    try:
        # Response body is not used; a failure only marks the peer down.
        requests.post(url, json=data)
    except requests.exceptions.RequestException:
        # logger.warn is a deprecated alias for logger.warning.
        logger.warning("Request Exception with host: {}".format(node))
        self.peers.record_downtime(node)
    return
def broadcast_block_header(self, block_header, host):
    """Broadcast a locally mined block header to every known peer.

    Used only when the block header originated (was mined) locally.

    Args:
        block_header: header object providing ``to_json()``.
        host: our own host, included so peers can reply.
    """
    data = {
        "host": host,
        "type": MessageType.BLOCK_HEADER.value,
        "data": block_header.to_json()
    }
    logger.debug("broadcasting block header: {}".format(data))
    for node in self.peers.get_all_peers():
        url = self.INBOX_URL.format(node, self.FULL_NODE_PORT)
        try:
            requests.post(url, json=data)
        except requests.exceptions.RequestException:
            # One unreachable peer must not stop the broadcast to the rest;
            # record the failure and continue the loop.
            # (logger.warn is a deprecated alias for logger.warning.)
            logger.warning("Request Exception with host: {}".format(node))
            self.peers.record_downtime(node)
    return
def broadcast_unconfirmed_transaction_inv(self, tx_hashes, host):
    """(Re)broadcast hashes of newly received-and-added transactions to all peers.

    Args:
        tx_hashes: transaction hashes to advertise.
        host: our own host, included so peers can reply.
    """
    data = {
        "host": host,
        "type": MessageType.UNCONFIRMED_TRANSACTION_INV.value,
        "data": tx_hashes
    }
    logger.debug("broadcasting transaction inv: {}".format(data))
    for node in self.peers.get_all_peers():
        url = self.INBOX_URL.format(node, self.FULL_NODE_PORT)
        try:
            requests.post(url, json=data)
        except requests.exceptions.RequestException:
            # One unreachable peer must not stop the broadcast to the rest;
            # record the failure and continue the loop.
            # (logger.warn is a deprecated alias for logger.warning.)
            logger.warning("Request Exception with host: {}".format(node))
            self.peers.record_downtime(node)
    return
def broadcast_block_inv(self, block_hashes, host):
    """(Re)broadcast hashes of newly received-and-added blocks to all peers.

    Args:
        block_hashes: block hashes to advertise.
        host: our own host, included so peers can reply.
    """
    data = {
        "host": host,
        "type": MessageType.BLOCK_INV.value,
        "data": block_hashes
    }
    logger.debug("broadcasting block inv: {}".format(data))
    for node in self.peers.get_all_peers():
        url = self.INBOX_URL.format(node, self.FULL_NODE_PORT)
        try:
            requests.post(url, json=data)
        except requests.exceptions.RequestException:
            # One unreachable peer must not stop the broadcast to the rest;
            # record the failure and continue the loop.
            # (logger.warn is a deprecated alias for logger.warning.)
            logger.warning("Request Exception with host: {}".format(node))
            self.peers.record_downtime(node)
    return
def shutdown(self):
    """Tear down all child processes of this full node.

    Terminates, in order: the API (bottle) server, every worker
    process, and finally the queue process.
    """
    logger.debug("full node on %s shutting down...", self.HOST)
    self.bottle_process.terminate()

    logger.debug("worker process(es) shutting down...")
    for worker_process in self.worker_processes:
        worker_process.terminate()

    logger.debug("queue process shutting down...")
    self.queue_process.terminate()
def start(self):
    """Launch this full node's child processes, then check peers.

    Starts, in order: the queue process, the worker pool, and the
    API (bottle) server; finishes by probing known peers.
    """
    logger.debug("queue process starting...")
    self.queue_process = mp.Process(target=Queue.start_queue)
    self.queue_process.start()

    logger.debug("worker process(es) starting...")
    self.worker_processes = []
    for _ in range(self.WORKER_PROCESSES):
        self.worker_processes.append(mp.Process(target=self.worker))
    for worker_process in self.worker_processes:
        worker_process.start()

    logger.debug("full node server starting on %s...", self.HOST)
    self.bottle_process = mp.Process(
        target=self.app.run,
        kwargs={"host": "0.0.0.0", "port": self.FULL_NODE_PORT, "debug": True})
    self.bottle_process.start()

    self.check_peers()
def shutdown(self):
    """Terminate the background mining process."""
    logger.debug(
        "mining process with reward address %s shutting down...",
        self.REWARD_ADDRESS)
    self.miner_process.terminate()
def start(self):
    """Spawn the background mining process and begin mining."""
    logger.debug(
        "mining process starting with reward address %s...",
        self.REWARD_ADDRESS)
    miner = mp.Process(target=self.mine)
    self.miner_process = miner
    miner.start()