def getPeerProfileInstance(self, peer):
    '''Gets a peer profile instance from the list of profiles, by address name'''
    # Prefer an already-loaded profile for this address.
    for profile in self.peerProfiles:
        if profile.address == peer:
            return profile
    # Profile not loaded: hand back a fresh one.
    # connectNewPeer adds it the list on connect.
    return onionrpeers.PeerProfiles(peer, self._core)
def getPeerProfileInstance(self, peer):
    """Gets a peer profile instance from the list of profiles"""
    # Reuse the loaded profile when this address is already tracked.
    for existing in self.kv.get('peerProfiles'):
        if existing.address == peer:
            return existing
    # Not tracked yet: create one and add it to the shared list.
    # connectNewPeer also adds it to the list on connect.
    new_profile = onionrpeers.PeerProfiles(peer)
    self.kv.get('peerProfiles').append(new_profile)
    return new_profile
def download_blocks_from_communicator(shared_state: "TooMany"):
    """Use communicator instance to download blocks in the comms's queue.

    Iterates the communicator's 'blockQueue' (in the DeadSimpleKV held by
    shared_state), fetches each block from a known or random online peer,
    verifies hash/metadata/POW, saves valid blocks, and blacklists or
    penalizes peers that serve invalid data.

    Side effects: mutates kv 'blockQueue' and 'currentDownloading', writes
    to block storage/metadata DB, and spawns an upload event for saved
    blocks. Reads the module-level storage_counter for disk limits.
    Returns None.
    """
    blacklist = onionrblacklist.OnionrBlackList()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(kv.get('blockQueue')):
        count += 1
        try:
            blockPeers = list(kv.get('blockQueue')[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(shared_state, blockHash):
            continue

        if kv.get('shutdown') or not kv.get('isOnline') or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break

        # Do not download blocks being downloaded
        if blockHash in kv.get('currentDownloading'):
            continue

        if len(kv.get('onlinePeers')) == 0:
            break

        # So we can avoid concurrent downloading in other threads of same block
        kv.get('currentDownloading').append(blockHash)
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(kv)
            except onionrexceptions.OnlinePeerNeeded:
                # BUGFIX: un-claim the block before skipping, otherwise the
                # hash stays in 'currentDownloading' forever and the block
                # can never be retried by this or any other thread
                kv.get('currentDownloading').remove(blockHash)
                continue
        else:
            SystemRandom().shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not kv.get('shutdown') and peerUsed.strip() != '':
            logger.info(f"Attempting to download %s from {peerUsed}..."
                        % (blockHash[:12], ))
        content = peeraction.peer_action(
            shared_state,
            peerUsed, 'getdata/' + blockHash,
            max_resp_size=3000000)  # block content from random peer
        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                # bytes on some versions for some reason
                realHash = realHash.decode()
            except AttributeError:
                pass
            if realHash == blockHash:
                # returns tuple(metadata, meta), meta is also in metadata
                metas = blockmetadata.get_block_metadata_from_data(content)
                metadata = metas[0]

                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.PlaintextNotSupported:
                    logger.debug(
                        f"Not saving {blockHash} due to plaintext not enabled")
                    removeFromQueue = True
                    # BUGFIX: reset the flag, otherwise a stale True left over
                    # from a previous iteration would let this block be saved
                    metadata_validation_result = False
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                # check if metadata is valid, and verify nonce
                if metadata_validation_result:
                    # check if POW is enough/correct
                    if onionrcrypto.cryptoutils.verify_POW(content):
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            # add block to meta db
                            blockmetadb.add_to_block_DB(
                                blockHash, dataSaved=True)
                            # caches block metadata values to block database
                            blockmetadata.process_block_metadata(blockHash)
                            spawn(
                                local_command,
                                f'/daemon-event/upload_event',
                                post=True,
                                is_json=True,
                                post_data={'block': blockHash})
                    else:
                        logger.warn('POW failed for block %s.' %
                                    (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' %
                                    (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.'
                                    % (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(
                    content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block
                # (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if
                    # 404 since its likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
        if removeFromQueue:
            try:
                # remove from block queue both if success or false
                del kv.get('blockQueue')[blockHash]
                if count == LOG_SKIP_COUNT:
                    logger.info('%s blocks remaining in queue' %
                                [len(kv.get('blockQueue'))], terminal=True)
                    count = 0
            except KeyError:
                pass
        kv.get('currentDownloading').remove(blockHash)
def connect_new_peer_to_communicator(shared_state, peer='', useBootstrap=False):
    """Try to connect to a new peer and register it with the communicator.

    shared_state: object providing get_by_string("DeadSimpleKV") for the
        communicator's shared key/value state.
    peer: optional specific transport address to test; when '' the candidate
        list is built from the known adders / score-sorted peer list.
    useBootstrap: when True, merge the bootstrap list into the candidates.

    Returns the connected address on success, False if nothing connected,
    or None if shutdown is requested mid-scan.
    Raises onionrexceptions.InvalidAddress for an invalid explicit peer.
    """
    retData = False
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    # 'tried' aliases the shared offlinePeers list; appends below persist there
    tried = kv.get('offlinePeers')
    transports = gettransports.get()
    if peer != '':
        if stringvalidators.validate_transport(peer):
            peerList = [peer]
        else:
            raise onionrexceptions.InvalidAddress(
                'Will not attempt connection test to invalid address')
    else:
        peerList = keydb.listkeys.list_adders()

    mainPeerList = keydb.listkeys.list_adders()
    # NOTE: this overwrites the list built above with the score-sorted one
    peerList = onionrpeers.get_score_sorted_peer_list()

    """
    If we don't have enough peers connected or random chance, select new peers to try
    """
    if len(peerList) < 8 or secrets.randbelow(4) == 3:
        tryingNew = []
        for x in kv.get('newPeers'):
            if x not in peerList:
                peerList.append(x)
                tryingNew.append(x)
        for i in tryingNew:
            kv.get('newPeers').remove(i)

    if len(peerList) == 0 or useBootstrap:
        # Avoid duplicating bootstrap addresses in peerList
        if config.get('general.use_bootstrap_list', True):
            bootstrappeers.add_bootstrap_list_to_peer_list(kv, peerList)

    for address in peerList:
        address = address.strip()
        # Don't connect to our own address
        if address in transports:
            continue
        """Don't connect to invalid address or if its already been tried/connected, or if its cooled down
        """
        if len(address) == 0 or address in tried \
                or address in kv.get('onlinePeers') \
                or address in kv.get('cooldownPeer'):
            continue
        if kv.get('shutdown'):
            return
        # Ping a peer,
        ret = peeraction.peer_action(shared_state, address, 'ping')
        if ret == 'pong!':
            time.sleep(0.1)
            if address not in mainPeerList:
                # Add a peer to our list if it isn't already since it connected
                networkmerger.mergeAdders(address)
            if address not in kv.get('onlinePeers'):
                logger.info('Connected to ' + address, terminal=True)
                kv.get('onlinePeers').append(address)
                kv.get('connectTimes')[address] = epoch.get_epoch()
            retData = address
            # add peer to profile list if they're not in it
            for profile in kv.get('peerProfiles'):
                if profile.address == address:
                    break
            else:
                kv.get('peerProfiles').append(
                    onionrpeers.PeerProfiles(address))
            # Stop scanning after the first successful connection
            break
        else:
            # Mark a peer as tried if they failed to respond to ping
            tried.append(address)
            logger.debug('Failed to connect to %s: %s ' % (address, ret))
    return retData
def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
    """Try to connect to a new peer and register it on the communicator.

    comm_inst: communicator daemon instance holding peer state and _core.
    peer: optional specific address to test; '' builds candidates from the
        known adders / score-sorted peer list.
    useBootstrap: when True, merge the bootstrap list into the candidates.

    Returns the connected address on success, False if nothing connected,
    or None if shutdown is requested mid-scan.
    Raises onionrexceptions.InvalidAddress for an invalid explicit peer.
    """
    config = comm_inst._core.config
    retData = False
    # aliases the communicator's offlinePeers list; appends persist there
    tried = comm_inst.offlinePeers
    if peer != '':
        if comm_inst._core._utils.validateID(peer):
            peerList = [peer]
        else:
            raise onionrexceptions.InvalidAddress(
                'Will not attempt connection test to invalid address')
    else:
        peerList = comm_inst._core.listAdders()

    mainPeerList = comm_inst._core.listAdders()
    # NOTE: this overwrites the list built above with the score-sorted one
    peerList = onionrpeers.getScoreSortedPeerList(comm_inst._core)

    # If we don't have enough peers connected or random chance,
    # select new peers to try
    if len(peerList) < 8 or secrets.randbelow(4) == 3:
        tryingNew = []
        for x in comm_inst.newPeers:
            if x not in peerList:
                peerList.append(x)
                tryingNew.append(x)
        for i in tryingNew:
            comm_inst.newPeers.remove(i)

    if len(peerList) == 0 or useBootstrap:
        # Avoid duplicating bootstrap addresses in peerList
        comm_inst.addBootstrapListToPeerList(peerList)

    for address in peerList:
        # Skip v3 onions (62-char addresses) when v3 support is disabled
        if not config.get('tor.v3onions') and len(address) == 62:
            continue
        # Don't connect to our own address
        if address == comm_inst._core.hsAddress:
            continue
        # Don't connect to invalid address or if its already been
        # tried/connected, or if its cooled down
        if len(address) == 0 or address in tried \
                or address in comm_inst.onlinePeers \
                or address in comm_inst.cooldownPeer:
            continue
        if comm_inst.shutdown:
            return
        # Ping a peer,
        if comm_inst.peerAction(address, 'ping') == 'pong!':
            time.sleep(0.1)
            if address not in mainPeerList:
                # Add a peer to our list if it isn't already
                # since it successfully connected
                networkmerger.mergeAdders(address, comm_inst._core)
            if address not in comm_inst.onlinePeers:
                logger.info('Connected to ' + address)
                comm_inst.onlinePeers.append(address)
                comm_inst.connectTimes[address] = \
                    comm_inst._core._utils.getEpoch()
            retData = address
            # add peer to profile list if they're not in it
            for profile in comm_inst.peerProfiles:
                if profile.address == address:
                    break
            else:
                comm_inst.peerProfiles.append(
                    onionrpeers.PeerProfiles(address, comm_inst._core))
            # Stop scanning after the first successful connection
            break
        else:
            # Mark a peer as tried if they failed to respond to ping
            tried.append(address)
            logger.debug('Failed to connect to ' + address)
    return retData
def download_blocks_from_communicator(comm_inst):
    """Download every block in the communicator's queue from online peers.

    For each queued hash: pick a peer, fetch the data, verify its sha3 hash,
    metadata, and POW, then save it to the block DB; penalize peers that
    serve data not matching the requested hash. Mutates comm_inst.blockQueue
    and comm_inst.currentDownloading, and decrements the 'getBlocks' thread
    counter on exit.
    """
    assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
    for blockHash in list(comm_inst.blockQueue):
        if len(comm_inst.onlinePeers) == 0:
            break
        triedQueuePeers = []  # List of peers we've tried for a block
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True
        if comm_inst.shutdown or not comm_inst.isOnline:
            # Exit loop if shutting down or offline
            break
        # Do not download blocks being downloaded or
        # that are already saved (edge cases)
        if blockHash in comm_inst.currentDownloading:
            #logger.debug('Already downloading block %s...' % blockHash)
            continue
        if blockHash in comm_inst._core.getBlockList():
            #logger.debug('Block %s is already saved.' % (blockHash,))
            try:
                del comm_inst.blockQueue[blockHash]
            except KeyError:
                pass
            continue
        if comm_inst._core._blacklist.inBlacklist(blockHash):
            continue
        if comm_inst._core._utils.storageCounter.isFull():
            # Disk allocation reached; stop downloading entirely
            break
        # So we can avoid concurrent downloading
        # in other threads of same block
        comm_inst.currentDownloading.append(blockHash)
        if len(blockPeers) == 0:
            peerUsed = comm_inst.pickOnlinePeer()
        else:
            blockPeers = comm_inst._core._crypto.randomShuffle(blockPeers)
            peerUsed = blockPeers.pop(0)
        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..."
                        % (blockHash[:12], peerUsed))
        # block content from random peer (includes metadata)
        content = comm_inst.peerAction(peerUsed, 'getdata/' + blockHash)
        if content != False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass
            realHash = comm_inst._core._crypto.sha3Hash(content)
            try:
                # bytes on some versions for some reason
                realHash = realHash.decode()
            except AttributeError:
                pass
            if realHash == blockHash:
                # decode here because sha3Hash needs bytes above
                content = content.decode()
                # returns tuple(metadata, meta), meta is also in metadata
                metas = comm_inst._core._utils.getBlockMetadataFromData(
                    content)
                metadata = metas[0]
                # check if metadata is valid, and verify nonce
                if comm_inst._core._utils.validateMetadata(
                        metadata, metas[2]):
                    # check if POW is enough/correct
                    if comm_inst._core._crypto.verifyPow(content):
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            comm_inst._core.setData(content)
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % blockHash)
                            removeFromQueue = False
                        else:
                            comm_inst._core.addToBlockDB(blockHash,
                                                         dataSaved=True)
                            # caches block metadata values to block database
                            comm_inst._core._utils.processBlockMetadata(
                                blockHash)
                    else:
                        logger.warn('POW failed for block %s.' % blockHash)
                else:
                    if comm_inst._core._blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' %
                                    (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.'
                                    % blockHash)
                        comm_inst._core._blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = comm_inst._core._crypto.sha3Hash(
                    content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block
                # (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed,
                                         comm_inst._core).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if
                    # 404 since its likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
        if removeFromQueue:
            try:
                # remove from block queue both if success or false
                del comm_inst.blockQueue[blockHash]
            except KeyError:
                pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    '''Use communicator instance to download blocks in the comms's queue.

    For each queued hash: pick a peer, fetch the data, verify its sha3 hash,
    metadata, and POW, then save it to the block DB; penalize peers that
    serve data not matching the requested hash. Mutates comm_inst.blockQueue
    and comm_inst.currentDownloading, and decrements the 'getBlocks' thread
    counter on exit.
    '''
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1
        if len(comm_inst.onlinePeers) == 0:
            break
        # (removed unused local 'triedQueuePeers' — it was never read)
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(comm_inst, blockHash):
            continue

        if comm_inst.shutdown or not comm_inst.isOnline or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break

        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue

        # So we can avoid concurrent downloading in other threads of same block
        comm_inst.currentDownloading.append(blockHash)
        if len(blockPeers) == 0:
            peerUsed = onlinepeers.pick_online_peer(comm_inst)
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..."
                        % (blockHash[:12], peerUsed))
        content = peeraction.peer_action(
            comm_inst,
            peerUsed, 'getdata/' + blockHash,
            max_resp_size=3000000
        )  # block content from random peer (includes metadata)
        # idiomatic identity test instead of '!= False' (same outcomes here)
        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                # bytes on some versions for some reason
                realHash = realHash.decode()
            except AttributeError:
                pass
            if realHash == blockHash:
                # returns tuple(metadata, meta), meta is also in metadata
                metas = blockmetadata.get_block_metadata_from_data(content)
                metadata = metas[0]

                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                # check if metadata is valid, and verify nonce
                if metadata_validation_result:
                    # check if POW is enough/correct
                    if onionrcrypto.cryptoutils.verify_POW(content):
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            # add block to meta db
                            blockmetadb.add_to_block_DB(
                                blockHash, dataSaved=True)
                            # caches block metadata values to block database
                            blockmetadata.process_block_metadata(blockHash)
                    else:
                        logger.warn('POW failed for block %s.' %
                                    (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' %
                                    (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.'
                                    % (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(
                    content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block
                # (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if
                    # 404 since its likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
        if removeFromQueue:
            try:
                # remove from block queue both if success or false
                del comm_inst.blockQueue[blockHash]
                if count == LOG_SKIP_COUNT:
                    logger.info('%s blocks remaining in queue' %
                                [len(comm_inst.blockQueue)], terminal=True)
                    count = 0
            except KeyError:
                pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')