def run(self):
    """Broadcast queued addr entries to connected peers until shutdown.

    Drains ``addrQueue`` once per pass, forwards each address to a random
    selection of established connections (skipping a peer's own address
    and addresses that peer sent us), then acknowledges the queue tasks.
    """
    while not state.shutdown:
        chunk = []
        # Drain everything currently queued without blocking.
        while True:
            try:
                data = addrQueue.get(False)
                chunk.append(data)
            except Queue.Empty:
                break

        if chunk:
            # Choose peers randomly
            connections = BMConnectionPool().establishedConnections()
            randomshuffle(connections)
            for connection in connections:
                # Shuffle per connection so each peer receives the
                # addresses in a different order.
                randomshuffle(chunk)
                filtered = []
                for stream, peer, seen, destination in chunk:
                    # peer's own address or address received from peer
                    if connection.destination in (peer, destination):
                        continue
                    if stream not in connection.streams:
                        continue
                    filtered.append((stream, peer, seen))
                if filtered:
                    connection.append_write_buf(assemble_addr(filtered))

        addrQueue.iterate()
        # Acknowledge exactly one queue task per entry drained above.
        # (The original reused the loop variable ``i`` here, shadowing the
        # connection loop's variable; ``_`` makes the intent explicit.)
        for _ in chunk:
            addrQueue.task_done()
        self.stop.wait(1)
def bm_command_getdata(self):
    """Serve a peer's getdata request.

    Sends each requested object from the inventory, but stops at the
    first hash that is either unknown or a Dandelion stem object we
    never advertised to this peer, applying an anti-intersection delay
    in both cases.
    """
    items = self.decode_payload_content("l32s")
    # skip? — still inside a previously imposed back-off window.
    if time.time() < self.skipUntil:
        return True
    # TODO make this more asynchronous
    helper_random.randomshuffle(items)
    for hashId in map(str, items):
        unoffered_stem = Dandelion().hasHash(hashId) and \
            self != Dandelion().objectChildStem(hashId)
        if unoffered_stem:
            self.antiIntersectionDelay()
            logger.info(
                '%s asked for a stem object we didn\'t offer to it.',
                self.destination)
            break
        try:
            objectPayload = Inventory()[hashId].payload
        except KeyError:
            self.antiIntersectionDelay()
            logger.info(
                '%s asked for an object we don\'t have.',
                self.destination)
            break
        self.append_write_buf(
            protocol.CreatePacket('object', objectPayload))
    # Aborting after the first missing/stem object is more secure when
    # using random reordering, as the recipient won't know exactly which
    # objects we refuse to deliver.
    return True
def run(self):
    """Upload loop: push objects that peers requested until stopped.

    Each pass picks peers in random order, packs a random batch of
    their pending requests into one write, and sleeps a second when
    nothing was uploaded.
    """
    while not self._stopped:
        uploaded = 0
        # Choose uploading peers randomly
        connections = BMConnectionPool().establishedConnections()
        helper_random.randomshuffle(connections)
        for i in connections:
            now = time.time()
            # avoid unnecessary delay
            if i.skipUntil >= now:
                continue
            # Don't grow an already-full write buffer any further.
            if len(i.write_buf) > self.maxBufSize:
                continue
            try:
                # Random batch of hashes this peer asked us for.
                request = i.pendingUpload.randomKeys(
                    RandomTrackingDict.maxPending)
            except KeyError:
                continue
            payload = bytearray()
            chunk_count = 0
            for chunk in request:
                # Removed from pending whether or not we end up sending it.
                del i.pendingUpload[chunk]
                if Dandelion().hasHash(chunk) and \
                        i != Dandelion().objectChildStem(chunk):
                    # Peer asked for a stem object we never offered it:
                    # penalize and abort this batch.
                    i.antiIntersectionDelay()
                    self.logger.info(
                        '%s asked for a stem object we didn\'t offer to it.',
                        i.destination)
                    break
                try:
                    payload.extend(
                        protocol.CreatePacket(
                            'object', Inventory()[chunk].payload))
                    chunk_count += 1
                except KeyError:
                    # Unknown object: same penalty, abort the batch.
                    i.antiIntersectionDelay()
                    self.logger.info(
                        '%s asked for an object we don\'t have.',
                        i.destination)
                    break
            if not chunk_count:
                continue
            # Objects packed before an abort above are still sent.
            i.append_write_buf(payload)
            self.logger.debug(
                '%s:%i Uploading %i objects',
                i.destination.host, i.destination.port, chunk_count)
            uploaded += chunk_count
        # Idle for a second when this pass uploaded nothing.
        if not uploaded:
            self.stop.wait(1)
def run(self):
    """Download loop: request objects we are missing from peers.

    Each pass spreads a bounded request budget over peers picked in
    random order, sends ``getdata`` packets for offered objects not yet
    in the inventory, periodically cleans the pending table, and sleeps
    a second when nothing was requested.
    """
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly.  Use the pool helper instead
        # of concatenating inboundConnections.values() and
        # outboundConnections.values() by hand: that concatenation only
        # works on Python 2 list-returning .values(), and the helper is
        # what the sibling threads in this file already use.
        connections = BMConnectionPool().establishedConnections()
        helper_random.randomshuffle(connections)
        try:
            # Spread the request budget evenly over available peers.
            requestChunk = max(int(
                min(DownloadThread.maxRequestChunk, len(missingObjects))
                / len(connections)), 1)
        except ZeroDivisionError:
            # No connections at all.
            requestChunk = 1
        for i in connections:
            now = time.time()
            # avoid unnecessary delay
            if i.skipUntil >= now:
                continue
            try:
                request = i.objectsNewToMe.randomKeys(requestChunk)
            except KeyError:
                continue
            payload = bytearray()
            chunkCount = 0
            for chunk in request:
                # Already in inventory (and not a Dandelion stem object):
                # drop it from the peer's offer list instead of asking.
                if chunk in Inventory() and not Dandelion().hasHash(chunk):
                    try:
                        del i.objectsNewToMe[chunk]
                    except KeyError:
                        pass
                    continue
                payload.extend(chunk)
                chunkCount += 1
                # Remember when we asked, for later cleanup/retry.
                missingObjects[chunk] = now
            if not chunkCount:
                continue
            # Prepend the object-count varint to finish the getdata payload.
            payload[0:0] = addresses.encodeVarint(chunkCount)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug(
                "%s:%i Requesting %i objects",
                i.destination.host, i.destination.port, chunkCount)
            requested += chunkCount
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(1)