def run(self):
    """Drain invQueue and advertise each object to eligible peers,
    routing per-connection via Dandelion stem ('dinv') or fluff ('inv')."""
    while not state.shutdown:
        chunk = []
        while True:
            # Dandelion fluff trigger by expiration
            Dandelion().expire()
            try:
                data = invQueue.get(False)
                chunk.append((data[0], data[1]))
                # locally generated
                if len(data) == 2 or data[2] is None:
                    self.handleLocallyGenerated(data[0], data[1])
            except Queue.Empty:
                break

        if chunk:
            for connection in BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                fluffs = []
                stems = []
                for inv in chunk:
                    # skip peers not subscribed to this object's stream
                    if inv[0] not in connection.streams:
                        continue
                    try:
                        with connection.objectsNewToThemLock:
                            del connection.objectsNewToThem[inv[1]]
                    except KeyError:
                        continue
                    try:
                        if connection == Dandelion().objectChildStem(inv[1]):
                            # Fluff trigger by RNG
                            # auto-ignore if config set to 0, i.e. dandelion is off
                            if randint(1, 100) >= state.dandelion:
                                fluffs.append(inv[1])
                            # send a dinv only if the stem node supports dandelion
                            elif connection.services & protocol.NODE_DANDELION > 0:
                                stems.append(inv[1])
                            else:
                                fluffs.append(inv[1])
                    except KeyError:
                        # not in the stem map: advertise normally
                        fluffs.append(inv[1])

                if fluffs:
                    # shuffle so peers can't infer ordering information
                    shuffle(fluffs)
                    connection.append_write_buf(protocol.CreatePacket(
                        'inv',
                        addresses.encodeVarint(len(fluffs)) + "".join(fluffs)))
                if stems:
                    shuffle(stems)
                    connection.append_write_buf(protocol.CreatePacket(
                        'dinv',
                        addresses.encodeVarint(len(stems)) + "".join(stems)))

        invQueue.iterate()
        for i in range(len(chunk)):
            invQueue.task_done()

        # periodically re-randomise dandelion stem assignments
        if Dandelion().refresh < time():
            Dandelion().reRandomiseStems()

        self.stop.wait(1)
def sendverack(self):
    """Queue a verack packet; if the peer has already acked us, complete the handshake."""
    logger.debug('Sending verack')
    packet = protocol.CreatePacket('verack')
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
    self.verackSent = True
    if not self.verackReceived:
        return
    self.connectionFullyEstablished()
def bm_command_getdata(self): items = self.decode_payload_content("l32s") # skip? if time.time() < self.skipUntil: return True #TODO make this more asynchronous helper_random.randomshuffle(items) for i in map(str, items): if Dandelion().hasHash(i) and \ self != Dandelion().objectChildStem(i): self.antiIntersectionDelay() logger.info( '%s asked for a stem object we didn\'t offer to it.', self.destination) break else: try: self.append_write_buf( protocol.CreatePacket('object', Inventory()[i].payload)) except KeyError: self.antiIntersectionDelay() logger.info('%s asked for an object we don\'t have.', self.destination) break # I think that aborting after the first missing/stem object is more secure # when using random reordering, as the recipient won't know exactly which objects we refuse to deliver return True
def bm_command_version(self):
    """Incoming version message: parse fields, log them, reply with verack
    (and our own version on inbound connections), then advance the state
    machine to TLS init or fully-established once both veracks are in."""
    self.remoteProtocolVersion, self.services, self.timestamp, self.sockNode, self.peerNode, self.nonce, \
        self.userAgent, self.streams = self.decode_payload_content("IQQiiQlsLv")
    # keep nonce in wire format (big-endian 8 bytes) for self-connection checks
    self.nonce = struct.pack('>Q', self.nonce)
    self.timeOffset = self.timestamp - int(time.time())
    logger.debug("remoteProtocolVersion: %i", self.remoteProtocolVersion)
    logger.debug("services: 0x%08X", self.services)
    logger.debug("time offset: %i", self.timestamp - int(time.time()))
    logger.debug("my external IP: %s", self.sockNode.host)
    logger.debug("remote node incoming address: %s:%i",
                 self.destination.host, self.peerNode.port)
    logger.debug("user agent: %s", self.userAgent)
    logger.debug("streams: [%s]", ",".join(map(str, self.streams)))
    if not self.peerValidityChecks():
        # TODO ABORT
        return True
    #shared.connectedHostsList[self.destination] = self.streams[0]
    self.append_write_buf(protocol.CreatePacket('verack'))
    self.verackSent = True
    if not self.isOutbound:
        # inbound peers haven't seen our version yet; send it now
        self.append_write_buf(protocol.assembleVersionMessage(
            self.destination.host, self.destination.port,
            network.connectionpool.BMConnectionPool().streams, True,
            nodeid=self.nodeid))
        #print "%s:%i: Sending version" % (self.destination.host, self.destination.port)
    if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
            protocol.haveSSL(not self.isOutbound)):
        self.isSSL = True
    if self.verackReceived:
        if self.isSSL:
            self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
            return False
        self.set_state("connection_fully_established",
                       length=self.payloadLength, expectBytes=0)
        return False
    return True
def sendgetdata(self, hashes):
    """Queue a getdata request for the given inventory hashes (no-op if empty)."""
    if not hashes:
        return
    count = len(hashes)
    logger.debug('sending getdata to retrieve %i objects', count)
    payload = encodeVarint(count) + ''.join(hashes)
    packet = protocol.CreatePacket('getdata', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet), False)
def sendChunk():
    """Send one chunk of inv entries in one command"""
    # objectCount/payload come from the enclosing scope
    if not objectCount:
        return
    logger.debug(
        'Sending huge inv message with %i objects to just this'
        ' one peer', objectCount)
    packet = protocol.CreatePacket(
        'inv', addresses.encodeVarint(objectCount) + payload)
    self.append_write_buf(packet)
def run(self):
    """Request missing objects from random peers in bounded chunks,
    tracking per-hash pending state in the shared missingObjects map."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = BMConnectionPool().inboundConnections.values() + \
            BMConnectionPool().outboundConnections.values()
        random.shuffle(connections)
        try:
            # split the global chunk budget across peers
            requestChunk = max(
                int(DownloadThread.maxRequestChunk / len(connections)), 1)
        except ZeroDivisionError:
            requestChunk = 1
        for i in connections:
            now = time.time()
            timedOut = now - DownloadThread.requestTimeout
            # this may take a while, but it needs a consistency so I think it's better to lock a bigger chunk
            # NOTE(review): lock scope reconstructed from mangled source — assumed to
            # cover selection and the pending-marking below; confirm against history.
            with i.objectsNewToMeLock:
                try:
                    # entries already requested (value False) and not yet timed out
                    downloadPending = len(list((
                        k for k, v in i.objectsNewToMe.iteritems()
                        if k in missingObjects and
                        missingObjects[k] > timedOut and not v)))
                except KeyError:
                    continue
                if downloadPending >= DownloadThread.minPending:
                    continue
                # keys with True values in the dict
                try:
                    request = list((
                        k for k, v in i.objectsNewToMe.iteritems()
                        if k not in missingObjects or
                        missingObjects[k] < timedOut))
                except KeyError:
                    continue
                random.shuffle(request)
                if not request:
                    continue
                if len(request) > requestChunk - downloadPending:
                    request = request[:requestChunk - downloadPending]
                # mark them as pending
                for k in request:
                    i.objectsNewToMe[k] = False
                    missingObjects[k] = now
            payload = bytearray()
            payload.extend(addresses.encodeVarint(len(request)))
            for chunk in request:
                payload.extend(chunk)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port, len(request))
            requested += len(request)
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(5)
def run(self):
    """Serve queued object uploads to random established peers, aborting a
    peer's batch on the first missing or stem-only object."""
    while not self._stopped:
        uploaded = 0
        # Choose uploading peers randomly
        connections = BMConnectionPool().establishedConnections()
        helper_random.randomshuffle(connections)
        for i in connections:
            now = time.time()
            # avoid unnecessary delay
            if i.skipUntil >= now:
                continue
            # don't grow an already-large write buffer further
            if len(i.write_buf) > self.maxBufSize:
                continue
            try:
                request = i.pendingUpload.randomKeys(
                    RandomTrackingDict.maxPending)
            except KeyError:
                continue
            payload = bytearray()
            chunk_count = 0
            for chunk in request:
                del i.pendingUpload[chunk]
                # don't fulfil getdata for objects this peer should only
                # receive via a Dandelion stem route
                if Dandelion().hasHash(chunk) and \
                        i != Dandelion().objectChildStem(chunk):
                    i.antiIntersectionDelay()
                    self.logger.info(
                        '%s asked for a stem object we didn\'t offer to it.',
                        i.destination)
                    break
                try:
                    payload.extend(protocol.CreatePacket(
                        'object', Inventory()[chunk].payload))
                    chunk_count += 1
                except KeyError:
                    i.antiIntersectionDelay()
                    self.logger.info(
                        '%s asked for an object we don\'t have.',
                        i.destination)
                    break
            if not chunk_count:
                continue
            i.append_write_buf(payload)
            self.logger.debug('%s:%i Uploading %i objects',
                              i.destination.host, i.destination.port,
                              chunk_count)
            uploaded += chunk_count
        if not uploaded:
            self.stop.wait(1)
def run(self):
    """Request objects new to us from random fully-established peers,
    batching hashes into one getdata per peer per cycle."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = [
            x for x in BMConnectionPool().inboundConnections.values() +
            BMConnectionPool().outboundConnections.values()
            if x.fullyEstablished]
        helper_random.randomshuffle(connections)
        try:
            # per-peer budget: never more than what's actually missing
            requestChunk = max(int(
                min(DownloadThread.maxRequestChunk, len(missingObjects))
                / len(connections)), 1)
        except ZeroDivisionError:
            requestChunk = 1
        for i in connections:
            now = time.time()
            # avoid unnecessary delay
            if i.skipUntil >= now:
                continue
            try:
                request = i.objectsNewToMe.randomKeys(requestChunk)
            except KeyError:
                continue
            payload = bytearray()
            chunkCount = 0
            for chunk in request:
                # already have it and it's not stem-held: drop from wishlist
                if chunk in Inventory() and not Dandelion().hasHash(chunk):
                    try:
                        del i.objectsNewToMe[chunk]
                    except KeyError:
                        pass
                    continue
                payload.extend(chunk)
                chunkCount += 1
                missingObjects[chunk] = now
            if not chunkCount:
                continue
            # prepend the varint count to the concatenated hashes
            payload[0:0] = addresses.encodeVarint(chunkCount)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port, chunkCount)
            requested += chunkCount
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(1)
def bm_command_getdata(self):
    """Incoming getdata: queue each requested object we hold; delay a bit
    for each one we don't have."""
    requested = self.decode_payload_content("l32s")
    # skip?
    if time.time() < self.skipUntil:
        return True
    #TODO make this more asynchronous and allow reordering
    for invHash in requested:
        try:
            obj = Inventory()[invHash].payload
        except KeyError:
            self.antiIntersectionDelay()
            logger.info('%s asked for an object we don\'t have.',
                        self.destination)
            continue
        self.append_write_buf(protocol.CreatePacket('object', obj))
    return True
def bm_command_version(self):
    """
    Incoming version.
    Parse and log, remember important things, like streams, bitfields, etc.
    """
    decoded = self.decode_payload_content("IQQiiQlslv")
    (self.remoteProtocolVersion, self.services, self.timestamp,
     self.sockNode, self.peerNode, self.nonce, self.userAgent) = decoded[:7]
    # everything after the fixed fields is the peer's stream list
    self.streams = decoded[7:]
    # keep nonce in wire format (big-endian 8 bytes)
    self.nonce = struct.pack('>Q', self.nonce)
    self.timeOffset = self.timestamp - int(time.time())
    logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion)
    logger.debug('services: 0x%08X', self.services)
    logger.debug('time offset: %i', self.timeOffset)
    logger.debug('my external IP: %s', self.sockNode.host)
    logger.debug('remote node incoming address: %s:%i',
                 self.destination.host, self.peerNode.port)
    logger.debug('user agent: %s', self.userAgent)
    logger.debug('streams: [%s]', ','.join(map(str, self.streams)))
    if not self.peerValidityChecks():
        # ABORT afterwards
        return True
    self.append_write_buf(protocol.CreatePacket('verack'))
    self.verackSent = True
    if not self.isOutbound:
        # inbound peers haven't seen our version yet; send it now
        self.append_write_buf(protocol.assembleVersionMessage(
            self.destination.host, self.destination.port,
            connectionpool.BMConnectionPool().streams, True,
            nodeid=self.nodeid))
        logger.debug('%(host)s:%(port)i sending version',
                     self.destination._asdict())
    if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
            protocol.haveSSL(not self.isOutbound)):
        self.isSSL = True
    if not self.verackReceived:
        return True
    self.set_state(
        "tls_init" if self.isSSL else "connection_fully_established",
        length=self.payloadLength, expectBytes=0)
    return False
def run(self):
    """Request timed-out / unrequested objects from random peers,
    tracking pending requests in self.pending."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = BMConnectionPool().inboundConnections.values() + \
            BMConnectionPool().outboundConnections.values()
        random.shuffle(connections)
        for i in connections:
            now = time.time()
            timedOut = now - DownloadThread.requestTimeout
            # this may take a while, but it needs a consistency so I think it's better to lock a bigger chunk
            # NOTE(review): lock scope reconstructed from mangled source — assumed to
            # cover selection and the pending-marking below; confirm against history.
            with i.objectsNewToMeLock:
                downloadPending = len(list((
                    k for k, v in i.objectsNewToMe.iteritems()
                    if k in self.pending and self.pending[k] > timedOut)))
                if downloadPending >= DownloadThread.maxPending:
                    continue
                # keys with True values in the dict
                request = list((
                    k for k, v in i.objectsNewToMe.iteritems()
                    if k not in self.pending or self.pending[k] < timedOut))
                if not request:
                    continue
                if len(request) > DownloadThread.requestChunk - downloadPending:
                    request = request[:DownloadThread.requestChunk - downloadPending]
                # mark them as pending
                for k in request:
                    i.objectsNewToMe[k] = False
                    self.pending[k] = now
            payload = addresses.encodeVarint(len(request)) + ''.join(request)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port, len(request))
            requested += len(request)
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(1)
def assembleAddr(peerList):
    """Build one or more 'addr' packets advertising *peerList* and return
    them concatenated as bytes (b'' for an empty list).

    peerList is an iterable of (stream, peer, timestamp) entries, split
    into packets of at most BMProto.maxAddrCount addresses each.

    Bug fix: a single state.Peer argument was "wrapped" as ``(peerList)``,
    which is just a parenthesised expression, not a one-element sequence;
    use a real list instead.
    """
    if isinstance(peerList, state.Peer):
        # was `(peerList)` — plain parentheses do not create a tuple
        # NOTE(review): downstream unpacking expects (stream, peer, timestamp)
        # entries; confirm callers pass a bare Peer only where that holds.
        peerList = [peerList]
    if not peerList:
        return b''
    retval = b''
    for i in range(0, len(peerList), BMProto.maxAddrCount):
        batch = peerList[i:i + BMProto.maxAddrCount]
        payload = addresses.encodeVarint(len(batch))
        for address in batch:
            stream, peer, timestamp = address
            payload += struct.pack('>Q', timestamp)  # 64-bit time
            payload += struct.pack('>I', stream)
            payload += struct.pack(
                '>q', 1)  # service bit flags offered by this node
            payload += protocol.encodeHost(peer.host)
            payload += struct.pack('>H', peer.port)  # remote port
        retval += protocol.CreatePacket('addr', payload)
    return retval
def run(self):
    """Drain invQueue, register each received object with the connection
    pool, then advertise the batch to all peers via 'inv' packets."""
    while not state.shutdown:
        chunk = []
        while True:
            try:
                data = invQueue.get(False)
                # 2-tuple: locally generated; 3-tuple: received from a peer
                if len(data) == 2:
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1])
                else:
                    source = BMConnectionPool().getConnectionByAddr(
                        data[2])
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1], source)
                chunk.append((data[0], data[1]))
            except Queue.Empty:
                break
            # connection not found, handle it as if generated locally
            except KeyError:
                BMConnectionPool().handleReceivedObject(data[0], data[1])
        if chunk:
            for connection in BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                hashes = []
                for inv in chunk:
                    # skip peers not subscribed to this object's stream
                    if inv[0] not in connection.streams:
                        continue
                    try:
                        with connection.objectsNewToThemLock:
                            del connection.objectsNewToThem[inv[1]]
                        hashes.append(inv[1])
                    except KeyError:
                        continue
                if hashes:
                    connection.append_write_buf(protocol.CreatePacket(
                        'inv',
                        addresses.encodeVarint(len(hashes)) + "".join(hashes)))
        invQueue.iterate()
        self.stop.wait(1)
def sendpong(self, payload):
    """Queue a pong reply carrying the given payload back to the peer."""
    logger.debug('Sending pong')
    packet = protocol.CreatePacket('pong', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
def loop(self):  # pylint: disable=too-many-branches,too-many-statements
    """Main Connectionpool's loop"""
    # pylint: disable=too-many-locals
    # defaults to empty loop if outbound connections are maxed
    spawnConnections = False
    acceptConnections = True
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'dontconnect'):
        acceptConnections = False
    elif BMConfigParser().safeGetBoolean(
            'bitmessagesettings', 'sendoutgoingconnections'):
        spawnConnections = True
    socksproxytype = BMConfigParser().safeGet(
        'bitmessagesettings', 'socksproxytype', '')
    onionsocksproxytype = BMConfigParser().safeGet(
        'bitmessagesettings', 'onionsocksproxytype', '')
    # behind a SOCKS proxy without sockslisten and without a hidden
    # service, we can't accept incoming connections
    if (socksproxytype[:5] == 'SOCKS'
            and not BMConfigParser().safeGetBoolean(
                'bitmessagesettings', 'sockslisten')
            and '.onion' not in BMConfigParser().safeGet(
                'bitmessagesettings', 'onionhostname', '')):
        acceptConnections = False

    # pylint: disable=too-many-nested-blocks
    if spawnConnections:
        if not knownnodes.knownNodesActual:
            self.startBootstrappers()
            knownnodes.knownNodesActual = True
        if not self._bootstrapped:
            self._bootstrapped = True
            Proxy.proxy = (
                BMConfigParser().safeGet(
                    'bitmessagesettings', 'sockshostname'),
                BMConfigParser().safeGetInt(
                    'bitmessagesettings', 'socksport'))
            # TODO AUTH
            # TODO reset based on GUI settings changes
            try:
                if not onionsocksproxytype.startswith("SOCKS"):
                    raise ValueError
                Proxy.onion_proxy = (
                    BMConfigParser().safeGet(
                        'network', 'onionsockshostname', None),
                    BMConfigParser().safeGet(
                        'network', 'onionsocksport', None))
            except ValueError:
                Proxy.onion_proxy = None
        established = sum(
            1 for c in self.outboundConnections.values()
            if (c.connected and c.fullyEstablished))
        pending = len(self.outboundConnections) - established
        if established < BMConfigParser().safeGetInt(
                'bitmessagesettings', 'maxoutboundconnections'):
            for i in range(
                    state.maximumNumberOfHalfOpenConnections - pending):
                try:
                    chosen = self.trustedPeer or chooseConnection(
                        helper_random.randomchoice(self.streams))
                except ValueError:
                    continue
                if chosen in self.outboundConnections:
                    continue
                if chosen.host in self.inboundConnections:
                    continue
                # don't connect to self
                if chosen in state.ownAddresses:
                    continue
                # don't connect to the hosts from the same
                # network group, defense against sibyl attacks
                host_network_group = protocol.network_group(chosen.host)
                same_group = False
                for j in self.outboundConnections.values():
                    if host_network_group == j.network_group:
                        same_group = True
                        if chosen.host == j.destination.host:
                            knownnodes.decreaseRating(chosen)
                        break
                if same_group:
                    continue
                try:
                    # route .onion targets through the onion proxy if set
                    if chosen.host.endswith(".onion") and Proxy.onion_proxy:
                        if onionsocksproxytype == "SOCKS5":
                            self.addConnection(Socks5BMConnection(chosen))
                        elif onionsocksproxytype == "SOCKS4a":
                            self.addConnection(Socks4aBMConnection(chosen))
                    elif socksproxytype == "SOCKS5":
                        self.addConnection(Socks5BMConnection(chosen))
                    elif socksproxytype == "SOCKS4a":
                        self.addConnection(Socks4aBMConnection(chosen))
                    else:
                        self.addConnection(TCPConnection(chosen))
                except socket.error as e:
                    if e.errno == errno.ENETUNREACH:
                        continue
                self._lastSpawned = time.time()
    else:
        for i in self.connections():
            # FIXME: rating will be increased after next connection
            i.handle_close()

    if acceptConnections:
        if not self.listeningSockets:
            if BMConfigParser().safeGet('network', 'bind') == '':
                self.startListening()
            else:
                for bind in re.sub(
                        r'[^\w.]+', ' ',
                        BMConfigParser().safeGet('network', 'bind')
                ).split():
                    self.startListening(bind)
            logger.info('Listening for incoming connections.')
        if not self.udpSockets:
            if BMConfigParser().safeGet('network', 'bind') == '':
                self.startUDPSocket()
            else:
                for bind in re.sub(
                        r'[^\w.]+', ' ',
                        BMConfigParser().safeGet('network', 'bind')
                ).split():
                    self.startUDPSocket(bind)
                self.startUDPSocket(False)
            logger.info('Starting UDP socket(s).')
    else:
        if self.listeningSockets:
            for i in self.listeningSockets.values():
                i.close_reason = "Stopping listening"
                i.accepting = i.connecting = i.connected = False
            logger.info('Stopped listening for incoming connections.')
        if self.udpSockets:
            for i in self.udpSockets.values():
                i.close_reason = "Stopping UDP socket"
                i.accepting = i.connecting = i.connected = False
            logger.info('Stopped udp sockets.')

    loopTime = float(self._spawnWait)
    if self._lastSpawned < time.time() - self._spawnWait:
        loopTime = 2.0
    asyncore.loop(timeout=loopTime, count=1000)

    reaper = []
    for i in self.connections():
        # ping established-but-quiet peers; close handshake stragglers
        minTx = time.time() - 20
        if i.fullyEstablished:
            minTx -= 300 - 20
        if i.lastTx < minTx:
            if i.fullyEstablished:
                i.append_write_buf(protocol.CreatePacket('ping'))
            else:
                i.close_reason = "Timeout (%is)" % (
                    time.time() - i.lastTx)
                i.set_state("close")
    for i in (self.connections() + self.listeningSockets.values() +
              self.udpSockets.values()):
        if not (i.accepting or i.connecting or i.connected):
            reaper.append(i)
        else:
            try:
                if i.state == "close":
                    reaper.append(i)
            except AttributeError:
                pass
    for i in reaper:
        self.removeConnection(i)
def bm_command_ping(self):
    """Reply to an incoming ping with a pong."""
    pong = protocol.CreatePacket('pong')
    self.append_write_buf(pong)
    return True
def bm_command_ping(self):
    """Incoming ping: queue a pong packet in response."""
    reply = protocol.CreatePacket('pong')
    self.append_write_buf(reply)
    return True
def sendChunk():
    """Queue one addr packet covering the addresses batched so far."""
    # numberOfAddressesInAddrMessage/payload come from the enclosing scope
    if not numberOfAddressesInAddrMessage:
        return
    packet = protocol.CreatePacket(
        'addr',
        encodeVarint(numberOfAddressesInAddrMessage) + payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
def loop(self):
    """Single pass of the connection pool's main loop: decide whether to
    spawn/accept connections, run asyncore, then reap dead sockets.

    Bug fix: ``raise NoOptionError`` raised a bare class whose constructor
    requires (option, section); instantiating it without arguments raised
    TypeError at the raise site, escaping the (NoOptionError,
    NoSectionError) handler below. Raise a properly-constructed instance
    so the fallback to ``Proxy.onionproxy = None`` actually runs.
    Regex literals also made raw to avoid the invalid '\\w' escape.
    """
    # defaults to empty loop if outbound connections are maxed
    spawnConnections = False
    acceptConnections = True
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'dontconnect'):
        acceptConnections = False
    elif BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections'):
        spawnConnections = True
    # behind a SOCKS proxy without sockslisten and without a hidden
    # service, we can't accept incoming connections
    if BMConfigParser().get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and \
            (not BMConfigParser().getboolean('bitmessagesettings', 'sockslisten') and
             ".onion" not in BMConfigParser().get('bitmessagesettings', 'onionhostname')):
        acceptConnections = False

    if spawnConnections:
        if not self.bootstrapped:
            helper_bootstrap.dns()
            self.bootstrapped = True
            Proxy.proxy = (
                BMConfigParser().safeGet("bitmessagesettings", "sockshostname"),
                BMConfigParser().safeGetInt("bitmessagesettings", "socksport"))
            # TODO AUTH
            # TODO reset based on GUI settings changes
            try:
                if not BMConfigParser().get(
                        "network", "onionsocksproxytype").startswith("SOCKS"):
                    # fixed: was `raise NoOptionError` (no args -> TypeError)
                    raise NoOptionError('onionsocksproxytype', 'network')
                Proxy.onionproxy = (
                    BMConfigParser().get("network", "onionsockshostname"),
                    BMConfigParser().getint("network", "onionsocksport"))
            except (NoOptionError, NoSectionError):
                Proxy.onionproxy = None
        established = sum(
            1 for c in self.outboundConnections.values()
            if (c.connected and c.fullyEstablished))
        pending = len(self.outboundConnections) - established
        if established < BMConfigParser().safeGetInt(
                "bitmessagesettings", "maxoutboundconnections"):
            for i in range(state.maximumNumberOfHalfOpenConnections - pending):
                try:
                    chosen = chooseConnection(random.choice(self.streams))
                except ValueError:
                    continue
                if chosen in self.outboundConnections:
                    continue
                if chosen.host in self.inboundConnections:
                    continue
                # don't connect to self
                if chosen in state.ownAddresses:
                    continue
                try:
                    # route .onion targets through the onion proxy if set
                    if chosen.host.endswith(".onion") and Proxy.onionproxy is not None:
                        if BMConfigParser().get("network", "onionsocksproxytype") == "SOCKS5":
                            self.addConnection(Socks5BMConnection(chosen))
                        elif BMConfigParser().get("network", "onionsocksproxytype") == "SOCKS4a":
                            self.addConnection(Socks4aBMConnection(chosen))
                    elif BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") == "SOCKS5":
                        self.addConnection(Socks5BMConnection(chosen))
                    elif BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") == "SOCKS4a":
                        self.addConnection(Socks4aBMConnection(chosen))
                    else:
                        self.addConnection(TCPConnection(chosen))
                except socket.error as e:
                    if e.errno == errno.ENETUNREACH:
                        continue
                except (NoSectionError, NoOptionError):
                    # shouldn't happen
                    pass
                self.lastSpawned = time.time()
    else:
        for i in (self.inboundConnections.values() +
                  self.outboundConnections.values()):
            i.set_state("close")
            # FIXME: rating will be increased after next connection
            i.handle_close()

    if acceptConnections:
        if not self.listeningSockets:
            if BMConfigParser().safeGet("network", "bind") == '':
                self.startListening()
            else:
                for bind in re.sub(
                        r"[^\w.]+", " ",
                        BMConfigParser().safeGet("network", "bind")).split():
                    self.startListening(bind)
            logger.info('Listening for incoming connections.')
        if not self.udpSockets:
            if BMConfigParser().safeGet("network", "bind") == '':
                self.startUDPSocket()
            else:
                for bind in re.sub(
                        r"[^\w.]+", " ",
                        BMConfigParser().safeGet("network", "bind")).split():
                    self.startUDPSocket(bind)
                self.startUDPSocket(False)
            logger.info('Starting UDP socket(s).')
    else:
        if self.listeningSockets:
            for i in self.listeningSockets.values():
                i.close_reason = "Stopping listening"
                i.accepting = i.connecting = i.connected = False
            logger.info('Stopped listening for incoming connections.')
        if self.udpSockets:
            for i in self.udpSockets.values():
                i.close_reason = "Stopping UDP socket"
                i.accepting = i.connecting = i.connected = False
            logger.info('Stopped udp sockets.')

    loopTime = float(self.spawnWait)
    if self.lastSpawned < time.time() - self.spawnWait:
        loopTime = 2.0
    asyncore.loop(timeout=loopTime, count=1000)

    reaper = []
    for i in self.inboundConnections.values() + self.outboundConnections.values():
        # ping established-but-quiet peers; close handshake stragglers
        minTx = time.time() - 20
        if i.fullyEstablished:
            minTx -= 300 - 20
        if i.lastTx < minTx:
            if i.fullyEstablished:
                i.append_write_buf(protocol.CreatePacket('ping'))
            else:
                i.close_reason = "Timeout (%is)" % (time.time() - i.lastTx)
                i.set_state("close")
    for i in (self.inboundConnections.values() +
              self.outboundConnections.values() +
              self.listeningSockets.values() + self.udpSockets.values()):
        if not (i.accepting or i.connecting or i.connected):
            reaper.append(i)
        else:
            try:
                if i.state == "close":
                    reaper.append(i)
            except AttributeError:
                pass
    for i in reaper:
        self.removeConnection(i)
def sendObject(self, hash, payload):
    """Queue an object packet for sending, tagged with its inventory hash."""
    logger.debug('sending an object.')
    packet = protocol.CreatePacket('object', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', (hash, packet)))
def sendinvMessageToJustThisOnePeer(self, numberOfObjects, payload):
    """Queue a (possibly huge) inv message destined for this single peer."""
    payload = encodeVarint(numberOfObjects) + payload
    logger.debug('Sending huge inv message with ' + str(numberOfObjects) +
                 ' objects to just this one peer')
    packet = protocol.CreatePacket('inv', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
def run(self):
    """Main send loop: pop (deststream, command, data) tuples off the
    queue and dispatch them until sendBytes() fails or 'shutdown' arrives,
    then tear the socket down and deregister this thread's queue."""
    logger.debug('sendDataThread starting. ID: ' + str(id(self)) +
                 '. Number of queues in sendDataQueues: ' +
                 str(len(state.sendDataQueues)))
    while self.sendBytes():
        deststream, command, data = self.sendDataThreadQueue.get()
        # deststream 0 means "any stream"
        if deststream == 0 or deststream in self.streamNumber:
            if command == 'shutdown':
                logger.debug('sendDataThread (associated with ' +
                             str(self.peer) + ') ID: ' + str(id(self)) +
                             ' shutting down now.')
                break
            # When you receive an incoming connection, a sendDataThread is
            # created even though you don't yet know what stream number the
            # remote peer is interested in. They will tell you in a version
            # message and if you too are interested in that stream then you
            # will continue on with the connection and will set the
            # streamNumber of this send data thread here:
            elif command == 'setStreamNumber':
                self.streamNumber = data
                logger.debug('setting the stream number to %s',
                             ', '.join(str(x) for x in self.streamNumber))
            elif command == 'setRemoteProtocolVersion':
                specifiedRemoteProtocolVersion = data
                logger.debug(
                    'setting the remote node\'s protocol version in the sendDataThread (ID: ' +
                    str(id(self)) + ') to ' +
                    str(specifiedRemoteProtocolVersion))
                self.remoteProtocolVersion = specifiedRemoteProtocolVersion
            elif command == 'sendaddr':
                if self.connectionIsOrWasFullyEstablished:
                    # only send addr messages if we have sent and heard a verack from the remote node
                    numberOfAddressesInAddrMessage = len(data)
                    payload = ''
                    for hostDetails in data:
                        timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
                        payload += pack(
                            '>Q', timeLastReceivedMessageFromThisNode
                        )  # now uses 64-bit time
                        payload += pack('>I', streamNumber)
                        payload += pack(
                            '>q', services
                        )  # service bit flags offered by this node
                        payload += protocol.encodeHost(host)
                        payload += pack('>H', port)
                    payload = encodeVarint(
                        numberOfAddressesInAddrMessage) + payload
                    packet = protocol.CreatePacket('addr', payload)
                    if not self.sendBytes(packet):
                        break
            elif command == 'advertiseobject':
                self.objectHashHolderInstance.holdHash(data)
            elif command == 'sendinv':
                if self.connectionIsOrWasFullyEstablished:
                    # only send inv messages if we have send and heard a verack from the remote node
                    payload = ''
                    for hash in data:
                        payload += hash
                    if payload != '':
                        # each inventory hash is 32 bytes
                        payload = encodeVarint(len(payload) / 32) + payload
                        packet = protocol.CreatePacket('inv', payload)
                        if not self.sendBytes(packet):
                            break
            elif command == 'pong':
                if self.lastTimeISentData < (int(time.time()) - 298):
                    # Send out a pong message to keep the connection alive.
                    logger.debug('Sending pong to ' + str(self.peer) +
                                 ' to keep connection alive.')
                    packet = protocol.CreatePacket('pong')
                    if not self.sendBytes(packet):
                        break
            elif command == 'sendRawData':
                objectHash = None
                # data may be (objectHash, rawBytes) for tracked uploads
                if type(data) in [list, tuple]:
                    objectHash, data = data
                if not self.sendBytes(data):
                    break
                if objectHash:
                    PendingUpload().delete(objectHash)
            elif command == 'connectionIsOrWasFullyEstablished':
                self.connectionIsOrWasFullyEstablished = True
                self.services, self.sock = data
        elif self.connectionIsOrWasFullyEstablished:
            logger.error('sendDataThread ID: ' + str(id(self)) +
                         ' ignoring command ' + command +
                         ' because the thread is not in stream ' +
                         str(deststream) + ' but in streams ' +
                         ', '.join(str(x) for x in self.streamNumber))
        self.sendDataThreadQueue.task_done()
    # Flush if the cycle ended with break
    try:
        self.sendDataThreadQueue.task_done()
    except ValueError:
        pass
    try:
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
    except:
        pass
    state.sendDataQueues.remove(self.sendDataThreadQueue)
    PendingUpload().threadEnd()
    logger.info('sendDataThread ending. ID: ' + str(id(self)) +
                '. Number of queues in sendDataQueues: ' +
                str(len(state.sendDataQueues)))
    self.objectHashHolderInstance.close()
def run(self):
    """Drain invQueue and advertise each object to eligible peers, routing
    via Dandelion stem ('dinv') or fluff ('inv').

    Bug fixes:
    * the stem condition used ``connection.services | protocol.NODE_DANDELION``
      (bitwise OR), which is nonzero for every peer — it must be ``&`` to
      actually test the NODE_DANDELION service bit (cf. the sibling
      implementation in this file);
    * the dandelion option is a percentage, so it is read with
      ``safeGetInt`` — ``safeGetBoolean`` cannot parse a percent value and
      made the RNG comparison meaningless.
    """
    while not state.shutdown:
        chunk = []
        while True:
            try:
                data = invQueue.get(False)
                chunk.append((data[0], data[1]))
                # locally generated
                if len(data) == 2:
                    Dandelion().addHash(data[1], None)
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1])
                # came over the network
                else:
                    source = BMConnectionPool().getConnectionByAddr(
                        data[2])
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1], source)
            except Queue.Empty:
                break
            # connection not found, handle it as if generated locally
            except KeyError:
                BMConnectionPool().handleReceivedObject(data[0], data[1])

        if chunk:
            for connection in BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                fluffs = []
                stems = []
                for inv in chunk:
                    # skip peers not subscribed to this object's stream
                    if inv[0] not in connection.streams:
                        continue
                    try:
                        with connection.objectsNewToThemLock:
                            del connection.objectsNewToThem[inv[1]]
                    except KeyError:
                        continue
                    try:
                        if connection == Dandelion().hashMap[inv[1]]:
                            # Fluff trigger by RNG
                            # auto-ignore if config set to 0, i.e. dandelion is off
                            # send a normal inv if stem node doesn't support dandelion
                            if randint(1, 100) < BMConfigParser().safeGetInt(
                                    "network", "dandelion") and \
                                    connection.services & protocol.NODE_DANDELION > 0:
                                stems.append(inv[1])
                            else:
                                fluffs.append(inv[1])
                    except KeyError:
                        # not in the stem map: advertise normally
                        fluffs.append(inv[1])

                if fluffs:
                    shuffle(fluffs)
                    connection.append_write_buf(protocol.CreatePacket(
                        'inv',
                        addresses.encodeVarint(len(fluffs)) + "".join(fluffs)))
                if stems:
                    shuffle(stems)
                    connection.append_write_buf(protocol.CreatePacket(
                        'dinv',
                        addresses.encodeVarint(len(stems)) + "".join(stems)))

        invQueue.iterate()
        for i in range(len(chunk)):
            invQueue.task_done()

        # periodically re-randomise dandelion stem assignments
        if Dandelion().refresh < time():
            BMConnectionPool().reRandomiseDandelionStems()

        self.stop.wait(1)