def genAckPayload(streamNumber=1, stealthLevel=0):
    """Build an acknowledgement payload object.

    Stealth levels trade privacy against size and proof-of-work cost:
      0 - random 32 bytes presented as a message (cheapest),
      1 - random 32 bytes presented as a getpubkey request,
      2 - a real ECIES-encrypted dummy message to a throwaway pubkey
          (indistinguishable from genuine traffic, most expensive).
    """
    if stealthLevel == 2:
        # Privacy-enhanced payload: derive a pubkey from a throwaway
        # private key and encrypt a dummy message of random length to it.
        # 234 bytes is the smallest possible standard-formatted message.
        throwawayPubKeyHex = highlevelcrypto.privToPub(
            hexlify(helper_random.randomBytes(32)))
        filler = helper_random.randomBytes(random.randint(234, 800))
        ackdata = highlevelcrypto.encrypt(filler, throwawayPubKeyHex)
        acktype = 2  # message
        version = 1
    elif stealthLevel == 1:
        # Basic privacy payload: looks like a random getpubkey request
        ackdata = helper_random.randomBytes(32)
        acktype = 0  # getpubkey
        version = 4
    else:
        # Minimum viable (non-stealth) payload
        ackdata = helper_random.randomBytes(32)
        acktype = 2  # message
        version = 1
    return (pack('>I', acktype) + encodeVarint(version)
            + encodeVarint(streamNumber) + ackdata)
def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server = False, nodeid = None):
    """Construct the wire-format 'version' packet sent when a connection
    is established.

    remoteHost/remotePort identify the peer; participatingStreams is the
    set of stream numbers we advertise; server indicates an inbound
    connection; nodeid (optional) supplies our 8-byte node identifier.
    Returns the complete packet built by CreatePacket.
    """
    payload = ''
    payload += pack('>L', 3)  # protocol version.
    # bitflags of the services I offer.
    payload += pack(
        '>q',
        NODE_NETWORK
        | (NODE_SSL if haveSSL(server) else 0)
        | (NODE_DANDELION if state.dandelion else 0)
    )
    payload += pack('>q', int(time.time()))
    payload += pack(
        '>q', 1)  # boolservices of remote connection; ignored by the remote host.
    if checkSocksIP(remoteHost) and server:
        # prevent leaking of tor outbound IP
        payload += encodeHost('127.0.0.1')
        payload += pack('>H', 8444)
    else:
        payload += encodeHost(remoteHost)
        payload += pack('>H', remotePort)  # remote IPv6 and port
    # bitflags of the services I offer.
    payload += pack(
        '>q',
        NODE_NETWORK
        | (NODE_SSL if haveSSL(server) else 0)
        | (NODE_DANDELION if state.dandelion else 0)
    )
    # Our own address: IPv4-mapped-IPv6 prefix + 127.0.0.1 placeholder.
    payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack(
        '>L', 2130706433)  # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
    # we have a separate extPort and
    # incoming over clearnet or
    # outgoing through clearnet
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and state.extPort \
        and ((server and not checkSocksIP(remoteHost)) or \
            (BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none" and not server)):
        payload += pack('>H', state.extPort)
    elif checkSocksIP(remoteHost) and server:  # incoming connection over Tor
        payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
    else:  # no extPort and not incoming over Tor
        payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'port'))
    random.seed()
    if nodeid is not None:
        payload += nodeid[0:8]
    else:
        # random nonce lets a node recognise a connection to itself
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
    userAgent = '/PyBitmessage:' + softwareVersion + '/'
    payload += encodeVarint(len(userAgent))
    payload += userAgent
    # Streams
    payload += encodeVarint(len(participatingStreams))
    count = 0
    for stream in sorted(participatingStreams):
        payload += encodeVarint(stream)
        count += 1
        # protocol limit, see specification
        if count >= 160000:
            break
    return CreatePacket('version', payload)
def _onSuccess(self, addressVersion, streamNumber, ripe):
    """Address decoded successfully: for v4+ addresses, look up recent
    broadcasts for it in the inventory (by tag) and update the checkbox
    label accordingly."""
    if addressVersion <= 3:
        # Old addresses have no tag, so past broadcasts can't be found.
        self.checkBoxDisplayMessagesAlreadyInInventory.setText(_translate(
            "MainWindow",
            "Address is an old type. We cannot display its past"
            " broadcasts."
        ))
    else:
        Inventory().flush()
        doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
            encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe
        ).digest()).digest()
        # The broadcast tag is the second half of the double SHA-512.
        tag = doubleHashOfAddressData[32:]
        # Object type 3 = broadcast.
        self.recent = Inventory().by_type_and_tag(3, tag)
        count = len(self.recent)
        if count == 0:
            self.checkBoxDisplayMessagesAlreadyInInventory.setText(
                _translate(
                    "MainWindow",
                    "There are no recent broadcasts from this address"
                    " to display."
                ))
        else:
            self.checkBoxDisplayMessagesAlreadyInInventory.setEnabled(True)
            self.checkBoxDisplayMessagesAlreadyInInventory.setText(
                _translate(
                    "MainWindow",
                    "Display the %n recent broadcast(s) from this address.",
                    None,
                    QtCore.QCoreApplication.CodecForTr,
                    count
                ))
def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''):
    """Serialize an 'error' command packet.

    Fields, in wire order: fatal level, ban time, then the
    length-prefixed inventory vector and error text.
    """
    parts = [
        encodeVarint(fatal),
        encodeVarint(banTime),
        encodeVarint(len(inventoryVector)),
        inventoryVector,
        encodeVarint(len(errorText)),
        errorText,
    ]
    return CreatePacket('error', ''.join(parts))
def pack_binary(self):
    """
    Packs all variables into a binary string.
    For now, every variable is assumed to be an integer
    Format is as follows:
        [ start(varInt) ][ post_deadline(varInt) ]
        [ commitment_phase_deadline(varInt) ]
    """
    return (encodeVarint(self.start)
            + encodeVarint(self.post_deadline)
            + encodeVarint(self.commitment_phase_deadline))
def pack_binary_header(type, blockchain, time_data):
    """
    Pack the header data (type, timeData) into a binary string.
    The format is
        [ dataType(varInt) ] [ blockchain(varInt) ]
        [ timeDataLength(varInt) ] [ timeData ] [ actual data ]

    NOTE: the parameter name ``type`` shadows the builtin; kept for
    backward compatibility with existing keyword callers.
    """
    time_data_bin = time_data.pack_binary()
    header = encodeVarint(type)
    header += encodeVarint(blockchain)
    header += encodeVarint(len(time_data_bin))
    header += time_data_bin
    return header
def encode_vote(answer_no, previous_vote_hash):
    """
    Encode a vote.
    Format: [ answer_no(varInt) ] [ pvh_len(varInt) ][ pvh ]

    A ``None`` previous_vote_hash is encoded as an empty string.
    """
    pvh = "" if previous_vote_hash is None else previous_vote_hash
    return encodeVarint(answer_no) + encodeVarint(len(pvh)) + pvh
def sendChunk():
    """Flush the accumulated inventory batch (closure over the enclosing
    scope's objectCount/payload) to this one peer as an 'inv' packet."""
    if objectCount == 0:
        return
    logger.debug(
        'Sending huge inv message with %i objects to just this one peer',
        objectCount)
    self.append_write_buf(
        protocol.CreatePacket(
            'inv', addresses.encodeVarint(objectCount) + payload))
def reloadMyAddressHashes():
    """Reload keys for user's addresses from the config file"""
    logger.debug('reloading keys from keys.dat file')
    # Drop all cached key material before repopulating.
    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    # myPrivateKeys.clear()
    keyfileSecure = checkSensitiveFilePermissions(
        os.path.join(state.appdata, 'keys.dat'))
    hasEnabledKeys = False
    for addressInKeysFile in BMConfigParser().addresses():
        isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
        if isEnabled:
            hasEnabledKeys = True
            # status
            addressVersionNumber, streamNumber, hashobj = decodeAddress(
                addressInKeysFile)[1:]
            if addressVersionNumber in (2, 3, 4):
                # Returns a simple 32 bytes of information encoded
                # in 64 Hex characters, or null if there was an error.
                privEncryptionKey = hexlify(
                    decodeWalletImportFormat(BMConfigParser().get(
                        addressInKeysFile, 'privencryptionkey')))
                # It is 32 bytes encoded as 64 hex characters
                if len(privEncryptionKey) == 64:
                    myECCryptorObjects[hashobj] = \
                        highlevelcrypto.makeCryptor(privEncryptionKey)
                    myAddressesByHash[hashobj] = addressInKeysFile
                    # Tag = second half of the double SHA-512 over
                    # version + stream + ripe (used for v4 addresses).
                    tag = hashlib.sha512(
                        hashlib.sha512(
                            encodeVarint(addressVersionNumber)
                            + encodeVarint(streamNumber)
                            + hashobj).digest()).digest()[32:]
                    myAddressesByTag[tag] = addressInKeysFile
            else:
                logger.error('Error in reloadMyAddressHashes: Can\'t handle'
                             ' address versions other than 2, 3, or 4.\n')
    if not keyfileSecure:
        # Tighten keys.dat permissions if it held (enabled) private keys.
        fixSensitiveFilePermissions(os.path.join(state.appdata, 'keys.dat'),
                                    hasEnabledKeys)
def run(self):
    """Download loop: repeatedly pick peers at random and request objects
    we are missing, throttling per-peer outstanding requests."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = BMConnectionPool().inboundConnections.values(
        ) + BMConnectionPool().outboundConnections.values()
        random.shuffle(connections)
        for i in connections:
            now = time.time()
            # Requests older than this are considered timed out and may
            # be re-issued.
            timedOut = now - DownloadThread.requestTimeout
            # this may take a while, but it needs a consistency so I think it's better to lock a bigger chunk
            with i.objectsNewToMeLock:
                try:
                    downloadPending = len(
                        list((k for k, v in i.objectsNewToMe.iteritems()
                              if k in missingObjects
                              and missingObjects[k] > timedOut and not v)))
                except KeyError:
                    continue
                if downloadPending >= DownloadThread.minPending:
                    continue
                # keys with True values in the dict
                try:
                    request = list(
                        (k for k, v in i.objectsNewToMe.iteritems()
                         if k not in missingObjects
                         or missingObjects[k] < timedOut))
                except KeyError:
                    continue
                random.shuffle(request)
                if not request:
                    continue
                if len(request
                       ) > DownloadThread.requestChunk - downloadPending:
                    request = request[:DownloadThread.requestChunk
                                      - downloadPending]
                # mark them as pending
                for k in request:
                    i.objectsNewToMe[k] = False
                    missingObjects[k] = now
            # Build one 'getdata' packet for the whole batch.
            payload = bytearray()
            payload.extend(addresses.encodeVarint(len(request)))
            for chunk in request:
                payload.extend(chunk)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port,
                         len(request))
            requested += len(request)
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            # Nothing to do; back off briefly.
            self.stop.wait(5)
def missing_public_key(self, address):
    """We need the pubkey for *address*: queue a request for it unless one
    was already sent within the last 24 hours, in which case only make
    sure the decryption material is registered in shared.neededPubkeys."""
    PUBKEY_REQUEST_WAIT_SECONDS = 60 * 60 * 24
    last_request_time = self.settings_get_pubkey_last_request_time(address)
    if last_request_time is not None and last_request_time > time.time() - PUBKEY_REQUEST_WAIT_SECONDS:
        # We requested this pubkey recently, so let's not do it again right now
        # However, we need to put the tag in neededPubkeys in case the user just restarted the client
        _, addressVersionNumber, streamNumber, ripe = decodeAddress(address)
        if addressVersionNumber <= 3:
            shared.neededPubkeys[ripe] = 0
        elif addressVersionNumber >= 4:
            tag = hashlib.sha512(hashlib.sha512(
                encodeVarint(addressVersionNumber)+encodeVarint(streamNumber)+ripe
            ).digest()).digest()[32:]  # Note that this is the second half of the sha512 hash.
            if tag not in shared.neededPubkeys:
                privEncryptionKey = hashlib.sha512(hashlib.sha512(
                    encodeVarint(addressVersionNumber)+encodeVarint(streamNumber)+ripe
                ).digest()).digest()[:32]  # Note that this is the first half of the sha512 hash.
                import highlevelcrypto
                # We'll need this for when we receive a pubkey reply: it will
                # be encrypted and we'll need to decrypt it.
                shared.neededPubkeys[tag] = (
                    address,
                    highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex')))
        return
    self.settings_set_pubkey_last_request_time(address, time.time())
    shared.workerQueue.put(("requestPubkey", address))
def genAckPayload(streamNumber=1, stealthLevel=0):
    """
    Generate and return payload obj.

    This function generates payload objects for message acknowledgements
    Several stealth levels are available depending on the privacy needs;
    a higher level means better stealth, but also higher cost (size+POW)

       - level 0: a random 32-byte sequence with a message header appended
       - level 1: a getpubkey request for a (random) dummy key hash
       - level 2: a standard message, encrypted to a random pubkey
    """
    if stealthLevel == 1:
        # Basic privacy: a getpubkey request for a random dummy hash
        acktype, version = 0, 4  # getpubkey
        ackdata = helper_random.randomBytes(32)
    elif stealthLevel == 2:
        # Privacy-enhanced: derive a pubkey from a throwaway privkey and
        # encrypt a dummy message of random length to it using standard
        # BM encryption (ECIES).  234 bytes is the smallest possible
        # standard-formatted message.
        throwawayPubKeyHex = highlevelcrypto.privToPub(
            hexlify(helper_random.randomBytes(32)))
        filler = helper_random.randomBytes(
            helper_random.randomrandrange(234, 801))
        ackdata = highlevelcrypto.encrypt(filler, throwawayPubKeyHex)
        acktype, version = 2, 1  # message
    else:
        # Minimum viable (non-stealth) payload
        acktype, version = 2, 1  # message
        ackdata = helper_random.randomBytes(32)
    return (pack('>I', acktype) + encodeVarint(version)
            + encodeVarint(streamNumber) + ackdata)
def missing_public_key(self, address):
    """We need the pubkey for *address*: queue a request for it unless one
    was already sent within the last 24 hours, in which case only make
    sure the decryption material is registered in shared.neededPubkeys."""
    PUBKEY_REQUEST_WAIT_SECONDS = 60 * 60 * 24
    last_request_time = self.settings_get_pubkey_last_request_time(address)
    if last_request_time is not None and last_request_time > time.time(
    ) - PUBKEY_REQUEST_WAIT_SECONDS:
        # We requested this pubkey recently, so let's not do it again right now
        # However, we need to put the tag in neededPubkeys in case the user just restarted the client
        _, addressVersionNumber, streamNumber, ripe = decodeAddress(
            address)
        if addressVersionNumber <= 3:
            shared.neededPubkeys[ripe] = 0
        elif addressVersionNumber >= 4:
            tag = hashlib.sha512(
                hashlib.sha512(
                    encodeVarint(addressVersionNumber) +
                    encodeVarint(streamNumber) + ripe).digest()
            ).digest(
            )[32:]  # Note that this is the second half of the sha512 hash.
            if tag not in shared.neededPubkeys:
                privEncryptionKey = hashlib.sha512(
                    hashlib.sha512(
                        encodeVarint(addressVersionNumber) +
                        encodeVarint(streamNumber) + ripe).digest()
                ).digest(
                )[:
                  32]  # Note that this is the first half of the sha512 hash.
                import highlevelcrypto
                shared.neededPubkeys[tag] = (
                    address,
                    highlevelcrypto.makeCryptor(
                        privEncryptionKey.encode('hex'))
                )  # We'll need this for when we receive a pubkey reply: it will be encrypted and we'll need to decrypt it.
        return
    self.settings_set_pubkey_last_request_time(address, time.time())
    shared.workerQueue.put(("requestPubkey", address))
def run(self):
    """Download loop: spread 'getdata' requests for missing objects over
    the fully-established peers, skipping objects already in the local
    inventory (unless held back by Dandelion)."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = [
            x for x in BMConnectionPool().inboundConnections.values() +
            BMConnectionPool().outboundConnections.values()
            if x.fullyEstablished
        ]
        helper_random.randomshuffle(connections)
        # Divide the outstanding work evenly between peers.
        try:
            requestChunk = max(
                int(
                    min(DownloadThread.maxRequestChunk,
                        len(missingObjects)) / len(connections)), 1)
        except ZeroDivisionError:
            requestChunk = 1
        for i in connections:
            now = time.time()
            # avoid unnecessary delay
            if i.skipUntil >= now:
                continue
            try:
                request = i.objectsNewToMe.randomKeys(requestChunk)
            except KeyError:
                continue
            payload = bytearray()
            chunkCount = 0
            for chunk in request:
                # Already have it (and not embargoed by Dandelion):
                # drop it from the peer's offer list instead of asking.
                if chunk in Inventory() and not Dandelion().hasHash(chunk):
                    try:
                        del i.objectsNewToMe[chunk]
                    except KeyError:
                        pass
                    continue
                payload.extend(chunk)
                chunkCount += 1
                missingObjects[chunk] = now
            if not chunkCount:
                continue
            # Prepend the object count varint to the hash list.
            payload[0:0] = addresses.encodeVarint(chunkCount)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port,
                         chunkCount)
            requested += chunkCount
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(1)
def run(self):
    """Download loop: divide the request budget between peers and ask
    each for objects we are missing, tracking pending requests so they
    can time out and be retried."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = BMConnectionPool().inboundConnections.values() + \
            BMConnectionPool().outboundConnections.values()
        random.shuffle(connections)
        # Per-peer request budget for this pass.
        try:
            requestChunk = max(
                int(DownloadThread.maxRequestChunk / len(connections)), 1)
        except ZeroDivisionError:
            requestChunk = 1
        for i in connections:
            now = time.time()
            # Requests older than this are considered timed out.
            timedOut = now - DownloadThread.requestTimeout
            # this may take a while, but it needs a consistency so I think it's better to lock a bigger chunk
            with i.objectsNewToMeLock:
                try:
                    downloadPending = len(
                        list((k for k, v in i.objectsNewToMe.iteritems()
                              if k in missingObjects
                              and missingObjects[k] > timedOut
                              and not v)))
                except KeyError:
                    continue
                if downloadPending >= DownloadThread.minPending:
                    continue
                # keys with True values in the dict
                try:
                    request = list(
                        (k for k, v in i.objectsNewToMe.iteritems()
                         if k not in missingObjects
                         or missingObjects[k] < timedOut))
                except KeyError:
                    continue
                random.shuffle(request)
                if not request:
                    continue
                if len(request) > requestChunk - downloadPending:
                    request = request[:requestChunk - downloadPending]
                # mark them as pending
                for k in request:
                    i.objectsNewToMe[k] = False
                    missingObjects[k] = now
            # Build one 'getdata' packet for the whole batch.
            payload = bytearray()
            payload.extend(addresses.encodeVarint(len(request)))
            for chunk in request:
                payload.extend(chunk)
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port,
                         len(request))
            requested += len(request)
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(5)
def assembleAddr(peerList):
    """Build one or more 'addr' packets advertising *peerList*.

    *peerList* is either a single ``state.Peer`` or a sequence of
    ``(stream, peer, timestamp)`` entries; entries are chunked so no
    single packet exceeds ``BMProto.maxAddrCount``.  Returns the
    concatenated packets, or ``b''`` for an empty list.
    """
    if isinstance(peerList, state.Peer):
        # Bug fix: the original "(peerList)" was a no-op (parentheses do
        # not build a sequence), so a bare Peer was never wrapped and the
        # slicing below misbehaved.  Wrap it in a one-element list.
        peerList = [peerList]
    if not peerList:
        return b''
    retval = b''
    for i in range(0, len(peerList), BMProto.maxAddrCount):
        payload = addresses.encodeVarint(
            len(peerList[i:i + BMProto.maxAddrCount]))
        for address in peerList[i:i + BMProto.maxAddrCount]:
            stream, peer, timestamp = address
            payload += struct.pack('>Q', timestamp)  # 64-bit time
            payload += struct.pack('>I', stream)
            # service bit flags offered by this node
            payload += struct.pack('>q', 1)
            payload += protocol.encodeHost(peer.host)
            payload += struct.pack('>H', peer.port)  # remote port
        retval += protocol.CreatePacket('addr', payload)
    return retval
def assemble_addr(peerList):
    """Create address command"""
    if isinstance(peerList, Peer):
        peerList = [peerList]
    if not peerList:
        return b''
    packets = b''
    for offset in range(0, len(peerList), MAX_ADDR_COUNT):
        batch = peerList[offset:offset + MAX_ADDR_COUNT]
        payload = addresses.encodeVarint(len(batch))
        for stream, peer, timestamp in batch:
            payload += struct.pack('>Q', timestamp)  # 64-bit time
            payload += struct.pack('>I', stream)
            # service bit flags offered by this node
            payload += struct.pack('>q', 1)
            payload += encodeHost(peer.host)
            payload += struct.pack('>H', peer.port)  # remote port
        packets += CreatePacket('addr', payload)
    return packets
def run(self):
    """Inventory-announcement loop: drain invQueue, register received
    objects with the connection pool, then advertise the new hashes to
    every connection that still considers them new."""
    while not state.shutdown:
        chunk = []
        while True:
            try:
                data = invQueue.get(False)
                if len(data) == 2:
                    # Locally generated object (no source connection).
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1])
                else:
                    source = BMConnectionPool().getConnectionByAddr(
                        data[2])
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1], source)
                chunk.append((data[0], data[1]))
            except Queue.Empty:
                break
            # connection not found, handle it as if generated locally
            except KeyError:
                BMConnectionPool().handleReceivedObject(data[0], data[1])
        if chunk:
            for connection in BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                hashes = []
                for inv in chunk:
                    if inv[0] not in connection.streams:
                        continue
                    try:
                        with connection.objectsNewToThemLock:
                            del connection.objectsNewToThem[inv[1]]
                        hashes.append(inv[1])
                    except KeyError:
                        continue
                if hashes:
                    connection.append_write_buf(protocol.CreatePacket('inv', \
                        addresses.encodeVarint(len(hashes)) + "".join(hashes)))
        invQueue.iterate()
        self.stop.wait(1)
def run(self):
    """Download loop: request randomly chosen missing objects from
    fully-established peers, pruning offers for objects we already hold
    (unless embargoed by Dandelion)."""
    while not self._stopped:
        requested = 0
        # Choose downloading peers randomly
        connections = [x for x in
                       BMConnectionPool().inboundConnections.values() +
                       BMConnectionPool().outboundConnections.values()
                       if x.fullyEstablished]
        random.shuffle(connections)
        # Divide the outstanding work evenly between peers.
        try:
            requestChunk = max(
                int(min(DownloadThread.maxRequestChunk,
                        len(missingObjects)) / len(connections)), 1)
        except ZeroDivisionError:
            requestChunk = 1
        for i in connections:
            now = time.time()
            try:
                request = i.objectsNewToMe.randomKeys(requestChunk)
            except KeyError:
                continue
            payload = bytearray()
            payload.extend(addresses.encodeVarint(len(request)))
            for chunk in request:
                # Already in inventory and not held by Dandelion: drop
                # the peer's offer instead of requesting it again.
                if chunk in Inventory() and not Dandelion().hasHash(chunk):
                    try:
                        del i.objectsNewToMe[chunk]
                    except KeyError:
                        pass
                    continue
                payload.extend(chunk)
                missingObjects[chunk] = now
            if not payload:
                continue
            i.append_write_buf(protocol.CreatePacket('getdata', payload))
            logger.debug("%s:%i Requesting %i objects",
                         i.destination.host, i.destination.port,
                         len(request))
            requested += len(request)
        if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
            self.cleanPending()
        if not requested:
            self.stop.wait(1)
def encode_ring_signature(message, c0, ss, Y_tilde):
    """
    Encode a ring signature for a vote.
    Note that we don't need to encode the public keys into the
    addresses, because everybody knows them from the election data.
    The format is as follows:
        [ MAGIC_ID ]
        [ c_0_length(varInt) ][ c_0 ]
        [ ss_length(varInt) ]
        for each s_i in ss:
            [ s_i_length(varInt) ][ s_i ]
        [ Y_tilde_x_length(varInt) ][ Y_tilde_x ]
        [ Y_tilde_y_length(varInt) ][ Y_tilde_y ]
    """
    def _length_prefixed(blob):
        # varint length prefix followed by the raw bytes
        return encodeVarint(len(blob)) + blob

    data = RingSignature.MAGIC_ID
    data += _length_prefixed(ECHelper.int2bin(c0))
    data += encodeVarint(len(ss))
    for s in ss:
        data += _length_prefixed(ECHelper.int2bin(s))
    data += _length_prefixed(ECHelper.int2bin(Y_tilde.x))
    data += _length_prefixed(ECHelper.int2bin(Y_tilde.y))
    data += _length_prefixed(message)
    return data
def encode_ring_signature(message, c0, ss, Y_tilde):
    """
    Encode a ring signature for a vote.
    Note that we don't need to encode the public keys into the
    addresses, because everybody knows them from the election data.
    The format is as follows:
        [ MAGIC_ID ]
        [ c_0_length(varInt) ][ c_0 ]
        [ ss_length(varInt) ]
        for each s_i in ss:
            [ s_i_length(varInt) ][ s_i ]
        [ Y_tilde_x_length(varInt) ][ Y_tilde_x ]
        [ Y_tilde_y_length(varInt) ][ Y_tilde_y ]
    """
    c0_bin = ECHelper.int2bin(c0)
    data = RingSignature.MAGIC_ID
    data += encodeVarint(len(c0_bin)) + c0_bin
    data += encodeVarint(len(ss))
    for s in ss:
        s_bin = ECHelper.int2bin(s)
        data += encodeVarint(len(s_bin)) + s_bin
    # Both curve-point coordinates, length-prefixed.
    for coordinate in (Y_tilde.x, Y_tilde.y):
        coordinate_bin = ECHelper.int2bin(coordinate)
        data += encodeVarint(len(coordinate_bin)) + coordinate_bin
    data += encodeVarint(len(message)) + message
    return data
def pack_binary(self):
    """
    Pack the data into a binary string for storage in DB.
    The standard format is
        [ dataType(varInt) ][ actual data ]
    The actual data is stored as follows:
        [ length of question (varInt) ][ question ]
        [ amount of answers (varInt) ]
        for each answer:
            [ length of answer (varInt) ][ answer }
        [ amount of addresses (varInt) ]
        for each address:
            [ length of address (varInt) ][ address }
        [ amount of public keys (varInt) ]
        for each pubkey:
            [ length of signing pubkey (varInt) ][ signing pubkey }
            [ length of encryption pubkey (varInt) ][ encryption pubkey ]
    """
    result = ConsensusData.pack_binary_header(
        ConsensusData.DATA_TYPE_VOTING, self.blockchain, self.time_data)
    result += encodeVarint(len(self.question))
    result += self.question
    result += encodeVarint(len(self.answers))
    for answer in self.answers:
        result += encodeVarint(len(answer))
        result += answer
    result += encodeVarint(len(self.addresses))
    for address in self.addresses:
        result += encodeVarint(len(address))
        result += address
    if self.public_keys is None:
        # No key list at all: encoded as a zero count.
        result += encodeVarint(0)
    else:
        result += encodeVarint(len(self.public_keys))
        for pubkey in self.public_keys:
            if pubkey is None:
                # Missing entry: zero-length signing and encryption keys.
                result += encodeVarint(0)
                result += encodeVarint(0)
            else:
                # Signing key first
                if pubkey[0] is None:
                    result += encodeVarint(0)
                else:
                    result += encodeVarint(len(pubkey[0]))
                    result += pubkey[0]
                # Then encryption key
                if pubkey[1] is None:
                    result += encodeVarint(0)
                else:
                    result += encodeVarint(len(pubkey[1]))
                    result += pubkey[1]
    return result
def sendChunk():
    """Flush the accumulated inventory batch (closure over the enclosing
    scope's objectCount/payload) to this one peer as an 'inv' packet."""
    if not objectCount:
        return
    logger.debug(
        'Sending huge inv message with %i objects to just this one peer',
        objectCount)
    packet = protocol.CreatePacket(
        'inv', addresses.encodeVarint(objectCount) + payload)
    self.append_write_buf(packet)
def processbroadcast(self, data):
    """Process one received broadcast object: decrypt it with our
    subscription keys, verify the embedded signature and sender identity,
    then store the pubkey and deliver the message to the inbox."""
    messageProcessingStartTime = time.time()
    shared.numberOfBroadcastsProcessed += 1
    queues.UISignalQueue.put((
        'updateNumberOfBroadcastsProcessed', 'no data'))
    inventoryHash = calculateInventoryHash(data)
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += broadcastVersionLength
    if broadcastVersion < 4 or broadcastVersion > 5:
        logger.info('Cannot decode incoming broadcast versions less than 4 or higher than 5. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
        return
    cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += cleartextStreamNumberLength
    if broadcastVersion == 4:
        """
        v4 broadcasts are encrypted the same way the msgs are encrypted.
        To see if we are interested in a v4 broadcast, we try to decrypt
        it. This was replaced with v5 broadcasts which include a tag which
        we check instead, just like we do with v4 pubkeys.
        """
        signedData = data[8:readPosition]
        initialDecryptionSuccessful = False
        for key, cryptorObject in shared.MyECSubscriptionCryptorObjects.items():
            try:
                if initialDecryptionSuccessful:
                    # continue decryption attempts to avoid timing attacks
                    cryptorObject.decrypt(data[readPosition:])
                else:
                    decryptedData = cryptorObject.decrypt(data[readPosition:])
                    # This is the RIPE hash of the sender's pubkey. We need
                    # this below to compare to the RIPE hash of the sender's
                    # address to verify that it was encrypted by with their
                    # key rather than some other key.
                    toRipe = key
                    initialDecryptionSuccessful = True
                    logger.info('EC decryption successful using key associated with ripe hash: %s' % hexlify(key))
            except Exception as err:
                pass
                # print 'cryptorObject.decrypt Exception:', err
        if not initialDecryptionSuccessful:
            # This is not a broadcast I am interested in.
            logger.debug('Length of time program spent failing to decrypt this v4 broadcast: %s seconds.' % (time.time() - messageProcessingStartTime,))
            return
    elif broadcastVersion == 5:
        embeddedTag = data[readPosition:readPosition+32]
        readPosition += 32
        if embeddedTag not in shared.MyECSubscriptionCryptorObjects:
            logger.debug('We\'re not interested in this broadcast.')
            return
        # We are interested in this broadcast because of its tag.
        # We're going to add some more data which is signed further down.
        signedData = data[8:readPosition]
        cryptorObject = shared.MyECSubscriptionCryptorObjects[embeddedTag]
        try:
            decryptedData = cryptorObject.decrypt(data[readPosition:])
            logger.debug('EC decryption successful')
        except Exception as err:
            logger.debug('Broadcast version %s decryption Unsuccessful.' % broadcastVersion)
            return
    # At this point this is a broadcast I have decrypted and am
    # interested in.
    readPosition = 0
    sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
        decryptedData[readPosition:readPosition + 9])
    if broadcastVersion == 4:
        if sendersAddressVersion < 2 or sendersAddressVersion > 3:
            logger.warning('Cannot decode senderAddressVersion other than 2 or 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
            return
    elif broadcastVersion == 5:
        if sendersAddressVersion < 4:
            logger.info('Cannot decode senderAddressVersion less than 4 for broadcast version number 5. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
            return
    readPosition += sendersAddressVersionLength
    sendersStream, sendersStreamLength = decodeVarint(
        decryptedData[readPosition:readPosition + 9])
    if sendersStream != cleartextStreamNumber:
        logger.info('The stream number outside of the encryption on which the POW was completed doesn\'t match the stream number inside the encryption. Ignoring broadcast.')
        return
    readPosition += sendersStreamLength
    behaviorBitfield = decryptedData[readPosition:readPosition + 4]
    readPosition += 4
    # Public keys are transmitted without the 0x04 uncompressed-point
    # prefix; restore it here.
    sendersPubSigningKey = '\x04' + \
        decryptedData[readPosition:readPosition + 64]
    readPosition += 64
    sendersPubEncryptionKey = '\x04' + \
        decryptedData[readPosition:readPosition + 64]
    readPosition += 64
    if sendersAddressVersion >= 3:
        requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += varintLength
        logger.debug('sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is %s' % requiredAverageProofOfWorkNonceTrialsPerByte)
        requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += varintLength
        logger.debug('sender\'s requiredPayloadLengthExtraBytes is %s' % requiredPayloadLengthExtraBytes)
    endOfPubkeyPosition = readPosition
    # RIPEMD-160 of SHA-512 of the two pubkeys = the sender's ripe.
    sha = hashlib.new('sha512')
    sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
    ripeHasher = hashlib.new('ripemd160')
    ripeHasher.update(sha.digest())
    calculatedRipe = ripeHasher.digest()
    if broadcastVersion == 4:
        if toRipe != calculatedRipe:
            logger.info('The encryption key used to encrypt this message doesn\'t match the keys inbedded in the message itself. Ignoring message.')
            return
    elif broadcastVersion == 5:
        calculatedTag = hashlib.sha512(hashlib.sha512(encodeVarint(
            sendersAddressVersion) + encodeVarint(sendersStream) + calculatedRipe).digest()).digest()[32:]
        if calculatedTag != embeddedTag:
            logger.debug('The tag and encryption key used to encrypt this message doesn\'t match the keys inbedded in the message itself. Ignoring message.')
            return
    messageEncodingType, messageEncodingTypeLength = decodeVarint(
        decryptedData[readPosition:readPosition + 9])
    if messageEncodingType == 0:
        return
    readPosition += messageEncodingTypeLength
    messageLength, messageLengthLength = decodeVarint(
        decryptedData[readPosition:readPosition + 9])
    readPosition += messageLengthLength
    message = decryptedData[readPosition:readPosition + messageLength]
    readPosition += messageLength
    readPositionAtBottomOfMessage = readPosition
    signatureLength, signatureLengthLength = decodeVarint(
        decryptedData[readPosition:readPosition + 9])
    readPosition += signatureLengthLength
    signature = decryptedData[
        readPosition:readPosition + signatureLength]
    signedData += decryptedData[:readPositionAtBottomOfMessage]
    if not highlevelcrypto.verify(signedData, signature, hexlify(sendersPubSigningKey)):
        logger.debug('ECDSA verify failed')
        return
    logger.debug('ECDSA verify passed')
    # Used to detect and ignore duplicate messages in our inbox
    sigHash = hashlib.sha512(hashlib.sha512(signature).digest()).digest()[32:]
    fromAddress = encodeAddress(
        sendersAddressVersion, sendersStream, calculatedRipe)
    logger.info('fromAddress: %s' % fromAddress)
    # Let's store the public key in case we want to reply to this person.
    sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
               fromAddress,
               sendersAddressVersion,
               decryptedData[:endOfPubkeyPosition],
               int(time.time()),
               'yes')
    # Check to see whether we happen to be awaiting this
    # pubkey in order to send a message. If we are, it will do the POW
    # and send it.
    self.possibleNewPubkey(fromAddress)
    fromAddress = encodeAddress(
        sendersAddressVersion, sendersStream, calculatedRipe)
    logger.debug('fromAddress: ' + fromAddress)
    decodedMessage = helper_msgcoding.MsgDecode(messageEncodingType, message)
    subject = decodedMessage.subject
    body = decodedMessage.body
    toAddress = '[Broadcast subscribers]'
    if helper_inbox.isMessageAlreadyInInbox(sigHash):
        logger.info('This broadcast is already in our inbox. Ignoring it.')
        return
    t = (inventoryHash, toAddress, fromAddress, subject, int(
        time.time()), body, 'inbox', messageEncodingType, 0, sigHash)
    helper_inbox.insert(t)
    queues.UISignalQueue.put(('displayNewInboxMessage', (
        inventoryHash, toAddress, fromAddress, subject, body)))
    # If we are behaving as an API then we might need to run an
    # outside command to let some program know that a new message
    # has arrived.
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'):
        try:
            apiNotifyPath = BMConfigParser().get(
                'bitmessagesettings', 'apinotifypath')
        except:
            apiNotifyPath = ''
        if apiNotifyPath != '':
            call([apiNotifyPath, "newBroadcast"])
    # Display timing data
    logger.info('Time spent processing this interesting broadcast: %s' % (time.time() - messageProcessingStartTime,))
def processmsg(self, data):
    """
    Process a received 'msg' object: try to decrypt it with each of our
    private keys, verify its signature, store the sender's pubkey, apply
    black/whitelist and POW policy, file it into the inbox, optionally
    re-broadcast it for mailing-list addresses, and finally share the
    embedded acknowledgement with the network.

    data -- the raw object payload (nonce + header + encrypted body).
    Returns None in all cases; early returns abort processing.
    """
    messageProcessingStartTime = time.time()
    shared.numberOfMessagesProcessed += 1
    queues.UISignalQueue.put((
        'updateNumberOfMessagesProcessed', 'no data'))
    readPosition = 20  # bypass the nonce, time, and object type
    msgVersion, msgVersionLength = decodeVarint(data[readPosition:readPosition + 9])
    if msgVersion != 1:
        logger.info('Cannot understand message versions other than one. Ignoring message.')
        return
    readPosition += msgVersionLength
    streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += streamNumberAsClaimedByMsgLength
    inventoryHash = calculateInventoryHash(data)
    initialDecryptionSuccessful = False
    # This is not an acknowledgement bound for me. See if it is a message
    # bound for me by trying to decrypt it with my private keys.
    for key, cryptorObject in shared.myECCryptorObjects.items():
        try:
            if initialDecryptionSuccessful:  # continue decryption attempts to avoid timing attacks
                cryptorObject.decrypt(data[readPosition:])
            else:
                decryptedData = cryptorObject.decrypt(data[readPosition:])
                toRipe = key  # This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
                initialDecryptionSuccessful = True
                logger.info('EC decryption successful using key associated with ripe hash: %s.' % hexlify(key))
        except Exception as err:
            # Decryption failing just means this key wasn't the recipient;
            # keep trying the remaining keys.
            pass
    if not initialDecryptionSuccessful:
        # This is not a message bound for me.
        logger.info('Length of time program spent failing to decrypt this message: %s seconds.' % (time.time() - messageProcessingStartTime,))
        return

    # This is a message bound for me.
    toAddress = shared.myAddressesByHash[
        toRipe]  # Look up my address based on the RIPE hash.
    # From here on we parse the *decrypted* plaintext with a fresh cursor.
    readPosition = 0
    sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    readPosition += sendersAddressVersionNumberLength
    if sendersAddressVersionNumber == 0:
        logger.info('Cannot understand sendersAddressVersionNumber = 0. Ignoring message.')
        return
    if sendersAddressVersionNumber > 4:
        logger.info('Sender\'s address version number %s not yet supported. Ignoring message.' % sendersAddressVersionNumber)
        return
    if len(decryptedData) < 170:
        # Smaller than any structurally valid plaintext; bail before parsing.
        logger.info('Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message.')
        return
    sendersStreamNumber, sendersStreamNumberLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    if sendersStreamNumber == 0:
        logger.info('sender\'s stream number is 0. Ignoring message.')
        return
    readPosition += sendersStreamNumberLength
    behaviorBitfield = decryptedData[readPosition:readPosition + 4]
    readPosition += 4
    # Pubkeys are transmitted without the 0x04 uncompressed-point prefix;
    # prepend it before handing them to the crypto layer.
    pubSigningKey = '\x04' + decryptedData[
        readPosition:readPosition + 64]
    readPosition += 64
    pubEncryptionKey = '\x04' + decryptedData[
        readPosition:readPosition + 64]
    readPosition += 64
    if sendersAddressVersionNumber >= 3:
        # v3+ senders embed their own POW demands in the pubkey data.
        requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += varintLength
        logger.info('sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is %s' % requiredAverageProofOfWorkNonceTrialsPerByte)
        requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += varintLength
        logger.info('sender\'s requiredPayloadLengthExtraBytes is %s' % requiredPayloadLengthExtraBytes)
    endOfThePublicKeyPosition = readPosition  # needed for when we store the pubkey in our database of pubkeys for later use.
    # The plaintext must name *our* ripe as the destination; otherwise
    # someone decrypted-and-re-encrypted a message meant for somebody else.
    if toRipe != decryptedData[readPosition:readPosition + 20]:
        logger.info('The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack.\n\
See: http://world.std.com/~dtd/sign_encrypt/sign_encrypt7.html \n\
your toRipe: %s\n\
embedded destination toRipe: %s' % (hexlify(toRipe), hexlify(decryptedData[readPosition:readPosition + 20]))
                    )
        return
    readPosition += 20
    messageEncodingType, messageEncodingTypeLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    readPosition += messageEncodingTypeLength
    messageLength, messageLengthLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    readPosition += messageLengthLength
    message = decryptedData[readPosition:readPosition + messageLength]
    # print 'First 150 characters of message:', repr(message[:150])
    readPosition += messageLength
    ackLength, ackLengthLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    readPosition += ackLengthLength
    ackData = decryptedData[readPosition:readPosition + ackLength]
    readPosition += ackLength
    positionOfBottomOfAckData = readPosition  # needed to mark the end of what is covered by the signature
    signatureLength, signatureLengthLength = decodeVarint(
        decryptedData[readPosition:readPosition + 10])
    readPosition += signatureLengthLength
    signature = decryptedData[
        readPosition:readPosition + signatureLength]
    # Signed data covers the object header fields (expiry/type from the raw
    # object) plus the plaintext up to and including the ack payload.
    signedData = data[8:20] + encodeVarint(1) + encodeVarint(streamNumberAsClaimedByMsg) + decryptedData[:positionOfBottomOfAckData]
    if not highlevelcrypto.verify(signedData, signature, hexlify(pubSigningKey)):
        logger.debug('ECDSA verify failed')
        return
    logger.debug('ECDSA verify passed')
    sigHash = hashlib.sha512(hashlib.sha512(signature).digest()).digest()[32:]  # Used to detect and ignore duplicate messages in our inbox

    # calculate the fromRipe.
    sha = hashlib.new('sha512')
    sha.update(pubSigningKey + pubEncryptionKey)
    ripe = hashlib.new('ripemd160')
    ripe.update(sha.digest())
    fromAddress = encodeAddress(
        sendersAddressVersionNumber, sendersStreamNumber, ripe.digest())

    # Let's store the public key in case we want to reply to this
    # person.
    sqlExecute(
        '''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
        fromAddress,
        sendersAddressVersionNumber,
        decryptedData[:endOfThePublicKeyPosition],
        int(time.time()),
        'yes')
    # Check to see whether we happen to be awaiting this
    # pubkey in order to send a message. If we are, it will do the POW
    # and send it.
    self.possibleNewPubkey(fromAddress)

    # If this message is bound for one of my version 3 addresses (or
    # higher), then we must check to make sure it meets our demanded
    # proof of work requirement. If this is bound for one of my chan
    # addresses then we skip this check; the minimum network POW is
    # fine.
    if decodeAddress(toAddress)[1] >= 3 and not BMConfigParser().safeGetBoolean(toAddress, 'chan'):  # If the toAddress version number is 3 or higher and not one of my chan addresses:
        if not shared.isAddressInMyAddressBookSubscriptionsListOrWhitelist(fromAddress):  # If I'm not friendly with this person:
            requiredNonceTrialsPerByte = BMConfigParser().getint(
                toAddress, 'noncetrialsperbyte')
            requiredPayloadLengthExtraBytes = BMConfigParser().getint(
                toAddress, 'payloadlengthextrabytes')
            if not protocol.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
                logger.info('Proof of work in msg is insufficient only because it does not meet our higher requirement.')
                return
    blockMessage = False  # Gets set to True if the user shouldn't see the message according to black or white lists.
    if BMConfigParser().get('bitmessagesettings', 'blackwhitelist') == 'black':  # If we are using a blacklist
        queryreturn = sqlQuery(
            '''SELECT label FROM blacklist where address=? and enabled='1' ''',
            fromAddress)
        if queryreturn != []:
            logger.info('Message ignored because address is in blacklist.')
            blockMessage = True
    else:  # We're using a whitelist
        queryreturn = sqlQuery(
            '''SELECT label FROM whitelist where address=?
            and enabled='1' ''',
            fromAddress)
        if queryreturn == []:
            logger.info('Message ignored because address not in whitelist.')
            blockMessage = True

    toLabel = BMConfigParser().get(toAddress, 'label')
    if toLabel == '':
        toLabel = toAddress

    decodedMessage = helper_msgcoding.MsgDecode(messageEncodingType, message)
    subject = decodedMessage.subject
    body = decodedMessage.body

    # Let us make sure that we haven't already received this message
    if helper_inbox.isMessageAlreadyInInbox(sigHash):
        logger.info('This msg is already in our inbox. Ignoring it.')
        blockMessage = True
    if not blockMessage:
        if messageEncodingType != 0:
            t = (inventoryHash, toAddress, fromAddress, subject, int(
                time.time()), body, 'inbox', messageEncodingType, 0, sigHash)
            helper_inbox.insert(t)

            queues.UISignalQueue.put(('displayNewInboxMessage', (
                inventoryHash, toAddress, fromAddress, subject, body)))

        # If we are behaving as an API then we might need to run an
        # outside command to let some program know that a new message
        # has arrived.
        if BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'):
            try:
                apiNotifyPath = BMConfigParser().get(
                    'bitmessagesettings', 'apinotifypath')
            except:
                # Missing option/section: fall through to "not configured".
                apiNotifyPath = ''
            if apiNotifyPath != '':
                call([apiNotifyPath, "newMessage"])

        # Let us now check and see whether our receiving address is
        # behaving as a mailing list
        if BMConfigParser().safeGetBoolean(toAddress, 'mailinglist') and messageEncodingType != 0:
            try:
                mailingListName = BMConfigParser().get(
                    toAddress, 'mailinglistname')
            except:
                mailingListName = ''
            # Let us send out this message as a broadcast
            subject = self.addMailingListNameToSubject(
                subject, mailingListName)
            # Let us now send this message out as a broadcast
            message = time.strftime("%a, %Y-%m-%d %H:%M:%S UTC", time.gmtime(
            )) + '   Message ostensibly from ' + fromAddress + ':\n\n' + body
            fromAddress = toAddress  # The fromAddress for the broadcast that we are about to send is the toAddress (my address) for the msg message we are currently processing.
            ackdataForBroadcast = OpenSSL.rand(
                32)  # We don't actually need the ackdataForBroadcast for acknowledgement since this is a broadcast message but we can use it to update the user interface when the POW is done generating.
            toAddress = '[Broadcast subscribers]'
            ripe = ''

            # We really should have a discussion about how to
            # set the TTL for mailing list broadcasts. This is obviously
            # hard-coded.
            TTL = 2*7*24*60*60  # 2 weeks
            t = ('',
                 toAddress,
                 ripe,
                 fromAddress,
                 subject,
                 message,
                 ackdataForBroadcast,
                 int(time.time()),  # sentTime (this doesn't change)
                 int(time.time()),  # lastActionTime
                 0,
                 'broadcastqueued',
                 0,
                 'sent',
                 messageEncodingType,
                 TTL)
            helper_sent.insert(t)

            queues.UISignalQueue.put(('displayNewSentMessage', (
                toAddress, '[Broadcast subscribers]', fromAddress, subject, message, ackdataForBroadcast)))
            queues.workerQueue.put(('sendbroadcast', ''))

    # Don't send ACK if invalid, blacklisted senders, invisible messages, disabled or chan
    if self.ackDataHasAValidHeader(ackData) and \
            not blockMessage and \
            messageEncodingType != 0 and \
            not BMConfigParser().safeGetBoolean(toAddress, 'dontsendack') and \
            not BMConfigParser().safeGetBoolean(toAddress, 'chan'):
        # Strip the 24-byte message header and share the bare ack object.
        shared.checkAndShareObjectWithPeers(ackData[24:])

    # Display timing data
    timeRequiredToAttemptToDecryptMessage = time.time(
    ) - messageProcessingStartTime
    shared.successfullyDecryptMessageTimings.append(
        timeRequiredToAttemptToDecryptMessage)
    # NOTE(review): 'sum' shadows the builtin here; harmless locally but
    # worth renaming in a future cleanup.
    sum = 0
    for item in shared.successfullyDecryptMessageTimings:
        sum += item
    logger.debug('Time to decrypt this message successfully: %s\n\
Average time for all message decryption successes since startup: %s.' %
                 (timeRequiredToAttemptToDecryptMessage, sum / len(shared.successfullyDecryptMessageTimings))
                 )
def run(self):
    """
    Main loop of the sendDataThread: pull (deststream, command, data)
    tuples off sendDataThreadQueue and act on them until told to shut
    down or a socket write fails, then tear down the socket and
    deregister this thread's queue.
    """
    logger.debug(
        'sendDataThread starting. ID: ' + str(id(self)) +
        '. Number of queues in sendDataQueues: ' +
        str(len(state.sendDataQueues)))
    # sendBytes() with no packet acts as a liveness check; a falsy return
    # ends the loop and triggers cleanup below.
    while self.sendBytes():
        deststream, command, data = self.sendDataThreadQueue.get()

        # deststream 0 means "any stream".
        if deststream == 0 or deststream in self.streamNumber:
            if command == 'shutdown':
                logger.debug('sendDataThread (associated with ' + str(self.peer) + ') ID: ' + str(id(self)) + ' shutting down now.')
                break
            # When you receive an incoming connection, a sendDataThread is
            # created even though you don't yet know what stream number the
            # remote peer is interested in. They will tell you in a version
            # message and if you too are interested in that stream then you
            # will continue on with the connection and will set the
            # streamNumber of this send data thread here:
            elif command == 'setStreamNumber':
                self.streamNumber = data
                logger.debug('setting the stream number to %s', ', '.join(str(x) for x in self.streamNumber))
            elif command == 'setRemoteProtocolVersion':
                specifiedRemoteProtocolVersion = data
                logger.debug('setting the remote node\'s protocol version in the sendDataThread (ID: ' + str(id(self)) + ') to ' + str(specifiedRemoteProtocolVersion))
                self.remoteProtocolVersion = specifiedRemoteProtocolVersion
            elif command == 'sendaddr':
                if self.connectionIsOrWasFullyEstablished:  # only send addr messages if we have sent and heard a verack from the remote node
                    numberOfAddressesInAddrMessage = len(data)
                    payload = ''
                    for hostDetails in data:
                        timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
                        payload += pack(
                            '>Q', timeLastReceivedMessageFromThisNode)  # now uses 64-bit time
                        payload += pack('>I', streamNumber)
                        payload += pack(
                            '>q', services)  # service bit flags offered by this node
                        payload += protocol.encodeHost(host)
                        payload += pack('>H', port)
                    payload = encodeVarint(
                        numberOfAddressesInAddrMessage) + payload
                    packet = protocol.CreatePacket('addr', payload)
                    if not self.sendBytes(packet):
                        break
            elif command == 'advertiseobject':
                # Hashes are batched by the holder and flushed later as inv.
                self.objectHashHolderInstance.holdHash(data)
            elif command == 'sendinv':
                if self.connectionIsOrWasFullyEstablished:  # only send inv messages if we have send and heard a verack from the remote node
                    payload = ''
                    for hash in data:
                        payload += hash
                    if payload != '':
                        # Each inventory hash is 32 bytes, hence the count.
                        payload = encodeVarint(len(payload) / 32) + payload
                        packet = protocol.CreatePacket('inv', payload)
                        if not self.sendBytes(packet):
                            break
            elif command == 'pong':
                # Only keep-alive if we've been quiet for ~5 minutes.
                if self.lastTimeISentData < (int(time.time()) - 298):
                    # Send out a pong message to keep the connection alive.
                    logger.debug('Sending pong to ' + str(self.peer) + ' to keep connection alive.')
                    packet = protocol.CreatePacket('pong')
                    if not self.sendBytes(packet):
                        break
            elif command == 'sendRawData':
                objectHash = None
                # data may be (objectHash, rawBytes) for upload tracking.
                if type(data) in [list, tuple]:
                    objectHash, data = data
                if not self.sendBytes(data):
                    break
                if objectHash:
                    PendingUpload().delete(objectHash)
            elif command == 'connectionIsOrWasFullyEstablished':
                self.connectionIsOrWasFullyEstablished = True
                self.services, self.sock = data
        elif self.connectionIsOrWasFullyEstablished:
            logger.error('sendDataThread ID: ' + str(id(self)) + ' ignoring command ' + command + ' because the thread is not in stream ' + str(deststream) + ' but in streams ' + ', '.join(str(x) for x in self.streamNumber))
        self.sendDataThreadQueue.task_done()
    # Flush if the cycle ended with break
    try:
        self.sendDataThreadQueue.task_done()
    except ValueError:
        # task_done() was already balanced for the last get().
        pass
    try:
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
    except:
        # Socket may already be closed or never fully set up.
        pass
    state.sendDataQueues.remove(self.sendDataThreadQueue)
    PendingUpload().threadEnd()
    logger.info('sendDataThread ending. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(state.sendDataQueues)))
    self.objectHashHolderInstance.close()
def run(self):
    """
    Address generator thread: consume requests from
    addressGeneratorQueue and create random or deterministic Bitmessage
    addresses (including chans), writing the results to the config file
    and notifying the UI / API queues.
    """
    while state.shutdown == 0:
        queueValue = queues.addressGeneratorQueue.get()
        # 0 means "use the configured/network default", filled in below.
        nonceTrialsPerByte = 0
        payloadLengthExtraBytes = 0
        live = True
        if queueValue[0] == 'createChan':
            command, addressVersionNumber, streamNumber, label, \
                deterministicPassphrase, live = queueValue
            eighteenByteRipe = False
            numberOfAddressesToMake = 1
            numberOfNullBytesDemandedOnFrontOfRipeHash = 1
        elif queueValue[0] == 'joinChan':
            command, chanAddress, label, deterministicPassphrase, \
                live = queueValue
            eighteenByteRipe = False
            addressVersionNumber = decodeAddress(chanAddress)[1]
            streamNumber = decodeAddress(chanAddress)[2]
            numberOfAddressesToMake = 1
            numberOfNullBytesDemandedOnFrontOfRipeHash = 1
        elif len(queueValue) == 7:
            command, addressVersionNumber, streamNumber, label, \
                numberOfAddressesToMake, deterministicPassphrase, \
                eighteenByteRipe = queueValue
            try:
                numberOfNullBytesDemandedOnFrontOfRipeHash = \
                    BMConfigParser().getint(
                        'bitmessagesettings',
                        'numberofnullbytesonaddress'
                    )
            except:
                if eighteenByteRipe:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                else:
                    # the default
                    numberOfNullBytesDemandedOnFrontOfRipeHash = 1
        elif len(queueValue) == 9:
            # Same as the 7-tuple form but with explicit POW parameters.
            command, addressVersionNumber, streamNumber, label, \
                numberOfAddressesToMake, deterministicPassphrase, \
                eighteenByteRipe, nonceTrialsPerByte, \
                payloadLengthExtraBytes = queueValue
            try:
                numberOfNullBytesDemandedOnFrontOfRipeHash = \
                    BMConfigParser().getint(
                        'bitmessagesettings',
                        'numberofnullbytesonaddress'
                    )
            except:
                if eighteenByteRipe:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                else:
                    # the default
                    numberOfNullBytesDemandedOnFrontOfRipeHash = 1
        elif queueValue[0] == 'stopThread':
            break
        else:
            # NOTE(review): this branch only logs and then falls through,
            # so 'command' and friends would be unbound (or stale from a
            # previous iteration) below — TODO confirm and add a
            # 'continue' upstream.
            logger.error(
                'Programming error: A structure with the wrong number'
                ' of values was passed into the addressGeneratorQueue.'
                ' Here is the queueValue: %r\n', queueValue)
        # NOTE(review): also logs-and-continues rather than aborting the
        # request — presumably relies on later branches not matching.
        if addressVersionNumber < 3 or addressVersionNumber > 4:
            logger.error(
                'Program error: For some reason the address generator'
                ' queue has been given a request to create at least'
                ' one version %s address which it cannot do.\n',
                addressVersionNumber)
        if nonceTrialsPerByte == 0:
            nonceTrialsPerByte = BMConfigParser().getint(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
        # Never demand less POW than the network minimum.
        if nonceTrialsPerByte < \
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte:
            nonceTrialsPerByte = \
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte
        if payloadLengthExtraBytes == 0:
            payloadLengthExtraBytes = BMConfigParser().getint(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        if payloadLengthExtraBytes < \
                defaults.networkDefaultPayloadLengthExtraBytes:
            payloadLengthExtraBytes = \
                defaults.networkDefaultPayloadLengthExtraBytes
        if command == 'createRandomAddress':
            queues.UISignalQueue.put(
                ('updateStatusBar', tr._translate("MainWindow", "Generating one new address")))
            # This next section is a little bit strange. We're going
            # to generate keys over and over until we find one
            # that starts with either \x00 or \x00\x00. Then when
            # we pack them into a Bitmessage address, we won't store
            # the \x00 or \x00\x00 bytes thus making the address shorter.
            startTime = time.time()
            numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
            potentialPrivSigningKey = OpenSSL.rand(32)
            potentialPubSigningKey = highlevelcrypto.pointMult(
                potentialPrivSigningKey)
            while True:
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                potentialPrivEncryptionKey = OpenSSL.rand(32)
                potentialPubEncryptionKey = highlevelcrypto.pointMult(
                    potentialPrivEncryptionKey)
                sha = hashlib.new('sha512')
                sha.update(
                    potentialPubSigningKey + potentialPubEncryptionKey)
                ripe = RIPEMD160Hash(sha.digest()).digest()
                if (ripe[:numberOfNullBytesDemandedOnFrontOfRipeHash] ==
                        '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash):
                    break
            logger.info('Generated address with ripe digest: %s', hexlify(ripe))
            try:
                logger.info(
                    'Address generator calculated %s addresses at %s'
                    ' addresses per second before finding one with'
                    ' the correct ripe-prefix.',
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix,
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime))
            except ZeroDivisionError:
                # The user must have a pretty fast computer.
                # time.time() - startTime equaled zero.
                pass
            address = encodeAddress(addressVersionNumber, streamNumber, ripe)

            # An excellent way for us to store our keys
            # is in Wallet Import Format. Let us convert now.
            # https://en.bitcoin.it/wiki/Wallet_import_format
            privSigningKey = '\x80' + potentialPrivSigningKey
            checksum = hashlib.sha256(
                hashlib.sha256(privSigningKey).digest()).digest()[0:4]
            privSigningKeyWIF = arithmetic.changebase(
                privSigningKey + checksum, 256, 58)
            privEncryptionKey = '\x80' + potentialPrivEncryptionKey
            checksum = hashlib.sha256(
                hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
            privEncryptionKeyWIF = arithmetic.changebase(
                privEncryptionKey + checksum, 256, 58)

            BMConfigParser().add_section(address)
            BMConfigParser().set(address, 'label', label)
            BMConfigParser().set(address, 'enabled', 'true')
            BMConfigParser().set(address, 'decoy', 'false')
            BMConfigParser().set(address, 'noncetrialsperbyte', str(nonceTrialsPerByte))
            BMConfigParser().set(address, 'payloadlengthextrabytes', str(payloadLengthExtraBytes))
            BMConfigParser().set(address, 'privsigningkey', privSigningKeyWIF)
            BMConfigParser().set(address, 'privencryptionkey', privEncryptionKeyWIF)
            BMConfigParser().save()

            # The API and the join and create Chan functionality
            # both need information back from the address generator.
            queues.apiAddressGeneratorReturnQueue.put(address)

            queues.UISignalQueue.put(
                ('updateStatusBar', tr._translate(
                    "MainWindow",
                    "Done generating address. Doing work necessary"
                    " to broadcast it...")))
            queues.UISignalQueue.put(
                ('writeNewAddressToTable', (label, address, streamNumber)))
            shared.reloadMyAddressHashes()
            if addressVersionNumber == 3:
                queues.workerQueue.put(('sendOutOrStoreMyV3Pubkey', ripe))
            elif addressVersionNumber == 4:
                queues.workerQueue.put(
                    ('sendOutOrStoreMyV4Pubkey', address))
        elif command == 'createDeterministicAddresses' \
                or command == 'getDeterministicAddress' \
                or command == 'createChan' or command == 'joinChan':
            if len(deterministicPassphrase) == 0:
                logger.warning(
                    'You are creating deterministic'
                    ' address(es) using a blank passphrase.'
                    ' Bitmessage will do it but it is rather stupid.')
            if command == 'createDeterministicAddresses':
                queues.UISignalQueue.put(
                    ('updateStatusBar', tr._translate("MainWindow", "Generating %1 new addresses.").arg(
                        str(numberOfAddressesToMake))))
            signingKeyNonce = 0
            encryptionKeyNonce = 1
            # We fill out this list no matter what although we only
            # need it if we end up passing the info to the API.
            listOfNewAddressesToSendOutThroughTheAPI = []

            for _ in range(numberOfAddressesToMake):
                # This next section is a little bit strange. We're
                # going to generate keys over and over until we find
                # one that has a RIPEMD hash that starts with either
                # \x00 or \x00\x00. Then when we pack them into a
                # Bitmessage address, we won't store the \x00 or
                # \x00\x00 bytes thus making the address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    # Private keys are derived from the passphrase plus a
                    # deterministic nonce, so the same passphrase always
                    # regenerates the same addresses.
                    potentialPrivSigningKey = hashlib.sha512(
                        deterministicPassphrase +
                        encodeVarint(signingKeyNonce)).digest()[:32]
                    potentialPrivEncryptionKey = hashlib.sha512(
                        deterministicPassphrase +
                        encodeVarint(encryptionKeyNonce)).digest()[:32]
                    potentialPubSigningKey = highlevelcrypto.pointMult(
                        potentialPrivSigningKey)
                    potentialPubEncryptionKey = highlevelcrypto.pointMult(
                        potentialPrivEncryptionKey)
                    signingKeyNonce += 2
                    encryptionKeyNonce += 2
                    sha = hashlib.new('sha512')
                    sha.update(
                        potentialPubSigningKey + potentialPubEncryptionKey)
                    ripe = RIPEMD160Hash(sha.digest()).digest()
                    if (ripe[:numberOfNullBytesDemandedOnFrontOfRipeHash] ==
                            '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash):
                        break
                logger.info('Generated address with ripe digest: %s', hexlify(ripe))
                try:
                    logger.info(
                        'Address generator calculated %s addresses'
                        ' at %s addresses per second before finding'
                        ' one with the correct ripe-prefix.',
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix,
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime))
                except ZeroDivisionError:
                    # The user must have a pretty fast computer.
                    # time.time() - startTime equaled zero.
                    pass
                address = encodeAddress(addressVersionNumber, streamNumber, ripe)

                saveAddressToDisk = True
                # If we are joining an existing chan, let us check
                # to make sure it matches the provided Bitmessage address
                if command == 'joinChan':
                    if address != chanAddress:
                        listOfNewAddressesToSendOutThroughTheAPI.append(
                            'chan name does not match address')
                        saveAddressToDisk = False
                if command == 'getDeterministicAddress':
                    saveAddressToDisk = False

                if saveAddressToDisk and live:
                    # An excellent way for us to store our keys is
                    # in Wallet Import Format. Let us convert now.
                    # https://en.bitcoin.it/wiki/Wallet_import_format
                    privSigningKey = '\x80' + potentialPrivSigningKey
                    checksum = hashlib.sha256(
                        hashlib.sha256(
                            privSigningKey).digest()).digest()[0:4]
                    privSigningKeyWIF = arithmetic.changebase(
                        privSigningKey + checksum, 256, 58)
                    privEncryptionKey = '\x80' + \
                        potentialPrivEncryptionKey
                    checksum = hashlib.sha256(
                        hashlib.sha256(
                            privEncryptionKey).digest()).digest()[0:4]
                    privEncryptionKeyWIF = arithmetic.changebase(
                        privEncryptionKey + checksum, 256, 58)

                    try:
                        # add_section raising means the address is already
                        # configured (EAFP duplicate check).
                        BMConfigParser().add_section(address)
                        addressAlreadyExists = False
                    except:
                        addressAlreadyExists = True

                    if addressAlreadyExists:
                        logger.info(
                            '%s already exists. Not adding it again.',
                            address)
                        queues.UISignalQueue.put((
                            'updateStatusBar',
                            tr._translate(
                                "MainWindow",
                                "%1 is already in 'Your Identities'."
                                " Not adding it again.").arg(address)))
                    else:
                        logger.debug('label: %s', label)
                        BMConfigParser().set(address, 'label', label)
                        BMConfigParser().set(address, 'enabled', 'true')
                        BMConfigParser().set(address, 'decoy', 'false')
                        if command == 'joinChan' \
                                or command == 'createChan':
                            BMConfigParser().set(address, 'chan', 'true')
                        BMConfigParser().set(
                            address, 'noncetrialsperbyte',
                            str(nonceTrialsPerByte))
                        BMConfigParser().set(
                            address, 'payloadlengthextrabytes',
                            str(payloadLengthExtraBytes))
                        BMConfigParser().set(
                            address, 'privSigningKey',
                            privSigningKeyWIF)
                        BMConfigParser().set(
                            address, 'privEncryptionKey',
                            privEncryptionKeyWIF)
                        BMConfigParser().save()

                        queues.UISignalQueue.put(
                            ('writeNewAddressToTable',
                             (label, address, str(streamNumber))))
                        listOfNewAddressesToSendOutThroughTheAPI.append(
                            address)
                        shared.myECCryptorObjects[ripe] = \
                            highlevelcrypto.makeCryptor(
                                hexlify(potentialPrivEncryptionKey))
                        shared.myAddressesByHash[ripe] = address
                        # v4-style address tag for identifying incoming
                        # pubkeys/messages without revealing the ripe.
                        tag = hashlib.sha512(
                            hashlib.sha512(
                                encodeVarint(addressVersionNumber) +
                                encodeVarint(streamNumber) + ripe
                            ).digest()).digest()[32:]
                        shared.myAddressesByTag[tag] = address
                        if addressVersionNumber == 3:
                            # If this is a chan address,
                            # the worker thread won't send out
                            # the pubkey over the network.
                            queues.workerQueue.put(
                                ('sendOutOrStoreMyV3Pubkey', ripe))
                        elif addressVersionNumber == 4:
                            queues.workerQueue.put(
                                ('sendOutOrStoreMyV4Pubkey', address))
                        queues.UISignalQueue.put(
                            ('updateStatusBar', tr._translate("MainWindow", "Done generating address")))
                elif saveAddressToDisk and not live \
                        and not BMConfigParser().has_section(address):
                    listOfNewAddressesToSendOutThroughTheAPI.append(
                        address)

            # Done generating addresses.
            if command == 'createDeterministicAddresses' \
                    or command == 'joinChan' or command == 'createChan':
                queues.apiAddressGeneratorReturnQueue.put(
                    listOfNewAddressesToSendOutThroughTheAPI)
            elif command == 'getDeterministicAddress':
                queues.apiAddressGeneratorReturnQueue.put(address)
        else:
            raise Exception(
                "Error in the addressGenerator thread. Thread was" +
                " given a command it could not understand: " + command)
        queues.addressGeneratorQueue.task_done()
def sendgetdata(self, hashes): if len(hashes) == 0: return logger.debug('sending getdata to retrieve %i objects', len(hashes)) payload = encodeVarint(len(hashes)) + ''.join(hashes) self.sendDataThreadQueue.put((0, 'sendRawData', protocol.CreatePacket('getdata', payload)), False)
def pack_binary(self): """ Pack the data into a binary string for storage in DB. The standard format is [ dataType(varInt) ][ actual data ] The actual data is stored as follows: [ length of question (varInt) ][ question ] [ amount of answers (varInt) ] for each answer: [ length of answer (varInt) ][ answer } [ amount of addresses (varInt) ] for each address: [ length of address (varInt) ][ address } [ amount of public keys (varInt) ] for each pubkey: [ length of signing pubkey (varInt) ][ signing pubkey } [ length of encryption pubkey (varInt) ][ encryption pubkey ] """ result = ConsensusData.pack_binary_header( ConsensusData.DATA_TYPE_VOTING, self.blockchain, self.time_data) result += encodeVarint(len(self.question)) result += self.question result += encodeVarint(len(self.answers)) for answer in self.answers: result += encodeVarint(len(answer)) result += answer result += encodeVarint(len(self.addresses)) for address in self.addresses: result += encodeVarint(len(address)) result += address if self.public_keys is None: result += encodeVarint(0) else: result += encodeVarint(len(self.public_keys)) for pubkey in self.public_keys: if pubkey is None: result += encodeVarint(0) result += encodeVarint(0) else: # Signing key first if pubkey[0] is None: result += encodeVarint(0) else: result += encodeVarint(len(pubkey[0])) result += pubkey[0] # Then encryption key if pubkey[1] is None: result += encodeVarint(0) else: result += encodeVarint(len(pubkey[1])) result += pubkey[1] return result
def assembleVersionMessage(
        remoteHost, remotePort, participatingStreams, server=False,
        nodeid=None):
    """
    Construct the payload of a version message and return the bytes
    produced by running `CreatePacket` on it.
    """
    # Service bitflags this node advertises (used twice in the payload).
    offeredServices = (
        NODE_NETWORK
        | (NODE_SSL if haveSSL(server) else 0)
        | (NODE_DANDELION if state.dandelion else 0))

    parts = [pack('>L', 3)]                 # protocol version
    parts.append(pack('>q', offeredServices))
    parts.append(pack('>q', int(time.time())))
    # boolservices of remote connection; ignored by the remote host.
    parts.append(pack('>q', 1))
    if checkSocksIP(remoteHost) and server:
        # prevent leaking of tor outbound IP
        parts.append(encodeHost('127.0.0.1'))
        parts.append(pack('>H', 8444))
    else:
        # use first 16 bytes if host data is longer,
        # for example in case of onion v3 service
        try:
            parts.append(encodeHost(remoteHost)[:16])
        except socket.error:
            parts.append(encodeHost('127.0.0.1'))
        parts.append(pack('>H', remotePort))  # remote IPv6 and port

    parts.append(pack('>q', offeredServices))  # my services, repeated
    # 127.0.0.1 as an IPv4-mapped IPv6 address. The remote host ignores
    # this and uses the actually-connected IP instead.
    parts.append(
        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF'
        + pack('>L', 2130706433))

    # Pick the local port to advertise: a separate extPort when we talk
    # over clearnet, the onion port for incoming Tor connections,
    # otherwise the normal listening port.
    extport = BMConfigParser().safeGetInt('bitmessagesettings', 'extport')
    if extport and (
            (server and not checkSocksIP(remoteHost)) or (
                BMConfigParser().get(
                    'bitmessagesettings', 'socksproxytype') == 'none'
                and not server)):
        parts.append(pack('>H', extport))
    elif checkSocksIP(remoteHost) and server:
        # incoming connection over Tor
        parts.append(pack(
            '>H',
            BMConfigParser().getint('bitmessagesettings', 'onionport')))
    else:
        # no extport and not incoming over Tor
        parts.append(pack(
            '>H', BMConfigParser().getint('bitmessagesettings', 'port')))

    parts.append(
        nodeid[0:8] if nodeid is not None
        else eightBytesOfRandomDataUsedToDetectConnectionsToSelf)

    userAgent = '/PyBitmessage:' + softwareVersion + '/'
    parts.append(encodeVarint(len(userAgent)) + userAgent)

    # Streams: the advertised count is the full set, but at most 160000
    # stream numbers are actually listed (protocol limit, see spec).
    parts.append(encodeVarint(len(participatingStreams)))
    for stream in sorted(participatingStreams)[:160000]:
        parts.append(encodeVarint(stream))

    return CreatePacket('version', ''.join(parts))
def run(self): # pylint: disable=too-many-branches while not state.shutdown: # pylint: disable=too-many-nested-blocks chunk = [] while True: # Dandelion fluff trigger by expiration handleExpiredDandelion(Dandelion().expire()) try: data = invQueue.get(False) chunk.append((data[0], data[1])) # locally generated if len(data) == 2 or data[2] is None: self.handleLocallyGenerated(data[0], data[1]) except Queue.Empty: break if chunk: for connection in BMConnectionPool().connections(): fluffs = [] stems = [] for inv in chunk: if inv[0] not in connection.streams: continue try: with connection.objectsNewToThemLock: del connection.objectsNewToThem[inv[1]] except KeyError: continue try: if connection == Dandelion().objectChildStem( inv[1]): # Fluff trigger by RNG # auto-ignore if config set to 0, i.e. dandelion is off if random.randint(1, 100) >= state.dandelion: fluffs.append(inv[1]) # send a dinv only if the stem node supports dandelion elif connection.services & protocol.NODE_DANDELION > 0: stems.append(inv[1]) else: fluffs.append(inv[1]) except KeyError: fluffs.append(inv[1]) if fluffs: random.shuffle(fluffs) connection.append_write_buf( protocol.CreatePacket( 'inv', addresses.encodeVarint(len(fluffs)) + ''.join(fluffs))) if stems: random.shuffle(stems) connection.append_write_buf( protocol.CreatePacket( 'dinv', addresses.encodeVarint(len(stems)) + ''.join(stems))) invQueue.iterate() for _ in range(len(chunk)): invQueue.task_done() if Dandelion().refresh < time(): Dandelion().reRandomiseStems() self.stop.wait(1)
def sendinvMessageToJustThisOnePeer(self, numberOfObjects, payload):
    """
    Send an ``inv`` message to this one peer only.

    ``payload`` is the already-concatenated inventory hashes; the varint
    object count is prepended here before packetising.
    """
    payload = encodeVarint(numberOfObjects) + payload
    # Lazy %-style args: the message is only formatted when DEBUG logging
    # is actually enabled (was eager str() + concatenation).
    logger.debug(
        'Sending huge inv message with %s objects to just this one peer',
        numberOfObjects)
    self.sendDataThreadQueue.put(
        (0, 'sendRawData', protocol.CreatePacket('inv', payload)))
def assembleVersionMessage(remoteHost, remotePort, participatingStreams,
                           server=False, nodeid=None):
    """
    Assemble a Bitmessage ``version`` message payload for the peer at
    ``remoteHost``:``remotePort`` and return it wrapped in a packet.

    The byte layout is order-critical: each ``payload +=`` below appends
    the next wire field of the version message.
    """
    payload = ''
    payload += pack('>L', 3)  # protocol version.
    # bitflags of the services I offer.
    payload += pack('>q', NODE_NETWORK | (NODE_SSL if haveSSL(server) else 0))
    payload += pack('>q', int(time.time()))  # timestamp
    payload += pack(
        '>q', 1)  # boolservices of remote connection; ignored by the remote host.
    if checkSocksIP(remoteHost) and server:
        # prevent leaking of tor outbound IP
        payload += encodeHost('127.0.0.1')
        payload += pack('>H', 8444)
    else:
        payload += encodeHost(remoteHost)
        payload += pack('>H', remotePort)  # remote IPv6 and port
    # bitflags of the services I offer (repeated for the "my address" field).
    payload += pack('>q', NODE_NETWORK | (NODE_SSL if haveSSL(server) else 0))
    # IPv4-mapped IPv6 prefix + 127.0.0.1. This will be ignored by the
    # remote host; the actual remote connected IP will be used.
    payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack(
        '>L', 2130706433)
    # Pick the port to advertise:
    # we have a separate extPort and
    # incoming over clearnet or
    # outgoing through clearnet
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') \
            and state.extPort \
            and ((server and not checkSocksIP(remoteHost))
                 or (BMConfigParser().get(
                     "bitmessagesettings", "socksproxytype") == "none"
                     and not server)):
        payload += pack('>H', state.extPort)
    elif checkSocksIP(remoteHost) and server:
        # incoming connection over Tor
        payload += pack(
            '>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
    else:
        # no extPort and not incoming over Tor
        payload += pack(
            '>H', BMConfigParser().getint('bitmessagesettings', 'port'))
    # NOTE(review): reseeds the global PRNG from system entropy on every
    # call — looks unnecessary here; confirm before removing.
    random.seed()
    if nodeid is not None:
        payload += nodeid[0:8]
    else:
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
    userAgent = '/PyBitmessage:' + softwareVersion + '/'
    payload += encodeVarint(len(userAgent))
    payload += userAgent
    # Streams
    payload += encodeVarint(len(participatingStreams))
    count = 0
    for stream in sorted(participatingStreams):
        payload += encodeVarint(stream)
        count += 1
        # protocol limit, see specification
        if count >= 160000:
            break
    return CreatePacket('version', payload)
def run(self):
    """
    Thread main loop: drain ``invQueue``, register each object with the
    Dandelion map / connection pool, then advertise it to every connection
    either as a normal ``inv`` (fluff) or a Dandelion ``dinv`` (stem).
    """
    while not state.shutdown:
        chunk = []
        while True:
            try:
                data = invQueue.get(False)
                # data is (stream, hash[, source address]).
                chunk.append((data[0], data[1]))
                # locally generated
                if len(data) == 2:
                    Dandelion().addHash(data[1], None)
                    BMConnectionPool().handleReceivedObject(data[0], data[1])
                # came over the network
                else:
                    source = BMConnectionPool().getConnectionByAddr(data[2])
                    BMConnectionPool().handleReceivedObject(
                        data[0], data[1], source)
            except Queue.Empty:
                break
            # connection not found, handle it as if generated locally
            except KeyError:
                BMConnectionPool().handleReceivedObject(data[0], data[1])

        if chunk:
            for connection in BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                fluffs = []
                stems = []
                for inv in chunk:
                    # Skip peers not participating in this object's stream.
                    if inv[0] not in connection.streams:
                        continue
                    try:
                        with connection.objectsNewToThemLock:
                            # KeyError here means the peer already knows
                            # this object; nothing to advertise.
                            del connection.objectsNewToThem[inv[1]]
                    except KeyError:
                        continue
                    try:
                        if connection == Dandelion().hashMap[inv[1]]:
                            # Fluff trigger by RNG
                            # auto-ignore if config set to 0, i.e. dandelion is off
                            # send a normal inv if stem node doesn't support dandelion
                            # BUGFIX: "dandelion" is a percentage, so read it
                            # with safeGetInt (was safeGetBoolean, which made
                            # the randint comparison meaningless); and test the
                            # service bit with & (was |, which is always > 0).
                            if randint(1, 100) < BMConfigParser().safeGetInt(
                                    "network", "dandelion") and \
                                    connection.services & \
                                    protocol.NODE_DANDELION > 0:
                                stems.append(inv[1])
                            else:
                                fluffs.append(inv[1])
                    except KeyError:
                        # Not a tracked stem object: announce normally.
                        fluffs.append(inv[1])

                if fluffs:
                    # Shuffle so announcement order leaks no information.
                    shuffle(fluffs)
                    connection.append_write_buf(protocol.CreatePacket(
                        'inv',
                        addresses.encodeVarint(len(fluffs)) + "".join(fluffs)))
                if stems:
                    shuffle(stems)
                    connection.append_write_buf(protocol.CreatePacket(
                        'dinv',
                        addresses.encodeVarint(len(stems)) + "".join(stems)))

        invQueue.iterate()
        # One task_done() per item drained above.
        for _ in range(len(chunk)):
            invQueue.task_done()

        # Periodically re-randomise the Dandelion stem mapping.
        if Dandelion().refresh < time():
            BMConnectionPool().reRandomiseDandelionStems()

        self.stop.wait(1)