Example #1
def _checkAndShareMsgWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    Inventory()[inventoryHash] = (objectType, streamNumber, data, embeddedTime,
                                  '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
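The readPosition = 20 used by these handlers skips a fixed object header. A minimal sketch of that prefix, assuming the layout implied by unpack('>Q', data[8:16]) above and the "QQIvv" format string in the bm_command_object examples further down (8-byte nonce, 8-byte big-endian time, 4-byte big-endian object type); the helper name is hypothetical:

from struct import unpack

def _parse_object_header(data):
    """Hypothetical helper: decode the fixed 20-byte prefix of an object."""
    nonce = data[0:8]                         # proof-of-work nonce (opaque here)
    embeddedTime, = unpack('>Q', data[8:16])  # embedded/expiry time, big-endian
    objectType, = unpack('>I', data[16:20])   # 0 getpubkey, 1 pubkey, 2 msg, 3 broadcast
    return nonce, embeddedTime, objectType    # varint fields start at offset 20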
Example #2
def _checkAndSharePubkeyWithPeers(data):
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.',
                     streamNumber)
        return
    if addressVersion >= 4:
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s', hexlify(tag))
    else:
        tag = ''

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = 1
    Inventory()[inventoryHash] = (objectType, streamNumber, data, embeddedTime,
                                  tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
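These handlers assume decodeVarint returns a (value, encodedLength) pair, which is why they slice at most 9 or 10 bytes of input. A rough stand-in, assuming the Bitcoin-style var_int encoding in network byte order that the Bitmessage protocol documentation describes; treat it as a sketch, not the project's implementation:

from struct import unpack

def decodeVarint_sketch(data):
    """Return (value, length of the encoding) for a Bitmessage-style var_int."""
    if not data:
        return 0, 0
    first, = unpack('>B', data[0:1])
    if first < 0xfd:
        return first, 1                        # value encoded directly in one byte
    if first == 0xfd:
        return unpack('>H', data[1:3])[0], 3   # 2-byte big-endian value
    if first == 0xfe:
        return unpack('>I', data[1:5])[0], 5   # 4-byte big-endian value
    return unpack('>Q', data[1:9])[0], 9       # 8-byte big-endian value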
Example #3
def _checkAndShareBroadcastWithPeers(data):
    if len(data) < 180:
        logger.debug(
            'The payload length of this broadcast packet is unreasonably low.'
            ' Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug(
                'The streamNumber %s isn\'t one we are interested in.',
                streamNumber)
            return
    if broadcastVersion >= 3:
        tag = data[readPosition:readPosition + 32]
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Example #4
def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info(
            'getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %i isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this getpubkey request. Ignoring it.')
        return

    objectType = 0
    Inventory()[inventoryHash] = (objectType, streamNumber, data, embeddedTime,
                                  '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
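In all of these handlers, Inventory() is used purely as a mapping from a 32-byte inventory hash to an (objectType, streamNumber, payload, embeddedTime, tag) tuple. For reading the snippets in isolation, a hypothetical in-memory stand-in is enough; the real class is a shared, persistent singleton:

class _InventorySketch(dict):
    """Hypothetical stand-in: maps inventoryHash -> (objectType, streamNumber,
    payload, embeddedTime, tag)."""
    def flush(self):
        pass  # the real implementation writes pending entries out to disk

_inventory_singleton = _InventorySketch()

def Inventory():
    # The handlers only rely on `in`, item assignment, and flush().
    return _inventory_singleton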
Example #5
def _checkAndSharePubkeyWithPeers(data):
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.',
                     streamNumber)
        return
    if addressVersion >= 4:
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s', hexlify(tag))
    else:
        tag = ''

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = 1
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))


    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Example #6
def _checkAndShareBroadcastWithPeers(data):
    if len(data) < 180:
        logger.debug(
            'The payload length of this broadcast packet is unreasonably low.'
            ' Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug(
                'The streamNumber %s isn\'t one we are interested in.',
                streamNumber)
            return
    if broadcastVersion >= 3:
        tag = data[readPosition:readPosition + 32]
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Example #7
def _checkAndShareMsgWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Example #8
def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info(
            'getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    if len(data) > 200:
        logger.info(
            'getpubkey is abnormally long. Sanity check failed.'
            ' Ignoring object.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this getpubkey request. Ignoring it.')
        return

    objectType = 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Example #9
def doCleanShutdown():
    # Used to tell proof of work worker threads and the
    # objectProcessorThread to exit.
    state.shutdown = 1
    objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
    for thread in threading.enumerate():
        if thread.isAlive() and isinstance(thread, StoppableThread):
            thread.stopThread()

    UISignalQueue.put(
        ('updateStatusBar', 'Saving the knownNodes list of peers to disk...'))
    logger.info('Saving knownNodes list of peers to disk')
    saveKnownNodes()
    logger.info('Done saving knownNodes list of peers to disk')
    UISignalQueue.put(('updateStatusBar',
                       'Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk. This should normally only take a second...'
    ))
    Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have incremented the
    # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit.
    while state.shutdown == 1:
        time.sleep(.1)

    # This one last useless query will guarantee that the previous flush committed and that the
    # objectProcessorThread committed before we close the program.
    sqlQuery('SELECT address FROM subscriptions')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')

    # Wait long enough to guarantee that any running proof of work worker threads will check the
    # shutdown variable and exit. If the main thread closes before they do then they won't stop.
    time.sleep(.25)

    for thread in threading.enumerate():
        if thread is not threading.currentThread() and isinstance(
                thread, StoppableThread):
            logger.debug("Waiting for thread %s", thread.name)
            thread.join()

    # flush queued
    for queue in (workerQueue, UISignalQueue, addressGeneratorQueue,
                  objectProcessorQueue):
        while True:
            try:
                queue.get(False)
                queue.task_done()
            except Queue.Empty:
                break

    if shared.thisapp.daemon:
        logger.info('Clean shutdown complete.')
        shared.thisapp.cleanup()
        os._exit(0)
    else:
        logger.info('Core shutdown complete.')
    for thread in threading.enumerate():
        logger.debug("Thread %s still running", thread.name)
Example #10
    def bm_command_object(self):
        """Incoming object, process it"""
        objectOffset = self.payloadOffset
        nonce, expiresTime, objectType, version, streamNumber = \
            self.decode_payload_content("QQIvv")
        self.object = BMObject(nonce, expiresTime, objectType, version,
                               streamNumber, self.payload, self.payloadOffset)

        if len(self.payload) - self.payloadOffset > MAX_OBJECT_PAYLOAD_SIZE:
            logger.info(
                'The payload length of this object is too large (%d bytes).'
                ' Ignoring it.',
                len(self.payload) - self.payloadOffset)
            raise BMProtoExcessiveDataError()

        try:
            self.object.checkProofOfWorkSufficient()
            self.object.checkEOLSanity()
            self.object.checkAlreadyHave()
        except (BMObjectExpiredError, BMObjectAlreadyHaveError,
                BMObjectInsufficientPOWError):
            BMProto.stopDownloadingObject(self.object.inventoryHash)
            raise
        try:
            self.object.checkStream()
        except BMObjectUnwantedStreamError:
            acceptmismatch = BMConfigParser().get("inventory",
                                                  "acceptmismatch")
            BMProto.stopDownloadingObject(self.object.inventoryHash,
                                          acceptmismatch)
            if not acceptmismatch:
                raise

        try:
            self.object.checkObjectByType()
            objectProcessorQueue.put(
                (self.object.objectType, buffer(self.object.data)))
        except BMObjectInvalidError:
            BMProto.stopDownloadingObject(self.object.inventoryHash, True)
        else:
            try:
                del missingObjects[self.object.inventoryHash]
            except KeyError:
                pass

        if self.object.inventoryHash in Inventory() and Dandelion().hasHash(
                self.object.inventoryHash):
            Dandelion().removeHash(self.object.inventoryHash,
                                   "cycle detection")

        Inventory()[self.object.inventoryHash] = (
            self.object.objectType, self.object.streamNumber,
            buffer(self.payload[objectOffset:]), self.object.expiresTime,
            buffer(self.object.tag))
        self.handleReceivedObject(self.object.streamNumber,
                                  self.object.inventoryHash)
        invQueue.put((self.object.streamNumber, self.object.inventoryHash,
                      self.destination))
        return True
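One portability note on bm_command_object: buffer() is a Python 2 builtin, so these snippets are Python 2 code. If they were ported to Python 3, memoryview (or plain bytes) would be the closest replacement; a purely illustrative shim, not part of the original code:

import sys

if sys.version_info[0] >= 3:
    def buffer(obj, offset=0, size=None):
        # shadows the Python 2 builtin on purpose;
        # memoryview gives a comparable zero-copy, read-only view of the payload
        view = memoryview(obj)[offset:]
        return view if size is None else view[:size]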
Example #11
    def bm_command_object(self):
        objectOffset = self.payloadOffset
        nonce, expiresTime, objectType, version, streamNumber = self.decode_payload_content(
            "QQIvv")
        self.object = BMObject(nonce, expiresTime, objectType, version,
                               streamNumber, self.payload, self.payloadOffset)

        if (len(self.payload) - self.payloadOffset
                > BMProto.maxObjectPayloadSize):
            logger.info(
                'The payload length of this object is too large (%s bytes).'
                ' Ignoring it.', len(self.payload) - self.payloadOffset)
            raise BMProtoExcessiveDataError()

        try:
            self.object.checkProofOfWorkSufficient()
            self.object.checkEOLSanity()
            self.object.checkAlreadyHave()
        except (BMObjectExpiredError, BMObjectAlreadyHaveError,
                BMObjectInsufficientPOWError) as e:
            BMProto.stopDownloadingObject(self.object.inventoryHash)
            raise e
        try:
            self.object.checkStream()
        except BMObjectUnwantedStreamError as e:
            BMProto.stopDownloadingObject(
                self.object.inventoryHash,
                BMConfigParser().get("inventory", "acceptmismatch"))
            if not BMConfigParser().get("inventory", "acceptmismatch"):
                raise e

        try:
            self.object.checkObjectByType()
            objectProcessorQueue.put(
                (self.object.objectType, buffer(self.object.data)))
        except BMObjectInvalidError as e:
            BMProto.stopDownloadingObject(self.object.inventoryHash, True)
        else:
            try:
                del state.missingObjects[self.object.inventoryHash]
            except KeyError:
                pass

        Inventory()[self.object.inventoryHash] = (
            self.object.objectType, self.object.streamNumber,
            buffer(self.payload[objectOffset:]), self.object.expiresTime,
            buffer(self.object.tag))
        invQueue.put((self.object.streamNumber, self.object.inventoryHash,
                      self.destination))
        return True
Example #12
    def bm_command_object(self):
        objectOffset = self.payloadOffset
        nonce, expiresTime, objectType, version, streamNumber = \
            self.decode_payload_content("QQIvv")
        self.object = BMObject(nonce, expiresTime, objectType, version,
                               streamNumber, self.payload, self.payloadOffset)

        if (len(self.payload) - self.payloadOffset
                > BMProto.maxObjectPayloadSize):
            logger.info(
                'The payload length of this object is too large (%s bytes).'
                ' Ignoring it.', len(self.payload) - self.payloadOffset)
            raise BMProtoExcessiveDataError()

        try:
            self.object.checkProofOfWorkSufficient()
            self.object.checkEOLSanity()
            self.object.checkAlreadyHave()
        except (BMObjectExpiredError, BMObjectAlreadyHaveError, BMObjectInsufficientPOWError) as e:
            BMProto.stopDownloadingObject(self.object.inventoryHash)
            raise e
        try:
            self.object.checkStream()
        except (BMObjectUnwantedStreamError,) as e:
            BMProto.stopDownloadingObject(
                self.object.inventoryHash,
                BMConfigParser().get("inventory", "acceptmismatch"))
            if not BMConfigParser().get("inventory", "acceptmismatch"):
                raise e

        try:
            self.object.checkObjectByType()
            objectProcessorQueue.put((self.object.objectType, buffer(self.object.data)))
        except BMObjectInvalidError as e:
            BMProto.stopDownloadingObject(self.object.inventoryHash, True)
        else:
            try:
                del state.missingObjects[self.object.inventoryHash]
            except KeyError:
                pass

        Inventory()[self.object.inventoryHash] = (
            self.object.objectType, self.object.streamNumber,
            buffer(self.payload[objectOffset:]), self.object.expiresTime,
            buffer(self.object.tag))
        invQueue.put((self.object.streamNumber, self.object.inventoryHash, self.destination))
        return True
Example #13
def doCleanShutdown():
    # Used to tell proof of work worker threads and the
    # objectProcessorThread to exit.
    state.shutdown = 1

    # Stop sources of new threads
    for thread in threading.enumerate():
        if type(thread).__name__ not in ('outgoingSynSender',
                                         'singleListener'):
            continue
        thread.stopThread()
        thread.join()

    protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data'))
    objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
    for thread in threading.enumerate():
        if thread.isAlive() and isinstance(thread, StoppableThread):
            thread.stopThread()

    UISignalQueue.put(
        ('updateStatusBar', 'Saving the knownNodes list of peers to disk...'))
    logger.info('Saving knownNodes list of peers to disk')
    saveKnownNodes()
    logger.info('Done saving knownNodes list of peers to disk')
    UISignalQueue.put(('updateStatusBar',
                       'Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk. This should normally only take a second...'
    ))
    Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have incremented the
    # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit.
    for thread in threading.enumerate():
        if type(thread).__name__ != 'objectProcessor':
            continue
        thread.join()
        break

    # This will guarantee that the previous flush committed and that the
    # objectProcessorThread committed before we close the program.
    sqlStoredProcedure('commit')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')
    for thread in threading.enumerate():
        if type(thread).__name__ != 'sqlThread':
            continue
        thread.join()
        break

    # Wait long enough to guarantee that any running proof of work worker threads will check the
    # shutdown variable and exit. If the main thread closes before they do then they won't stop.
    time.sleep(.25)

    for thread in threading.enumerate():
        if isinstance(thread, sendDataThread):
            thread.sendDataThreadQueue.put((0, 'shutdown', 'no data'))
        if thread is not threading.currentThread() and isinstance(
                thread,
                StoppableThread) and not isinstance(thread, outgoingSynSender):
            logger.debug("Waiting for thread %s", thread.name)
            thread.join()

    # flush queued
    for queue in (workerQueue, UISignalQueue, addressGeneratorQueue,
                  objectProcessorQueue):
        while True:
            try:
                queue.get(False)
                queue.task_done()
            except Queue.Empty:
                break

    logger.info('Clean shutdown complete.')

    for thread in threading.enumerate():
        if thread is threading.currentThread():
            continue
        logger.debug("Thread %s still running", thread.name)

    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon'):
        shared.thisapp.cleanup()
        os._exit(0)