Example no. 1
0
def doCleanShutdown():
    state.shutdown = 1  #Used to tell proof of work worker threads and the objectProcessorThread to exit.
    objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
    for thread in threading.enumerate():
        if thread.isAlive() and isinstance(thread, StoppableThread):
            thread.stopThread()

    UISignalQueue.put(
        ('updateStatusBar', 'Saving the knownNodes list of peers to disk...'))
    logger.info('Saving knownNodes list of peers to disk')
    saveKnownNodes()
    logger.info('Done saving knownNodes list of peers to disk')
    UISignalQueue.put(('updateStatusBar',
                       'Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk. This should normally only take a second...'
    ))
    Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have incremented the
    # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit.
    while state.shutdown == 1:
        time.sleep(.1)

    # This one last useless query will guarantee that the previous flush committed and that the
    # objectProcessorThread committed before we close the program.
    sqlQuery('SELECT address FROM subscriptions')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')

    # Wait long enough to guarantee that any running proof of work worker threads will check the
    # shutdown variable and exit. If the main thread closes before they do then they won't stop.
    time.sleep(.25)

    for thread in threading.enumerate():
        if thread is not threading.currentThread() and isinstance(
                thread, StoppableThread):
            logger.debug("Waiting for thread %s", thread.name)
            thread.join()

    # flush queued
    for queue in (workerQueue, UISignalQueue, addressGeneratorQueue,
                  objectProcessorQueue):
        while True:
            try:
                queue.get(False)
                queue.task_done()
            except Queue.Empty:
                break

    if shared.thisapp.daemon:
        logger.info('Clean shutdown complete.')
        shared.thisapp.cleanup()
        os._exit(0)
    else:
        logger.info('Core shutdown complete.')
    for thread in threading.enumerate():
        logger.debug("Thread %s still running", thread.name)
Example no. 2
    def run(self):
        gc.disable()
        timeWeLastClearedInventoryAndPubkeysTables = 0
        try:
            shared.maximumLengthOfTimeToBotherResendingMessages = (
                float(BMConfigParser().get('bitmessagesettings',
                                           'stopresendingafterxdays')) * 24 *
                60 * 60) + (float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxmonths')) *
                            (60 * 60 * 24 * 365) / 12)
        except:
            # Either the user hasn't set stopresendingafterxdays and stopresendingafterxmonths yet or the options are missing from the config file.
            shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')

        # initial wait
        if state.shutdown == 0:
            self.stop.wait(singleCleaner.cycleLength)

        while state.shutdown == 0:
            queues.UISignalQueue.put((
                'updateStatusBar', 'Doing housekeeping (Flushing inventory in memory to disk...)'))
            Inventory().flush()
            queues.UISignalQueue.put(('updateStatusBar', ''))
            
            # If we are running as a daemon then we are going to fill up the UI
            # queue which will never be handled by a UI. We should clear it to
            # save memory.
            if BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon'):
                queues.UISignalQueue.queue.clear()
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                Inventory().clean()
                # pubkeys
                sqlExecute(
                    '''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''',
                    int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)

                # Let us resend getpubkey objects if we have not yet heard a pubkey, and also msg objects if we have not yet heard an acknowledgement
                queryreturn = sqlQuery(
                    '''select toaddress, ackdata, status FROM sent WHERE ((status='awaitingpubkey' OR status='msgsent') AND folder='sent' AND sleeptill<? AND senttime>?) ''',
                    int(time.time()),
                    int(time.time()) - shared.maximumLengthOfTimeToBotherResendingMessages)
                for row in queryreturn:
                    if len(row) < 2:
                        logger.error('Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
                        self.stop.wait(3)
                        break
                    toAddress, ackData, status = row
                    if status == 'awaitingpubkey':
                        resendPubkeyRequest(toAddress)
                    elif status == 'msgsent':
                        resendMsg(ackData)

            # cleanup old nodes
            now = int(time.time())
            with knownnodes.knownNodesLock:
                for stream in knownnodes.knownNodes:
                    keys = knownnodes.knownNodes[stream].keys()
                    for node in keys:
                        try:
                            # scrap old nodes
                            if now - knownnodes.knownNodes[stream][node]["lastseen"] > 2419200: # 28 days
                                shared.needToWriteKnownNodesToDisk = True
                                del knownnodes.knownNodes[stream][node]
                                continue
                            # scrap old nodes with low rating
                            if now - knownnodes.knownNodes[stream][node]["lastseen"] > 10800 and knownnodes.knownNodes[stream][node]["rating"] <= knownnodes.knownNodesForgetRating:
                                shared.needToWriteKnownNodesToDisk = True
                                del knownnodes.knownNodes[stream][node]
                                continue
                        except TypeError:
                            print "Error in %s" % (str(node))
                    keys = []

            # Let us write out the knownNodes to disk if there is anything new to write out.
            if shared.needToWriteKnownNodesToDisk:
                try:
                    knownnodes.saveKnownNodes()
                except Exception as err:
                    if "Errno 28" in str(err):
                        logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ')
                        queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
                        if shared.daemon:
                            os._exit(0)
                shared.needToWriteKnownNodesToDisk = False

#            # clear download queues
#            for thread in threading.enumerate():
#                if thread.isAlive() and hasattr(thread, 'downloadQueue'):
#                    thread.downloadQueue.clear()

            # inv/object tracking
            for connection in BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values():
                connection.clean()
            # dandelion fluff trigger by expiration
            Dandelion().expire()

            # discovery tracking
            exp = time.time() - singleCleaner.expireDiscoveredPeers
            reaper = (k for k, v in state.discoveredPeers.items() if v < exp)
            for k in reaper:
                try:
                    del state.discoveredPeers[k]
                except KeyError:
                    pass
            # TODO: cleanup pending upload / download

            gc.collect()

            if state.shutdown == 0:
                self.stop.wait(singleCleaner.cycleLength)
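
The resend cut-off computed at the top of run() folds the stopresendingafterxdays and stopresendingafterxmonths settings into a single number of seconds, approximating a month as 365/12 days. The helper below shows the same arithmetic in isolation; the function name and signature are illustrative, not part of the source.

def resend_timeout_seconds(days, months):
    """Seconds after which unacknowledged messages are no longer resent.

    Mirrors the expression in run(): a month is taken to be 365/12 days,
    so resend_timeout_seconds(0, 1) == 2628000.0 (about 30.4 days).
    """
    return (float(days) * 24 * 60 * 60
            + float(months) * (60 * 60 * 24 * 365) / 12)
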
Example no. 3
    def run(self):
        gc.disable()
        timeWeLastClearedInventoryAndPubkeysTables = 0
        try:
            shared.maximumLengthOfTimeToBotherResendingMessages = (
                float(BMConfigParser().get('lmessagesettings',
                                           'stopresendingafterxdays')) * 24 *
                60 * 60) + (float(BMConfigParser().get(
                    'lmessagesettings', 'stopresendingafterxmonths')) *
                            (60 * 60 * 24 * 365) / 12)
        except:
            # Either the user hasn't set stopresendingafterxdays and
            # stopresendingafterxmonths yet or the options are missing
            # from the config file.
            shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')

        # initial wait
        if state.shutdown == 0:
            self.stop.wait(singleCleaner.cycleLength)

        while state.shutdown == 0:
            queues.UISignalQueue.put(
                ('updateStatusBar',
                 'Doing housekeeping (Flushing inventory in memory to disk...)'
                 ))
            Inventory().flush()
            queues.UISignalQueue.put(('updateStatusBar', ''))

            # If we are running as a daemon then we are going to fill up the UI
            # queue which will never be handled by a UI. We should clear it to
            # save memory.
            # FIXME redundant?
            if shared.thisapp.daemon or not state.enableGUI:
                queues.UISignalQueue.queue.clear()
            if timeWeLastClearedInventoryAndPubkeysTables < \
                    int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                Inventory().clean()
                # pubkeys
                sqlExecute(
                    "DELETE FROM pubkeys WHERE time<? AND usedpersonally='no'",
                    int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)

                # Let us resend getpubkey objects if we have not yet heard
                # a pubkey, and also msg objects if we have not yet heard
                # an acknowledgement
                queryreturn = sqlQuery(
                    "SELECT toaddress, ackdata, status FROM sent"
                    " WHERE ((status='awaitingpubkey' OR status='msgsent')"
                    " AND folder='sent' AND sleeptill<? AND senttime>?)",
                    int(time.time()),
                    int(time.time()) -
                    shared.maximumLengthOfTimeToBotherResendingMessages)
                for row in queryreturn:
                    if len(row) < 2:
                        logger.error(
                            'Something went wrong in the singleCleaner thread:'
                            ' a query did not return the requested fields. %r',
                            row)
                        self.stop.wait(3)
                        break
                    toAddress, ackData, status = row
                    if status == 'awaitingpubkey':
                        resendPubkeyRequest(toAddress)
                    elif status == 'msgsent':
                        resendMsg(ackData)

            # cleanup old nodes
            now = int(time.time())

            with knownnodes.knownNodesLock:
                for stream in knownnodes.knownNodes:
                    keys = knownnodes.knownNodes[stream].keys()
                    for node in keys:
                        try:
                            # scrap old nodes
                            if now - knownnodes.knownNodes[stream][node][
                                    "lastseen"] > 2419200:  # 28 days
                                shared.needToWriteKnownNodesToDisk = True
                                del knownnodes.knownNodes[stream][node]
                                continue
                            # scrap old nodes with low rating
                            if now - knownnodes.knownNodes[stream][node][
                                    "lastseen"] > 10800 and knownnodes.knownNodes[
                                        stream][node][
                                            "rating"] <= knownnodes.knownNodesForgetRating:
                                shared.needToWriteKnownNodesToDisk = True
                                del knownnodes.knownNodes[stream][node]
                                continue
                        except TypeError:
                            print "Error in %s" % node
                    keys = []

            # Let us write out the knownNodes to disk
            # if there is anything new to write out.
            if shared.needToWriteKnownNodesToDisk:
                try:
                    knownnodes.saveKnownNodes()
                except Exception as err:
                    if "Errno 28" in str(err):
                        logger.fatal('(while receiveDataThread'
                                     ' knownnodes.needToWriteKnownNodesToDisk)'
                                     ' Alert: Your disk or data storage volume'
                                     ' is full. ')
                        queues.UISignalQueue.put(
                            ('alert',
                             (tr._translate("MainWindow", "Disk full"),
                              tr._translate(
                                  "MainWindow",
                                  'Alert: Your disk or data storage volume'
                                  ' is full. LMessage will now exit.'), True)))
                        # FIXME redundant?
                        if shared.daemon or not state.enableGUI:
                            os._exit(0)
                shared.needToWriteKnownNodesToDisk = False


#            # clear download queues
#            for thread in threading.enumerate():
#                if thread.isAlive() and hasattr(thread, 'downloadQueue'):
#                    thread.downloadQueue.clear()

            # inv/object tracking
            for connection in \
                    BMConnectionPool().inboundConnections.values() + \
                    BMConnectionPool().outboundConnections.values():
                connection.clean()

            # discovery tracking
            exp = time.time() - singleCleaner.expireDiscoveredPeers
            reaper = (k for k, v in state.discoveredPeers.items() if v < exp)
            for k in reaper:
                try:
                    del state.discoveredPeers[k]
                except KeyError:
                    pass
            # TODO: cleanup pending upload / download

            gc.collect()

            if state.shutdown == 0:
                self.stop.wait(singleCleaner.cycleLength)
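
The discovery-tracking step in both cleaner variants prunes state.discoveredPeers by collecting the peers whose last-seen timestamp is older than expireDiscoveredPeers and deleting them, swallowing KeyError in case another thread removed the entry first. The same idiom, self-contained and with hypothetical names:

import time

def prune_expired(peers, max_age):
    """Drop entries whose timestamp value is older than max_age seconds."""
    cutoff = time.time() - max_age
    for peer in [k for k, v in peers.items() if v < cutoff]:
        try:
            del peers[peer]
        except KeyError:
            pass    # already removed by another thread

discovered = {('203.0.113.5', 8444): time.time() - 7200,
              ('198.51.100.7', 8444): time.time()}
prune_expired(discovered, 3600)     # only the recently seen peer remains
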
Example no. 4
0
    def run(self):
        timeWeLastClearedInventoryAndPubkeysTables = 0
        try:
            shared.maximumLengthOfTimeToBotherResendingMessages = (
                float(BMConfigParser().get('bitmessagesettings',
                                           'stopresendingafterxdays')) * 24 *
                60 * 60) + (float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxmonths')) *
                            (60 * 60 * 24 * 365) / 12)
        except:
            # Either the user hasn't set stopresendingafterxdays and stopresendingafterxmonths yet or the options are missing from the config file.
            shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')

        # initial wait
        if state.shutdown == 0:
            self.stop.wait(singleCleaner.cycleLength)

        while state.shutdown == 0:
            queues.UISignalQueue.put((
                'updateStatusBar', 'Doing housekeeping (Flushing inventory in memory to disk...)'))
            Inventory().flush()
            queues.UISignalQueue.put(('updateStatusBar', ''))
            
            protocol.broadcastToSendDataQueues((
                0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            # If we are running as a daemon then we are going to fill up the UI
            # queue which will never be handled by a UI. We should clear it to
            # save memory.
            if BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon'):
                queues.UISignalQueue.queue.clear()
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                Inventory().clean()
                # pubkeys
                sqlExecute(
                    '''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''',
                    int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)

                # Let us resend getpubkey objects if we have not yet heard a pubkey, and also msg objects if we have not yet heard an acknowledgement
                queryreturn = sqlQuery(
                    '''select toaddress, ackdata, status FROM sent WHERE ((status='awaitingpubkey' OR status='msgsent') AND folder='sent' AND sleeptill<? AND senttime>?) ''',
                    int(time.time()),
                    int(time.time()) - shared.maximumLengthOfTimeToBotherResendingMessages)
                for row in queryreturn:
                    if len(row) < 2:
                        logger.error('Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
                        self.stop.wait(3)
                        break
                    toAddress, ackData, status = row
                    if status == 'awaitingpubkey':
                        resendPubkeyRequest(toAddress)
                    elif status == 'msgsent':
                        resendMsg(ackData)

            # cleanup old nodes
            now = int(time.time())
            toDelete = []
            with knownnodes.knownNodesLock:
                for stream in knownnodes.knownNodes:
                    for node in knownnodes.knownNodes[stream].keys():
                        try:
                            if now - knownnodes.knownNodes[stream][node]["lastseen"] > 2419200: # 28 days
                                shared.needToWriteKnownNodesToDisk = True
                                del knownnodes.knownNodes[stream][node]
                        except TypeError:
                            print "Error in %s" % (str(node))

            # Let us write out the knownNodes to disk if there is anything new to write out.
            if shared.needToWriteKnownNodesToDisk:
                try:
                    knownnodes.saveKnownNodes()
                except Exception as err:
                    if "Errno 28" in str(err):
                        logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ')
                        queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
                        if shared.daemon:
                            os._exit(0)
                shared.needToWriteKnownNodesToDisk = False

            # clear download queues
            for thread in threading.enumerate():
                if thread.isAlive() and hasattr(thread, 'downloadQueue'):
                    thread.downloadQueue.clear()

            # inv/object tracking
            for connection in BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values():
                connection.clean()

            # TODO: cleanup pending upload / download

            if state.shutdown == 0:
                self.stop.wait(singleCleaner.cycleLength)
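
The node-cleanup loop above keeps only peers seen within the last 28 days (2419200 seconds), iterating over a copy of the keys so entries can be deleted while holding the lock. Below is a stand-alone sketch of that pattern, with the {stream: {peer: {"lastseen": ...}}} layout assumed from the excerpt rather than taken from the real knownnodes module.

import threading
import time

knownNodes = {1: {}}             # stream -> {peer: {"lastseen": ..., "rating": ...}}
knownNodesLock = threading.Lock()
MAX_NODE_AGE = 2419200           # 28 days, the threshold used above

def clean_known_nodes():
    now = int(time.time())
    with knownNodesLock:
        for stream in knownNodes:
            # iterate over a copy of the keys so deletion is safe
            for peer in list(knownNodes[stream].keys()):
                if now - knownNodes[stream][peer]["lastseen"] > MAX_NODE_AGE:
                    del knownNodes[stream][peer]
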
Example no. 5
0
def doCleanShutdown():
    state.shutdown = 1  #Used to tell proof of work worker threads and the objectProcessorThread to exit.

    #Stop sources of new threads
    for thread in threading.enumerate():
        if type(thread).__name__ not in ('outgoingSynSender',
                                         'singleListener'):
            continue
        thread.stopThread()
        thread.join()

    protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data'))
    objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
    for thread in threading.enumerate():
        if thread.isAlive() and isinstance(thread, StoppableThread):
            thread.stopThread()

    UISignalQueue.put(
        ('updateStatusBar', 'Saving the knownNodes list of peers to disk...'))
    logger.info('Saving knownNodes list of peers to disk')
    saveKnownNodes()
    logger.info('Done saving knownNodes list of peers to disk')
    UISignalQueue.put(('updateStatusBar',
                       'Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk. This should normally only take a second...'
    ))
    Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have incremented the
    # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit.
    for thread in threading.enumerate():
        if type(thread).__name__ != 'objectProcessor':
            continue
        thread.join()
        break

    # This will guarantee that the previous flush committed and that the
    # objectProcessorThread committed before we close the program.
    sqlStoredProcedure('commit')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')
    for thread in threading.enumerate():
        if type(thread).__name__ != 'sqlThread':
            continue
        thread.join()
        break

    # Wait long enough to guarantee that any running proof of work worker threads will check the
    # shutdown variable and exit. If the main thread closes before they do then they won't stop.
    time.sleep(.25)

    for thread in threading.enumerate():
        if isinstance(thread, sendDataThread):
            thread.sendDataThreadQueue.put((0, 'shutdown', 'no data'))
        if thread is not threading.currentThread() and isinstance(
                thread,
                StoppableThread) and not isinstance(thread, outgoingSynSender):
            logger.debug("Waiting for thread %s", thread.name)
            thread.join()

    # flush queued
    for queue in (workerQueue, UISignalQueue, addressGeneratorQueue,
                  objectProcessorQueue):
        while True:
            try:
                queue.get(False)
                queue.task_done()
            except Queue.Empty:
                break

    logger.info('Clean shutdown complete.')

    for thread in threading.enumerate():
        if thread is threading.currentThread():
            continue
        logger.debug("Thread %s still running", thread.name)

    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon'):
        shared.thisapp.cleanup()
        os._exit(0)
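
Both doCleanShutdown() variants drain the work queues with non-blocking get() calls until Queue.Empty is raised, pairing each item with task_done() so the queue's unfinished-task counter stays balanced. A minimal sketch of that drain idiom against the Python 2 Queue module used throughout these excerpts (named "queue" on Python 3):

import Queue    # "queue" on Python 3

def drain(q):
    """Discard everything currently queued without blocking."""
    while True:
        try:
            q.get(False)        # non-blocking get
            q.task_done()       # keep the unfinished-task counter balanced
        except Queue.Empty:
            break

workerQueue = Queue.Queue()
workerQueue.put(('checkShutdownVariable', 'no data'))
drain(workerQueue)              # workerQueue.qsize() is now 0
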