def _doFastPoW(target, initialHash):
    import shared
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown:
            pool.terminate()
            while True:
                time.sleep(10)  # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
            return
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                return result[0], result[1]
        time.sleep(0.2)

def _doFastPoW(target, initialHash):
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown >= 1:
            pool.terminate()
            while True:
                time.sleep(10)  # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
            return
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                return result[0], result[1]
        time.sleep(0.2)

def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)

def _doFastPoW(target, initialHash, cancellable):
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown >= 1:
            pool.terminate()
            while True:
                time.sleep(10)  # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
            return
        if shared.PoWQueue.empty() and cancellable:  # If the PoW is cancellable it can be interrupted
            pool.terminate()
            pool.join()  # Wait for the workers to exit...
            return [-1, -1]  # Special value for differentiation
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                if cancellable:
                    shared.PoWQueue.get()  # If the PoW is cancellable we need to communicate its end to the UI
                return result[0], result[1]
        time.sleep(0.2)

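# A hypothetical caller for the cancellable variant above. This is an
# assumption inferred from the code, not the project's documented API: the
# queue appears to hold a sentinel while a cancellable PoW runs, the UI
# cancels by draining shared.PoWQueue (the loop above then sees an empty
# queue and returns [-1, -1]), and on normal completion _doFastPoW drains
# the queue itself to signal that it has finished.
def _run_cancellable_pow(target, initialHash):
    import shared  # project-wide shared state module, as used above
    shared.PoWQueue.put('working')  # sentinel: a cancellable PoW is in progress
    trialValue, nonce = _doFastPoW(target, initialHash, cancellable=True)
    if (trialValue, nonce) == (-1, -1):
        return None  # interrupted by the UI draining the queue
    return trialValue, nonce
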
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)

def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    # temporarily disable handlers
    # int_handler = signal.getsignal(signal.SIGINT)
    # term_handler = signal.getsignal(signal.SIGTERM)
    # signal.signal(signal.SIGINT, signal.SIG_IGN)
    # signal.signal(signal.SIGTERM, signal.SIG_IGN)
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    # re-enable handlers
    # signal.signal(signal.SIGINT, int_handler)
    # signal.signal(signal.SIGTERM, term_handler)
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise StopIteration("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                try:
                    result[i].successful()
                except AssertionError:
                    raise StopIteration("Interrupted")
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
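
# All of the variants above hand the actual hashing to a _pool_worker function
# that is not shown in this section, and they assume that shared, config,
# logger and shutdown exist at module scope. The sketch below is an assumption
# about what _pool_worker looks like, based on the Bitmessage proof-of-work
# scheme (double SHA-512 over an 8-byte big-endian nonce prepended to
# initialHash); the real implementation may differ. Each worker starts at its
# pool index and strides by pool_size so no two workers test the same nonce.
import hashlib
from struct import pack, unpack


def _pool_worker(nonce, initialHash, target, pool_size):
    # Hypothetical sketch: keep incrementing the nonce by pool_size until the
    # trial value falls below the target, then return [trialValue, nonce] for
    # _doFastPoW to unpack as result[0], result[1].
    trialValue = float('inf')
    while trialValue > target:
        nonce += pool_size
        trialValue, = unpack(
            '>Q',
            hashlib.sha512(
                hashlib.sha512(pack('>Q', nonce) + initialHash).digest()
            ).digest()[0:8])
    return [trialValue, nonce]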