def test_base(self):
    # create a dependency network, and see how the performance changes
    # when adjusting the amount of threads
    pool = ThreadPool(0)
    ni = 1000  # number of items to process

    for num_threads in range(0, self.max_threads * 2 + 1, self.max_threads // 2):
        pool.set_size(num_threads)
        for num_transformers in (1, 5, 10):
            for read_mode in range(2):
                ts, rcs = add_task_chain(pool, ni, count=num_transformers,
                                         feedercls=IteratorThreadTask,
                                         transformercls=FixturePerformanceThreadTask,
                                         include_verifier=False)

                mode_info = "read(0)"
                if read_mode == 1:
                    mode_info = "read(1) * %i" % ni
                # END mode info
                fmt = "Threadcount=%%i: Produced %%i items using %s in %%i transformations in %%f s (%%f items / s)\n" % mode_info
                reader = rcs[-1]
                st = time.time()
                if read_mode == 1:
                    for i in range(ni):
                        assert len(reader.read(1)) == 1
                    # END for each item to read
                else:
                    assert len(reader.read(0)) == ni
                # END handle read mode
                elapsed = time.time() - st
                sys.stderr.write(fmt % (num_threads, ni, num_transformers, elapsed, ni / elapsed))
def test_usage(self):
    p = ThreadPool()
    # default size is 0, synchronous mode
    assert p.size() == 0

    # A task performing processing on items from an iterator
    t = IteratorThreadTask(iter(list(range(10))), "power", lambda i: i * i)
    reader = p.add_task(t)

    # read all items - they were processed by worker 1
    items = reader.read()
    assert len(items) == 10 and items[0] == 0 and items[-1] == 81

    # chaining
    t = IteratorThreadTask(iter(list(range(10))), "power", lambda i: i * i)
    reader = p.add_task(t)

    # chain both by linking their readers
    tmult = ChannelThreadTask(reader, "mult", lambda i: i * 2)
    result_reader = p.add_task(tmult)

    # read all
    items = result_reader.read()
    assert len(items) == 10 and items[0] == 0 and items[-1] == 162
class Download(object):
    def __init__(self, folder):
        self.folder = folder
        if not os.path.exists(folder):
            os.makedirs(folder)
        self.pool = ThreadPool(3)

    def download(self, dlist):
        for url in dlist:
            fname = url.split('/')[-1]
            # queue the module-level download helper (not this method)
            self.pool.queueTask(download, (url, os.path.join(self.folder, fname)))
        self.pool.joinAll()
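The `queueTask` call above dispatches a free function named `download` (distinct from the method of the same name) that the snippet does not show. A minimal sketch of what that helper might look like, assuming the Python 2-era `urllib2` that matches other snippets in this collection; the body is an assumption, not the original implementation:

import urllib2  # assumption: py2-era helper, matching the other py2 snippets here

def download(url, path):
    # hypothetical helper: fetch the URL and write the response body to the target path
    body = urllib2.urlopen(url).read()
    with open(path, 'wb') as f:
        f.write(body)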
def solve(data, leak, ctext):
    guesses = []
    # Based on the access order of Pi ^ Ki pairs in the first round, see aes.c
    pairs = [(0, 4), (4, 8), (8, 12), (12, 5), (5, 9), (9, 13), (13, 1),
             (1, 10), (10, 14), (14, 2), (2, 6), (6, 15), (15, 3), (3, 7),
             (7, 11)]

    workerPool = ThreadPool(8)
    workerPool.map(makeGuesses, pairs)
    for result in workerPool.get_results():
        guesses.extend(result)

    guesses = statisticalFilter(guesses)
    guesses.sort(key=lambda x: x.cost, reverse=False)

    pairs = np.zeros((16, 16), dtype=np.uint8)
    for guess in guesses:
        pairs[guess.i1, guess.i2] = guess.relate
        pairs[guess.i2, guess.i1] = guess.relate

    # 00112233445566778899aabbccddeeff
    # 4355a46b19d348dc2f57c046f8ef63d4 (sha256sum(echo "1")[:16])
    keyBase = np.zeros((16), dtype=np.uint8)
    keyBase[0:6] = leak
    keyBase[14] = (keyBase[2] ^ pairs[2, 14]) & mask
    keyBase[10] = (keyBase[14] ^ pairs[10, 14]) & mask
    #keyBase[1] = (keyBase[10] ^ pairs[1, 10]) & mask
    keyBase[7] = (keyBase[3] ^ pairs[3, 7]) & mask
    keyBase[15] = (keyBase[3] ^ pairs[3, 15]) & mask
    keyBase[6] = (keyBase[15] ^ pairs[6, 15]) & mask
    #keyBase[0] = (keyBase[4] ^ pairs[4, 0]) & mask
    keyBase[8] = (keyBase[4] ^ pairs[4, 8]) & mask
    keyBase[9] = (keyBase[5] ^ pairs[9, 5]) & mask
    keyBase[12] = (keyBase[5] ^ pairs[5, 12]) & mask
    keyBase[13] = (keyBase[9] ^ pairs[9, 13]) & mask

    print("Base Pair: %s" % "".join(list(map(lambda x: "%02x" % x, keyBase))))
    sys.stdout.flush()

    # 4355a46b19d348dc2f57c046f8ef63d4 (sha256sum(echo "1")[:16])
    for k11 in range(0x0, 0x100):
        key = np.array(keyBase, copy=True)
        key[11] = k11
        workerPool.add_task(run, ctext, key)
    workerPool.get_results()
def test_if_pool_exists():
    # setup
    output_q = Queue()
    pool = ThreadPool(5)
    input_values = set(range(50))

    # run
    for i in input_values:
        pool.add_task(simple_handler, output_q, i)

    # assert
    output_q.join()
    all_values = set()
    while not output_q.empty():
        all_values.add(output_q.get())
    tools.assert_equals(input_values, all_values)
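The test depends on a `simple_handler` that is not shown. A minimal sketch under the assumption that each handler forwards its value and immediately balances the queue's unfinished-task counter so `output_q.join()` can return; pairing `put()` with `task_done()` here is a guess at the original's intent, not its actual code:

def simple_handler(output_q, value):
    # hypothetical handler: forward the value to the output queue
    output_q.put(value)
    # balance the counter incremented by put() so join() can return (assumption)
    output_q.task_done()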
def _assert_async_dependent_tasks(self, pool):
    # includes failure in center task, 'recursive' orphan cleanup
    # This will also verify that the channel-close mechanism works
    # t1 -> t2 -> t3
    sys.stderr.write("Threadpool: starting async dependency test in %i threads\n" % pool.size())
    null_tasks = pool.num_tasks()
    ni = 1000
    count = 3
    aic = count + 2
    make_task = lambda *args, **kwargs: add_task_chain(pool, ni, count, *args, **kwargs)

    ts, rcs = make_task()
    assert len(ts) == aic
    assert len(rcs) == aic
    assert pool.num_tasks() == null_tasks + len(ts)

    # read(0)
    #########
    st = time.time()
    items = rcs[-1].read()
    elapsed = time.time() - st
    assert len(items) == ni
    del(rcs)
    if py2:
        assert pool.num_tasks() == 0  # tasks depleted, all done, no handles
    # wait a tiny moment - there could still be something unprocessed on the
    # queue, increasing the refcount
    time.sleep(0.15)
    assert sys.getrefcount(ts[-1]) == 2  # ts + call
    assert sys.getrefcount(ts[0]) == 2   # ts + call
    sys.stderr.write("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed, ni / elapsed))

    # read(1)
    #########
    ts, rcs = make_task()
    st = time.time()
    for i in range(ni):
        items = rcs[-1].read(1)
        assert len(items) == 1
    # END for each item to pull
    elapsed_single = time.time() - st
    # another read yields nothing, it's empty
    assert len(rcs[-1].read()) == 0
    sys.stderr.write("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed_single, ni / elapsed_single))

    # read with min-count size
    ##########################
    # must be faster, as it will read ni / 4 chunks
    # It's enough to set one task, as it will force all others in the chain
    # to min_size as well.
    ts, rcs = make_task()
    if py2:
        assert pool.num_tasks() == len(ts)
    nri = ni // 4
    ts[-1].min_count = nri
    st = time.time()
    for i in range(ni):
        items = rcs[-1].read(1)
        assert len(items) == 1
    # END for each item to read
    elapsed_minsize = time.time() - st
    # it's empty
    assert len(rcs[-1].read()) == 0
    sys.stderr.write("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )\n" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize))

    # it should have been a bit faster at least, and most of the time it is
    # Sometimes, it's not, mainly because:
    # * The test tasks lock a lot, hence they slow down the system
    # * Each read will still trigger the pool to evaluate, causing some overhead
    #   even though there are enough items on the queue in that case. Keeping
    #   track of the scheduled items helped there, but it caused further unacceptable
    #   slowdown
    # assert elapsed_minsize < elapsed_single

    # read with failure
    ###################
    # it should recover and give at least fail_after items
    # t1 -> x -> t3
    fail_after = ni // 2
    ts, rcs = make_task(fail_setup=[(0, fail_after)])
    items = rcs[-1].read()
    assert len(items) == fail_after

    # MULTI-POOL
    # If two pools are connected, this should work as well.
    # The second one has just one more thread
    ts, rcs = make_task()

    # connect verifier channel as feeder of the second pool
    p2 = ThreadPool(0)  # don't spawn new threads, they have the tendency not to wake up on mutexes
    assert p2.size() == 0
    p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
    assert p2ts[0] is None               # we have no feeder task
    assert rcs[-1].pool_ref()() is pool  # it didn't change the pool
    assert rcs[-1] is p2ts[1].reader()
    assert p2.num_tasks() == len(p2ts) - 1  # first is None

    # reading from the last one will evaluate all pools correctly
    st = time.time()
    items = p2rcs[-1].read()
    elapsed = time.time() - st
    assert len(items) == ni
    sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic - 1, elapsed, ni / elapsed))

    # lose the handles of the second pool to allow others to go as well
    del(p2rcs)
    del(p2ts)
    assert p2.num_tasks() == 0

    # now we lost our old handles as well, and the tasks go away
    ts, rcs = make_task()
    if py2:
        assert pool.num_tasks() == len(ts)

    p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
    assert p2.num_tasks() == len(p2ts) - 1

    # Test multi-read(1)
    reader = rcs[-1]
    st = time.time()
    for i in range(ni):
        items = reader.read(1)
        assert len(items) == 1
    # END for each item to get
    elapsed = time.time() - st
    del(reader)  # decrement refcount
    sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic - 1, elapsed, ni / elapsed))

    # another read is empty
    assert len(rcs[-1].read()) == 0

    # now that both are connected, I can drop my handle to the reader
    # without affecting the task-count, but what's more important:
    # They remove their tasks correctly once we drop our references in the
    # right order
    del(p2ts)
    assert p2rcs[0] is rcs[-1]
    del(p2rcs)
    assert p2.num_tasks() == 0
    del(p2)

    if py2:
        assert pool.num_tasks() == null_tasks + len(ts)

    del(ts)
    del(rcs)
    if py2:
        assert pool.num_tasks() == null_tasks
class RDBMultiClient(DictNature):
    pool_size = 5  # use this many threads per RDBClient

    def __init__(self, weights):
        self.weights = weights
        self.nodes = set(x[0] for x in weights)
        self.hasher = ConsistantHasher(weights)
        self.clients = dict((node, RDBClient(node)) for node in self.nodes)
        self.parallel_transfer = True
        if self.parallel_transfer:
            # a thread-pool to support concurrent bulk requests that
            # span multiple nodes
            self.thread_pool = ThreadPool(len(self.nodes) * self.pool_size)

    def get(self, key, *a, **kw):
        return self.clients[self.hasher[key]].get(key, *a, **kw)

    def put(self, key, value, *a, **kw):
        return self.clients[self.hasher[key]].put(key, value, *a, **kw)

    def delete(self, key, *a, **kw):
        return self.clients[self.hasher[key]].delete(key, *a, **kw)

    def get_multi(self, keys):
        return self.bulk(get=keys)

    def put_multi(self, keys):
        return self.bulk(put=keys)

    def delete_multi(self, keys):
        return self.bulk(delete=keys)

    def bulk(self, get=[], put={}, delete=[]):
        """Do multiple _bulk requests in parallel"""
        if not isinstance(put, dict):
            put = dict(put)
        # group the requested operations by the node that owns each key
        by_node = {}
        for key in get:
            by_node.setdefault(self.hasher[key], {}).setdefault('get', []).append(key)
        for key in delete:
            by_node.setdefault(self.hasher[key], {}).setdefault('delete', []).append(key)
        for key, val in put.iteritems():
            by_node.setdefault(self.hasher[key], {}).setdefault('put', {})[key] = val

        funcs = []
        for node, ops in by_node.iteritems():
            # factory binds node/ops per iteration, avoiding the late-binding closure pitfall
            def fetch(_node, _ops):
                def _fetch():
                    return self.clients[_node].bulk(**_ops)
                return _fetch
            funcs.append(fetch(node, ops))

        if self.parallel_transfer and len(funcs) > 1:
            bulks = self.thread_pool.pmap(funcs)
        else:
            bulks = [f() for f in funcs]

        ret = {}
        for bulk in bulks:
            ret.update(bulk)
        return ret

    def _by_node(self, keys):
        ret = {}
        for key in keys:
            ret.setdefault(self.hasher[key], []).append(key)
        return ret.items()

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.weights)
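A hypothetical usage sketch for the class above, assuming `weights` is a list of `(node, weight)` pairs as `ConsistantHasher` appears to expect and that `RDBClient` connects to the named node; the addresses and values are invented for illustration (py2, matching the `iteritems()` usage):

client = RDBMultiClient([('10.0.0.1:7777', 1), ('10.0.0.2:7777', 1)])
client.put_multi({'k1': 'v1', 'k2': 'v2'})  # fanned out per owning node, in parallel
print client.get_multi(['k1', 'k2'])        # expected: {'k1': 'v1', 'k2': 'v2'}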
    maskPatch = np.array([3, 3, 3, 3, 3], dtype=np.uint8)
    expand = np.array([8, 6, 4, 2, 0], dtype=np.uint32)
    for s in range(0, 0x400):
        patch = np.array((s >> expand) & maskPatch, dtype=np.uint8)
        key[6:11] &= maskV
        key[6:11] |= patch
        cipher = AES.new(array("B", key).tostring(), AES.MODE_ECB)
        plain = cipher.decrypt(cipherText)
        if b"flag" in plain:
            try:
                sys.stdout.write("%s" % plain.decode('utf-8'))
            except:
                sys.stdout.write("False Match Ignored")
            sys.stdout.flush()
            found = True
    if found:
        return

if __name__ == "__main__":
    keyBase = np.array(bytearray(unhexlify(sys.argv[1])))
    ctext = unhexlify(sys.argv[2])

    workerPool = ThreadPool(4)
    for s in range(0, 0x100):
        maskPatch = np.array([3, 3, 3, 3], dtype=np.uint8)
        expand = np.array([0, 2, 4, 6], dtype=np.uint8)
        key = np.array(keyBase, copy=True)
        key[12:] ^= (s >> expand) & maskPatch
        workerPool.add_task(bruteForce, *(ctext, key))
    for a in soup.select("#files_list tr th a.name"):
        file_url = a['href']
        if file_url.endswith('/download'):  # is file
            logger.info("Found file " + file_url)
            filename = file_url.split('/')[-2]  # splits and gets the word before /download, which is the filename
            try:
                extension = get_extension(filename)
                if extension in archive_types:
                    proj_ext_counter[extension] += 1
            except ValueError:  # file without extension
                pass
        else:  # is a directory
            file_queue.append("http://sourceforge.net" + file_url)

pool = ThreadPool(16)
for project_name in project_names:
    proj_ext_counter = Counter()
    file_queue = deque(["http://sourceforge.net/projects/%s/files/" % project_name])
    while file_queue:
        url = file_queue.popleft()
        pool.add_task(visit_project_file, url)
        if not file_queue:
            pool.wait_completion()
    try:
        extension, count = proj_ext_counter.most_common(1)[0]
        ext_counter[extension] += 1
    except IndexError:  # no known archive files in project
        pass

print ext_counter
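`get_extension` is referenced above but not defined in the snippet. A plausible sketch, assuming it returns the archive suffix and raises `ValueError` when a filename has none (the case the caller catches); the double-extension handling is an assumption:

def get_extension(filename):
    # hypothetical helper: keep common double extensions together
    for multi in ('.tar.gz', '.tar.bz2', '.tar.xz'):
        if filename.endswith(multi):
            return multi.lstrip('.')
    if '.' not in filename:
        raise ValueError("no extension in %r" % filename)
    return filename.rsplit('.', 1)[-1]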
def test_base(self):
    p = ThreadPool()

    # default pools have no workers - and threading was removed entirely ...
    assert p.size() == 0

    # SINGLE TASK SERIAL SYNC MODE
    ##############################
    # put a few unrelated tasks that we forget about - check ref counts and cleanup
    t1, t2 = FixtureThreadTask(iter(list()), "nothing1", None), FixtureThreadTask(iter(list()), "nothing2", None)
    urc1 = p.add_task(t1)
    urc2 = p.add_task(t2)
    assert p.num_tasks() == 2

    # test pool reader
    assert urc1.pool_ref()() is p
    assert urc1.task_ref()() is t1
    assert urc1.pool() == p
    assert urc1.task() == t1

    ## SINGLE TASK
    #################
    self._assert_single_task(p, False)
    if py2:
        assert p.num_tasks() == 2
    del(urc1)
    if py2:
        assert p.num_tasks() == 1

    p.remove_task(t2)
    if py2:
        assert p.num_tasks() == 0
    assert sys.getrefcount(t2) == 2

    t3 = FixtureChannelThreadTask(urc2, "channel", None)
    urc3 = p.add_task(t3)
    if py2:
        assert p.num_tasks() == 1
    del(urc3)
    if py2:
        assert p.num_tasks() == 0
    assert sys.getrefcount(t3) == 2

    # DEPENDENT TASKS SYNC MODE
    ###########################
    self._assert_async_dependent_tasks(p)
h.update(salt.encode('utf-8'))
h.update(seed.encode('utf-8'))
key = h.digest()[:16]

sys.stderr.write("Key Is: %s\n" % key.hex())
sys.stderr.flush()

with open("./chal.exe", "rb") as f:
    data = f.read()
data = data.replace(b"\xde\xad\xbe\xef" * 4, key)

fileOut = "/src/patched.exe"
with open(fileOut, "wb") as f:
    f.write(data)
st = os.stat(fileOut)
os.chmod(fileOut, st.st_mode | stat.S_IEXEC)

workers = ThreadPool(workerCount)
tasks = 1024
roundingerror = count - int(count / tasks) * tasks
workers.add_task(run, int(count / tasks) + roundingerror, 0)
for ii in range(1, tasks):
    workers.add_task(run, int(count / tasks), ii)

textName = "/tmp/test.txt"
results = workers.get_results()
with open(textName, "wb") as f:
    for r in results:
        f.write(r)
print(textName)