def main():
    """Intersect per-byte candidate sets from parallel workers and print the result."""
    # Every byte position starts with the full 0..255 candidate range.
    candidates = [set(range(256)) for _ in range(8)]
    data = ['A', 'B', 'C', 'D', 'E', 'F']
    results = brute(worker, data_list=data, processes=6)
    print('results', results)
    # Keep only values that appear in every worker's dataset for that position.
    for dataset in results:
        for position in range(8):
            candidates[position] &= dataset[position]
    print(candidates)
    # NOTE(review): str.encode("hex") is Python 2 only — this file looks py2.
    recovered = "".join(chr(next(iter(s))) for s in candidates)
    print(recovered.encode("hex"))
def decrypt_paralell(file):
    """Try to factor candidate values near the encrypted flag, in parallel.

    Reads the ciphertext from *file*, converts it to an integer, and hands
    every offset in [-diff, diff) to ``factor_worker`` together with a
    shared prime table.

    :param file: path to the binary ciphertext file
    :return: list of per-offset results from the worker pool
    """
    exp_range = 2  # was testing 22
    diff = 18  # was testing 1024
    # Context manager closes the handle deterministically (original leaked it).
    with open(file, 'rb') as handle:
        ct = handle.read()
    encrypted_flag = bytes_to_long(ct)
    print(encrypted_flag)
    primes = get_primes(2 ** exp_range)
    # One work item per candidate offset around the flag value.
    dataset = [(encrypted_flag + i, i, primes) for i in range(-diff, diff)]
    results = brute(factor_worker, dataset, processes=6)
    return results
def main_d():
    """Rebuild the plaintext from memorized hash preimages and print the flag."""
    maps = brute(worker, range(20), processes=6)
    # Merge the per-process lookup tables into one hash -> chunk map.
    memorized_hashes = {}
    for m in maps:
        memorized_hashes.update(m)
    # with-block closes the file (original leaked the handle).
    with open("hashes.txt", 'r') as f:
        expected_hashes = f.readlines()
    # str.join avoids the quadratic cost of repeated string concatenation.
    s = "".join(memorized_hashes[h.strip()] for h in expected_hashes)
    hsh1 = func1s(s)
    hsh2 = func2s(s)
    print(hsh1, hsh2)
    print('flag{%d}' % (hsh1 * hsh2))
def distributed_bits_collector(encrypted_data, multiplicator, upper_bound, oracle_fun, processes):
    """Recover the bits of a secret via a parallel oracle.

    Repeatedly applies *multiplicator* to the ciphertext, queries the
    oracle for each derived ciphertext through a worker pool, and returns
    the recovered bits ordered by bit index.

    :param encrypted_data: starting ciphertext
    :param multiplicator: function deriving the next ciphertext from the current one
    :param upper_bound: bound whose bit length fixes how many bits to recover
    :param oracle_fun: maps a ciphertext to a single recovered bit
    :param processes: worker pool size
    :return: list of bits in ascending index order
    """
    def worker(data):
        # One oracle query per (index, ciphertext) work item.
        index, ct = data
        bit = oracle_fun(ct)
        print("Recovered bit %d -> %d" % (index, bit))
        return index, bit
    ciphertext = encrypted_data
    data_set = []
    # bin(x) carries a '0b' prefix, hence the -2 to get the bit length.
    for i in range(len(bin(upper_bound)) - 2):
        ciphertext = multiplicator(ciphertext)
        data_set.append((i, ciphertext))
    results = brute(worker, data_set, processes)
    # BUG FIX: sorted() returns a new list; the original discarded it, so
    # bits could come back in pool-completion order instead of index order.
    results = sorted(results, key=lambda x: x[0])
    return [bit for index, bit in results]
def collision_search():
    """Search for a collision by random sampling.

    Draws random messages in batches across a worker pool and records each
    message under its computed value until two messages share one.

    :return: tuple (earlier colliding message, new colliding message)
    """
    bytes_no = rate / 8  # message length in bytes, from the module-level rate
    space = {}  # computed value (as str) -> first message that produced it
    stage = 1000  # messages per worker per round
    start = 0
    processes = 7
    print("generate space")
    while True:
        # Progress relative to the birthday bound 2**(capacity/2 + 1).
        print(str(100 * start / (2.0 ** (capacity / 2 + 1))) + "%")
        start += stage
        batches = [[urandom(bytes_no) for _ in range(stage)] for _ in range(processes)]
        outcome = brute(worker, batches, processes=processes)
        # Flatten the per-process result lists into one list of (msg, c) pairs.
        outcome = reduce(lambda acc, part: acc + part, outcome)
        for (msg, c) in outcome:
            key = str(c)
            if key in space:
                print(len(space))
                return space[key], msg
            space[key] = msg
def calculate_mults_and_add_partial(data, N, parallel):
    """Sum the worker results (seeded with 1) and reduce modulo N.

    :param data: work items handed to ``worker_mults``
    :param N: modulus applied to the accumulated sum
    :param parallel: worker pool size
    :return: (1 + sum of worker outputs) mod N
    """
    print("Calculating minor chunk")
    partial_results = brute(worker_mults, data, processes=parallel)
    accumulated = functools.reduce(gmpy2.add, partial_results, 1)
    return accumulated % N
def multiply(values, parallel):
    """Multiply all *values* together, fanning chunks of 100 out to workers.

    :param values: numbers to multiply
    :param parallel: worker pool size
    :return: product of all values (1 for empty input)
    """
    chunks = chunk_with_remainder(values, 100)
    partial_products = brute(worker_multiply, chunks, processes=parallel)
    product = functools.reduce(gmpy2.mul, partial_products, 1)
    return product
def calculate_modinvs(Nxs, moduli, parallel):
    """Compute modular inverses for each (Nx, modulus) pair in parallel.

    :param Nxs: values to invert
    :param moduli: matching moduli, paired positionally with Nxs
    :param parallel: worker pool size
    :return: worker results, one per pair
    """
    pairs = zip(Nxs, moduli)
    return brute(worker_mod, pairs, processes=parallel)
def calculate_nxs(N, moduli, parallel):
    """Compute the per-modulus values over the data produced by ``data_gen``.

    :param N: value shared across all work items (presumably the product of
        the moduli — confirm against data_gen)
    :param moduli: iterable of moduli fed to the data generator
    :param parallel: worker pool size
    :return: worker results, one per generated work item
    """
    # Pass the pool size by keyword for consistency with the sibling helpers
    # (calculate_modinvs, decrypt_paralell) that call brute(..., processes=...).
    return brute(worker_nxs, data_gen(N, moduli), processes=parallel)