def verifier_search(cts, best_guess, use_n=64, net=net6):
    """Refine a best (k_r, k_{r-1}) subkey guess by exhaustively scoring its
    low-Hamming-weight neighborhood with the neural distinguisher `net`.

    Parameters
    ----------
    cts : tuple of four arrays
        Ciphertext words (c0a, c1a, c0b, c1b) of the ciphertext pairs.
    best_guess : pair
        Current best guess for the last two round subkeys.
    use_n : int
        Number of ciphertext pairs actually fed to the network per candidate.
    net : keras model
        Distinguisher used to score two-round partial decryptions.

    Returns
    -------
    (key1, key2, val) : best candidate pair of subkeys and its combined
        log-odds score (scaled to the full number of available pairs).
    """
    # Candidate sets: the guess XORed with every entry of the module-level
    # `low_weight` array (presumably all low-Hamming-weight words — TODO
    # confirm where `low_weight` is built).
    ck1 = best_guess[0] ^ low_weight
    ck2 = best_guess[1] ^ low_weight
    n = len(ck1)
    # Form the Cartesian product of the two candidate sets: ck1 is repeated
    # element-wise, ck2 is tiled, so row i*n+j pairs ck1[i] with ck2[j].
    ck1 = np.repeat(ck1, n)
    keys1 = np.copy(ck1)
    ck2 = np.tile(ck2, n)
    keys2 = np.copy(ck2)
    # Each key pair is applied to the same use_n ciphertext pairs.
    ck1 = np.repeat(ck1, use_n)
    ck2 = np.repeat(ck2, use_n)
    ct0a = np.tile(cts[0][0:use_n], n * n)
    ct1a = np.tile(cts[1][0:use_n], n * n)
    ct0b = np.tile(cts[2][0:use_n], n * n)
    ct1b = np.tile(cts[3][0:use_n], n * n)
    # Decrypt two rounds: first with the candidate last-round key, then with
    # the candidate second-to-last round key.
    pt0a, pt1a = sp.dec_one_round((ct0a, ct1a), ck1)
    pt0b, pt1b = sp.dec_one_round((ct0b, ct1b), ck1)
    pt0a, pt1a = sp.dec_one_round((pt0a, pt1a), ck2)
    pt0b, pt1b = sp.dec_one_round((pt0b, pt1b), ck2)
    X = sp.convert_to_binary([pt0a, pt1a, pt0b, pt1b])
    Z = net.predict(X, batch_size=10000)
    # Convert network output probabilities into log-odds before combining.
    Z = Z / (1 - Z)
    Z = np.log2(Z)
    # One row of use_n scores per candidate key pair.
    Z = Z.reshape(-1, use_n)
    # Mean log-odds per candidate, rescaled to the full ciphertext set size.
    v = np.mean(Z, axis=1) * len(cts[0])
    m = np.argmax(v)
    val = v[m]
    key1 = keys1[m]
    key2 = keys2[m]
    return (key1, key2, val)
def key_average(ct0a, ct1a, ct0b, ct1b, keys, net):
    """Average the distinguisher response over a set of trial subkeys.

    Decrypts each ciphertext pair one round under every key in `keys`,
    scores the results with `net`, and combines the scores in the odds
    domain (average of p/(1-p), mapped back to a probability).

    Parameters
    ----------
    ct0a, ct1a, ct0b, ct1b : arrays of ciphertext words (one pair per key).
    keys : array of trial last-round subkeys.
    net : keras model used for scoring.

    Returns
    -------
    float : the odds-averaged probability.
    """
    # NOTE: the original also computed len(keys) but never used it; dropped.
    pt0a, pt1a = sp.dec_one_round((ct0a, ct1a), keys)
    pt0b, pt1b = sp.dec_one_round((ct0b, ct1b), keys)
    X = sp.convert_to_binary([pt0a, pt1a, pt0b, pt1b])
    Z = net.predict(X, batch_size=10000)
    # Probabilities -> odds, average, then back to a probability.
    v = np.average(Z / (1 - Z))
    return v / (v + 1)
def tpr_fixed_key(net, n, key, nr=7, diff=(0x40, 0x0), batch_size=5000):
    """Measure the distinguisher's true-positive rate under one fixed key.

    Generates `n` random plaintext pairs with input difference `diff`,
    encrypts them for `nr` rounds under `key`, and scores the ciphertext
    pairs with `net`.

    Returns
    -------
    (acc, mse) : fraction of pairs scored above 0.5, and the mean squared
        distance of the scores from 1.
    """
    x0 = np.frombuffer(urandom(2 * n), dtype=np.uint16)
    x1 = np.frombuffer(urandom(2 * n), dtype=np.uint16)
    y0 = x0 ^ diff[0]
    y1 = x1 ^ diff[1]
    subkeys = sp.expand_key(key, nr)
    cx0, cx1 = sp.encrypt((x0, x1), subkeys)
    cy0, cy1 = sp.encrypt((y0, y1), subkeys)
    features = sp.convert_to_binary([cx0, cx1, cy0, cy1])
    scores = net.predict(features, batch_size=batch_size).flatten()
    acc = np.sum(scores > 0.5) / n
    err = 1 - scores
    mse = np.mean(err * err)
    return (acc, mse)
def bayesian_key_recovery(cts, net=net7, m=m7, s=s7, num_cand=32, num_iter=5, seed=None):
    """Bayesian-optimization-style search for the last-round subkey.

    Iteratively scores `num_cand` candidate keys against the ciphertext
    pairs in `cts`, then uses the wrong-key response profile (`m`, `s`,
    via `bayesian_rank_kr`) to pick the next generation of candidates.

    Parameters
    ----------
    cts : tuple of four arrays of ciphertext words.
    net : keras model used as the distinguisher.
    m, s : wrong-key response profile (means / std devs) for ranking.
    num_cand : number of candidate keys kept per iteration.
    num_iter : number of search iterations.
    seed : optional array of initial key candidates; random if None.

    Returns
    -------
    (all_keys, scores, all_v) : every candidate tried, the final ranking
        scores, and the log-odds score of every candidate tried.
    """
    n = len(cts[0])
    # Start from random candidates over the low 14 key bits unless seeded.
    keys = np.random.choice(2**(WORD_SIZE - 2), num_cand, replace=False)
    if seed is not None:
        keys = np.copy(seed)
    ct0a, ct1a, ct0b, ct1b = np.tile(cts[0], num_cand), np.tile(
        cts[1], num_cand), np.tile(cts[2], num_cand), np.tile(cts[3], num_cand)
    scores = np.zeros(2**(WORD_SIZE - 2))
    # Penalty term for already-visited keys; note it is never updated in
    # this function, so it stays all-zero here.
    used = np.zeros(2**(WORD_SIZE - 2))
    all_keys = np.zeros(num_cand * num_iter, dtype=np.uint16)
    all_v = np.zeros(num_cand * num_iter)
    for i in range(num_iter):
        # One copy of every ciphertext pair per candidate key.
        k = np.repeat(keys, n)
        c0a, c1a = sp.dec_one_round((ct0a, ct1a), k)
        c0b, c1b = sp.dec_one_round((ct0b, ct1b), k)
        X = sp.convert_to_binary([c0a, c1a, c0b, c1b])
        Z = net.predict(X, batch_size=10000)
        Z = Z.reshape(num_cand, -1)
        means = np.mean(Z, axis=1)
        # Combined log-odds score per candidate.
        Z = Z / (1 - Z)
        Z = np.log2(Z)
        v = np.sum(Z, axis=1)
        all_v[i * num_cand:(i + 1) * num_cand] = v
        all_keys[i * num_cand:(i + 1) * num_cand] = np.copy(keys)
        # Rank every possible (low-14-bit) key by how well its expected
        # wrong-key response matches the observed means.
        scores = bayesian_rank_kr(keys, means, m=m, s=s)
        tmp = np.argpartition(scores + used, num_cand)
        keys = tmp[0:num_cand]
        # Randomize the top two key bits, which the ranking cannot see.
        r = np.random.randint(0, 4, num_cand, dtype=np.uint16)
        r = r << 14
        keys = keys ^ r
    return (all_keys, scores, all_v)
def wrong_key_decryption(n, diff=(0x0040, 0x0), nr=7, net=net7):
    """Profile the distinguisher's wrong-key response.

    For every possible 16-bit difference between the true last-round
    subkey and a trial key, encrypts `n` fresh random plaintext pairs
    (input difference `diff`) for nr+1 rounds, decrypts one round with
    the offset trial key, and records the mean and standard deviation
    of the network's scores.

    Returns
    -------
    (means, sig) : arrays of length 2**16 indexed by the key difference.
    """
    mean_by_diff = np.zeros(2**16)
    std_by_diff = np.zeros(2**16)
    for key_diff in range(2**16):
        master = np.frombuffer(urandom(8 * n), dtype=np.uint16).reshape(4, -1)
        ks = sp.expand_key(master, nr + 1)
        p0l = np.frombuffer(urandom(2 * n), dtype=np.uint16)
        p0r = np.frombuffer(urandom(2 * n), dtype=np.uint16)
        p1l = p0l ^ diff[0]
        p1r = p0r ^ diff[1]
        c0l, c0r = sp.encrypt((p0l, p0r), ks)
        c1l, c1r = sp.encrypt((p1l, p1r), ks)
        # Trial key sits at the fixed difference key_diff from the real one.
        trial = key_diff ^ ks[nr]
        d0l, d0r = sp.dec_one_round((c0l, c0r), trial)
        d1l, d1r = sp.dec_one_round((c1l, c1r), trial)
        features = sp.convert_to_binary([d0l, d0r, d1l, d1r])
        scores = net.predict(features, batch_size=10000).flatten()
        mean_by_diff[key_diff] = np.mean(scores)
        std_by_diff[key_diff] = np.std(scores)
    return (mean_by_diff, std_by_diff)
def key_rank_one_round(nr, net, n_blocks=1, diff=(0x0040, 0x0)):
    """Rank the true last-round subkey among all 2**16 trial keys.

    Encrypts `n_blocks` random plaintext pairs (difference `diff`) for
    `nr` rounds under a random key, decrypts one round under every
    possible 16-bit subkey, scores each trial decryption with `net`,
    and reports where the true subkey's combined score ranks.

    Returns
    -------
    (rank0, rank1) : number of trial keys scoring strictly above /
        at least as high as the true subkey.
    """
    pt0a = np.frombuffer(urandom(2 * n_blocks), dtype=np.uint16).reshape(n_blocks, -1)
    pt1a = np.frombuffer(urandom(2 * n_blocks), dtype=np.uint16).reshape(n_blocks, -1)
    pt0b, pt1b = pt0a ^ diff[0], pt1a ^ diff[1]
    # Zero-key one-round decryption of the plaintexts before encrypting —
    # presumably so the chosen difference appears after the first round;
    # TODO confirm against how the attack sets up its distinguisher rounds.
    pt0a, pt1a = sp.dec_one_round((pt0a, pt1a), 0)
    pt0b, pt1b = sp.dec_one_round((pt0b, pt1b), 0)
    key = np.frombuffer(urandom(8), dtype=np.uint16)
    ks = sp.expand_key(key, nr)
    # True last-round subkey; also the index of its score in Z below,
    # since trial keys are exactly 0..2**16-1 in order.
    k1 = ks[nr - 1]
    ct0a, ct1a = sp.encrypt((pt0a, pt1a), ks)
    ct0b, ct1b = sp.encrypt((pt0b, pt1b), ks)
    trial_keys = np.arange(2**16)
    # Broadcasting: ciphertext words of shape (n_blocks, 1) against the
    # (2**16,) trial-key vector — presumably yielding (n_blocks, 2**16)
    # for the key-dependent word; TODO confirm sp.dec_one_round semantics.
    c0a, c1a = sp.dec_one_round((ct0a, ct1a), trial_keys)
    c0b, c1b = sp.dec_one_round((ct0b, ct1b), trial_keys)
    # The second word appears not to depend on the trial key, so it is
    # tiled out to match the key-dependent word — TODO confirm.
    c1a = np.tile(c1a, 2**16)
    c1b = np.tile(c1b, 2**16)
    #the next two lines are the only bits of this function that change
    #if instead of a neural network the difference distribution table is used for inference
    #in particular, in this case, conversion to network input is replaced by calculation of trial decryption differences
    #Z is then calculated simply by looking up the relevant transition probabilities in the ddt
    #instead of a neural net, the function then expects as second input a table of size 2**32
    X = sp.convert_to_binary(
        [c0a.flatten(), c1a.flatten(), c0b.flatten(), c1b.flatten()])
    Z = net.predict(X, batch_size=10000)
    # Probabilities -> log-odds, then sum scores over the n_blocks pairs
    # for each trial key.
    Z = Z / (1 - Z)
    Z = np.log2(Z)
    Z = Z.reshape(n_blocks, -1)
    Z = np.sum(Z, axis=0)
    rank0 = np.sum(Z > Z[k1])
    rank1 = np.sum(Z >= Z[k1])
    return (rank0, rank1)
def evaluate_ciphertexts(ct):
    """Score ciphertext pairs with the module-level `model`.

    Feeds the network both the raw ciphertext words and their zero-key
    one-round partial decryptions.

    Parameters
    ----------
    ct : tuple of four arrays (c0a, c1a, c0b, c1b).

    Returns
    -------
    1-D array of scores, one per ciphertext pair.
    """
    a0, a1 = sp.dec_one_round((ct[0], ct[1]), 0)
    b0, b1 = sp.dec_one_round((ct[2], ct[3]), 0)
    features = sp.convert_to_binary(
        [ct[0], ct[1], ct[2], ct[3], a0, a1, b0, b1])
    return model.predict(features, batch_size=10000).flatten()