def predict():
    global graph
    global model
    message = request.get_json(force=True)
    encoded = message['encoded']
    if len(encoded) > 5000:
        # Refuse overly long inputs with a sentinel prediction
        response = {'predictions': [-1]}
        return jsonify(response)
    else:
        # Split the token sequence on the newline token (id 4) and predict line by line
        newline_id = 4
        encoded = list(split_list(encoded, newline_id))
        predictions = []
        with graph.as_default():
            for e in encoded:
                if predictions != []:
                    # Re-insert a placeholder prediction for the newline between chunks
                    predictions.append(0)
                if len(e) == 2:
                    # Skip two-token chunks (likely just boundary tokens, nothing to classify)
                    continue
                # Argmax over classes, dropping the first and last positions
                predictions.extend(list(np.argmax(
                    model.predict(np.array([e])).squeeze()[1:-1], axis=-1)))
        predictions = [int(prediction) for prediction in predictions]
        response = {'predictions': predictions}
        return jsonify(response)
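# `split_list` itself is not part of this excerpt. Given that predict() splits
# the token stream on a newline token id and then treats each chunk as one line,
# a minimal sketch of the assumed helper (a generator that cuts a flat list on a
# delimiter value, keeping the delimiter with its chunk) could be:
def split_list(tokens, delimiter):
    """Yield consecutive sub-lists of `tokens`, cut after each `delimiter`."""
    chunk = []
    for token in tokens:
        chunk.append(token)
        if token == delimiter:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

# Example (hypothetical token ids): list(split_list([7, 9, 4, 8, 4], 4))
# -> [[7, 9, 4], [8, 4]]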
def BlockFrequency(e, M=10):
    # M = 10  # block size; NIST recommends M such that N = n/M < 100
    ce = list(e)
    ce = ce[:len(ce) - (len(ce) % M)]  # discard bits that don't fill a block
    n = len(ce)
    N = n / M
    run_test = True
    outstr = "Frequency Test within a Block:\n"
    outstr += "n = %d\n" % n
    outstr += "M = %d\n" % M
    outstr += "N = %d\n" % N
    if n < 100:
        run_test = False
    else:
        Pi_l = split_list(ce, M, proportion)  # pi_i: proportion of ones in each M-bit block
        Chi2_obs = 4 * M * sum([pow((i - 0.5), 2) for i in Pi_l])
        p = igamc(N / 2.0, Chi2_obs / 2.0)
        outstr += "Chi^2_obs = %.6f\n" % Chi2_obs
        outstr += "P-value = %.6f\n" % p
    if not run_test:
        outstr += 'Test not run: pre-test condition not met: '
        outstr += 'n >= 100\n'
    open("./test_results/BlockFrequency.txt", "w").write(outstr)
    if run_test:
        if p < 0.01:
            return False
    return True
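# The NIST-style tests in these excerpts rely on two helpers that are not
# shown: split_list (fixed-size blocks, with an optional per-block transform as
# used by BlockFrequency above) and proportion (fraction of ones in a block).
# A minimal sketch under those assumptions:
def split_list(seq, size, func=None):
    """Return consecutive `size`-item chunks of `seq`, applying `func` to
    each chunk when it is given (e.g. proportion for BlockFrequency)."""
    chunks = [seq[i:i + size] for i in xrange(0, len(seq), size)]
    return [func(c) for c in chunks] if func else chunks

def proportion(block):
    """Return the fraction of ones in a block of 0/1 bits."""
    return sum(block) / float(len(block))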
def LongestRunOfOnes(e):
    ce = list(e)
    n = len(ce)
    run_test = True
    outstr = "Test for the Longest Run of Ones in a Block:\n"
    outstr += "n = %d\n" % n
    if n < 128:
        run_test = False
    else:
        if n >= 750000:
            # set constants & instantiate the v_i table
            M, K, N, iv = 10000, 6, 75, 10
            vt = {10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0, 16: 0}
        elif n >= 6272:
            M, K, N, iv = 128, 5, 49, 4
            vt = {4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0}
        else:  # 128 <= n < 6272
            M, K, N, iv = 8, 3, 16, 1
            vt = {1: 0, 2: 0, 3: 0, 4: 0}
        ce = ce[:n - (n % M)]  # discard bits that don't fit
        n = len(ce)
        block_list = split_list(ce, M)  # list of M-bit blocks
        runs = []
        for block in block_list:  # tabulate frequencies of longest runs
            lr = longest_run(block)
            runs.append((block, lr))
            if lr in vt.keys():
                vt[lr] += 1
            elif lr < min(vt.keys()):
                vt[min(vt.keys())] += 1
            elif lr > max(vt.keys()):
                vt[max(vt.keys())] += 1
        pre_Chi2_l = []
        for i in xrange(K + 1):
            idx = iv + i
            num = pow((vt[idx] - N * pi_i[(K, M)][idx]), 2)
            den = float(N * pi_i[(K, M)][idx])
            pre_Chi2_l.append(num / den)
        Chi2_obs = sum(pre_Chi2_l)
        p = 1 - igamc(K / 2.0, Chi2_obs / 2.0)
        if n <= 256:
            outstr += "Blocks (& longest runs):"
            for i in xrange(len(runs)):
                (b, l) = runs[i]
                outstr += " %s (%d)\t" % ("".join([str(c) for c in b]), l)
                if i % 2:
                    outstr += '\n'
        outstr += "V_i values:\n"
        for i in sorted(vt.keys()):
            outstr += " V_%d = %d\n" % (i - iv, vt[i])
        outstr += "Chi2_obs = %.6f\n" % Chi2_obs
        outstr += "P-value = %.6f\n" % p
    if not run_test:
        outstr += 'Test not run: pre-test condition not met: '
        outstr += 'n >= 128\n'
    open("./test_results/LongestRunOfOnes.txt", "w").write(outstr)
    if run_test:
        if p < 0.01:
            return False
    return True
def NonOverlappingTemplateMatching(e, m=6):
    n = len(e)
    # m = 6  # min 2, max 16, rec 9 or 10
    N = 8  # fixed value as per spec
    M = int(n / float(N))
    ce = list(e)
    ce = ce[:len(ce) - (len(ce) % M)]
    blocks = split_list(ce, M)
    templates = sorted(get_templates(m))
    outstr = "Non-Overlapping Template Matching Test:\n"
    outstr += "n = %d\n" % n
    outstr += "N = %d\n" % N
    outstr += "M = %d\n" % M
    outstr += "%d templates to iterate over:\n" % len(templates)
    p_l = []
    first = True
    for B in templates:
        W = map(int, xrange(N))
        for j in xrange(N):
            block = blocks[j]
            hits = 0
            idx = 0
            while idx < M - m:
                if block[idx:idx + m] == B:
                    hits += 1
                    increment = m
                else:
                    increment = 1
                idx += increment
                if block[idx:idx + m] == []:
                    raise LengthError()
            W[j] = hits
        mu = float(M - m + 1) / pow(2, m)
        sigma2 = M * (1.0 / pow(2, m) - float(2 * m - 1) / pow(2, (2 * m)))
        pre_Chi2_l = map(lambda j: pow((W[j] - mu), 2) / sigma2, xrange(N))
        Chi2_obs = sum(pre_Chi2_l)
        p = igamc(N / 2.0, Chi2_obs / 2.0)
        if not first:
            outstr += '\n----------------'
        else:
            first = False
        outstr += "\nB = %s\n" % ''.join(map(str, B))
        outstr += "mu = %.6f\n" % mu
        outstr += "sigma2 = %.6f\n" % sigma2
        outstr += "Chi2_obs = %.6f\n" % Chi2_obs
        outstr += "W = %s\n" % W
        outstr += "P-value = %.6f\n" % p
        p_l.append(p)
    open("./test_results/NonOverlappingTemplateMatching.txt", "w").write(outstr)
    for j in xrange(len(p_l)):
        p = p_l[j]
        if p < 0.01:
            return False
    return True
def fetch_users(db: Dict[str, Dict[User, Talks]], args: Namespace) -> None:
    """Fetch users from database and display their usernames."""
    if args.list:  # find users / find users -ls / find users --list
        print('Available users are:')
        # Print usernames in columns
        for chunk in split_list(list(db['talks'].keys()), 3):
            for user in chunk:
                print(f" # {user:20}", end="")
            print()
def fetch_tags(db: Dict[str, Dict[User, Talks]], args: Namespace) -> None:
    """Display available tags in database."""
    if args.list:  # find tags / find tags -ls / find tags --list
        print('Available tags are:')
        # Print tags in columns
        for chunk in split_list(db['tags'], 3):
            for tag in chunk:
                print(f" # {tag:20}", end="")
            print()
def Universal(e):
    ce = list(e)
    n = len(ce)
    L, Q, expectedValue, variance = find_LQ(n)  # block length, init-segment size, reference stats
    K = int(n / L) - Q
    init_seg = split_list(ce[:Q * L], L)             # Q initialization blocks of L bits
    test_seg = split_list(ce[Q * L:(Q + K) * L], L)  # K test blocks of L bits
    T = [int(0) for i in xrange(pow(2, L))]
    for i in xrange(Q):
        cur_block = ''.join([str(j) for j in init_seg[i]])
        cur_val = int(cur_block, 2)
        T[cur_val] = i
    f_n = 0
    for i in xrange(K):
        cur_block = ''.join([str(j) for j in test_seg[i]])
        cur_val = int(cur_block, 2)
        last_occurrence = T[cur_val]
        # Store the absolute block index (init blocks occupy 0..Q-1, test block i sits
        # at Q+i) so distances between repeats within the test segment stay correct
        T[cur_val] = Q + i
        distance = Q + i - last_occurrence
        f_n += log(distance, 2)
    s = f_n
    f_n /= K
    c = 0.7 - 0.8 / L + (4.0 + 32.0 / L) * pow(K, (-3.0 / L)) / 15.0
    sigma = c * sqrt(variance / K)
    p = erfc(abs((f_n - expectedValue) / (sqrt(2) * sigma)))
    # BUG # QUESTION: Use sigma or variance in p?
    outstr = 'Maurer\'s "Universal Statistical" Test:\n'
    outstr += "n = %d, L = %d, Q = %d\n" % (n, L, Q)
    outstr += "Note: %d bits are discarded\n" % (n - (Q + K) * L)
    outstr += "c = %.6f, sigma = %.6f, K = %d, sum = %.6f\n" % (
        c, sigma, K, s)
    outstr += "f_n = %.6f, expectedValue = %.6f, variance = %.4f\n" % (
        f_n, expectedValue, variance)
    outstr += "P-value = %.6f\n" % p
    open("./test_results/Universal.txt", "w").write(outstr)
    if p < 0.01:
        return False
    else:
        return True
def get_list_sessions(self):
    """ Obtain the list of sessions, ids in a folder. """
    log(self.repo + " > Getting sessions for {0}.".format(self.name))
    fields, sql = sql_oracle.list_session
    result = self.db.execute(sql, d(folder_id=self.id))
    for row in result:
        rec = get_rec(row, fields)
        self.sessions_id[rec.session_id] = self.sessions[rec.session_name] = Session(**rec)
    # Obtain connections, in batches of at most 499 session ids
    for ids in split_list(self.sessions_id.keys(), 499):
        fields, sql = sql_oracle.list_session_conns
        # result = self.db.execute(sql, session_id=str(tuple(self.sessions_id.keys())))
        result = self.db.execute(sql.format(session_id=str(tuple(ids))))
        for row in result:
            rec = get_rec(row, fields)
            self.sessions_id[rec.session_id].add_connection(rec.connection_type, rec.connection_name)
print(all_generated_groups)

#%%
for i in range(1, myparser.num_calls + 1):
    all_generated_groups = all_generated_groups + groups.create_groups(
        myparser.all_emails, myparser.desired, i)
    print("hi")

# print the generated groups to check
print(len(all_generated_groups[0]))
print(all_generated_groups)

#%%
import helpers
import threading

batched_lists = helpers.split_list(all_generated_groups, myparser.num_threads)
# batchedLists = splitList(batchedLists, num_threads)
print("Batched lists are:")
print(batched_lists)

#%%
thread_list = []
error_count = 0
for i in batched_lists:
    try:
        t = threading.Thread(target=helpers.go_thread, args=(
            i, batched_lists.index(i), ))
exit(-1)

print_message("OK.\n")
words = []
print_message("Reading the words list... ")
word_list = open(args.wordslist)
for line in word_list.readlines():
    word = line.strip()
    if args.extension:
        word = "{}.{}".format(word, args.extension)
    words.append(word)
word_list.close()
print_message("OK.\n\tThe selected file contains " + str(len(words)) + " paths.\n")

threads = 1
if args.threads:
    threads = int(args.threads)
print_message("Hunting paths using " + str(threads) + " threads... just wait...\n")

port = 80
if args.port:
    port = int(args.port)

starting = '/'
if args.starting:
    starting = args.starting

# Split the wordlist into one portion per worker thread and start the workers
for portion in split_list(words, threads):
    threading.Thread(target=worker,
                     args=(args.target, portion, starting, port, http, args.validation)).start()
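# In this script split_list receives a thread count rather than a chunk size:
# the loop above starts exactly one worker per portion, so the helper is
# expected to cut `words` into `threads` roughly equal parts. That helper is
# not part of the excerpt; a sketch under that assumption:
def split_list(items, parts):
    """Split `items` into at most `parts` contiguous, near-equal portions."""
    size, extra = divmod(len(items), parts)
    portions, start = [], 0
    for i in range(parts):
        end = start + size + (1 if i < extra else 0)
        if end > start:
            portions.append(items[start:end])
        start = end
    return portions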
def OverlappingTemplateMatching(e, m=6):
    n = len(e)
    # m = 6  # min 2, max 16, rec 9 or 10
    N = 968   # fixed value as per spec
    M = 1032  # fixed, but could be: int(n/float(N))
    outstr = "Overlapping Template Matching Test:\n"
    outstr += "n = %d\n" % n
    outstr += "N = %d\n" % N
    outstr += "M = %d\n" % M
    run_test = True
    if n < 1000000:
        run_test = False
    else:
        ce = list(e)
        ce = ce[:len(ce) - (len(ce) % M)]
        blocks = split_list(ce, M)
        templates = sorted(get_templates(m))[::-1]
        pi = [0.364091, 0.185659, 0.139381, 0.100571, 0.070432, 0.139865]
        p_lambda = (M - m + 1.0) / pow(2, m)
        p_eta = p_lambda / 2.0
        p_l = []
        first = True
        prevlen = 0
        for tidx in xrange(len(templates)):
            B = templates[tidx]
            v = [0, 0, 0, 0, 0, 0]  # given B
            for j in xrange(N):
                block = blocks[j]
                hits = 0
                idx = 0
                while idx < M - m:
                    if block[idx:idx + m] == B:
                        hits += 1
                    idx += 1
                    if block[idx:idx + m] == []:
                        raise LengthError()
                if hits <= 5:
                    v[hits] += 1
                else:
                    v[5] += 1
            pre_Chi2_l = []
            for i in xrange(6):
                pre_Chi2_l.append(pow((v[i] - N * pi[i]), 2) / (N * pi[i]))
            Chi2_obs = sum(pre_Chi2_l)
            p = igamc(5 / 2.0, Chi2_obs / 2.0)
            p_l.append(p)
            if not first:
                outstr += '\n----------------'
            if first:
                outstr += "lambda = %d\n" % p_lambda
                outstr += "%d templates to iterate over:\n" % len(templates)
                first = False
            outstr += "\nB = %s\n" % ''.join([str(i) for i in B])
            outstr += "v = %s\n" % v
            outstr += "Chi2_obs = %.6f\n" % Chi2_obs
            outstr += "P-value = %.6f\n" % p
    if not run_test:
        outstr += 'Test not run: pre-test condition not met: '
        outstr += 'n >= 1000000\n'
    open("./test_results/OverlappingTemplateMatching.txt", "w").write(outstr)
    if run_test:
        for p in p_l:
            if p < 0.01:
                return False
    return True