def rankConfOnTestSets(self, batch_size):
    """Score the model's ranking confidence on every test set.

    Args:
        batch_size: batch size forwarded to the Inference helper.

    Returns:
        List of (test_set_name, score) tuples, one per file matched by
        the self.home_tests glob pattern.
    """
    inference = Inference(self.f, self.cm, self.max_len, batch_size)
    results = []
    for test_path in glob(self.home_tests):
        set_name = os.path.basename(test_path).split('-')[0]
        print(set_name)
        X, F = readPC(test_path, self.max_len - 1, encoding='ascii')
        ranking = np.array(rank(F))
        # apply model
        log_probs = np.array(
            inference.applyBatch(X, INCLUDE_END_SYMBOL=self.hparams['append_end'])
        )
        results.append((set_name, getScore(log_probs, ranking)))
    return results
def rankConfOnTestSets(self, batch_size):
    """Score the autoencoder model's ranking confidence on every test set.

    Args:
        batch_size: batch size forwarded to the Inference helper.

    Returns:
        List of (test_set_name, score) tuples, one per file matched by
        the self.home_tests glob pattern (e.g. './HOME/TESTs/*.txt').
    """
    inference = Inference(self.AE_model, self.charmap, self.max_len, batch_size)
    scores = []
    for test_path in glob(self.home_tests):
        set_name = os.path.basename(test_path).split('-')[0]
        print(set_name)
        X, F = readPC(test_path, self.max_len - 1, encoding='ascii')
        # deduplicate and obtain the rank ordering of the frequencies
        rank_order = np.array(rank(F))
        # apply model
        log_probs = np.array(
            inference.applyBatch(X, INCLUDE_END_SYMBOL=self.hparams['append_end'])
        )
        scores.append((set_name, getScore(log_probs, rank_order)))
    return scores
x = x[:-1] if len(x) <= MAX_LEN and len(x) >= MIN_LEN: X.append(x) return X def write_tsv(output, X, P, encoding=ENCODING): assert len(X) == len(P) n = len(X) with open(output, 'w', encoding=encoding) as f: for x, p in zip(X, P): print("%s\t%f" % (x, p), file=f) if __name__ == '__main__': try: model_path = sys.argv[1] password_file = sys.argv[2] output_path = sys.argv[3] except: print("USAGE: model_path.h5 password_path.txt output_path.txt") sys.exit(1) X = read_passwords(password_file) cm = myPickle.load(CHARMAP) model = tf.keras.models.load_model(model_path, compile=False) S = Inference(model, cm, MAX_LEN, BATCH_SIZE) logP = S.applyBatch(X, TERMINAL_SYMBOL) write_tsv(output_path, X, logP)
passwords: 口令列表 log_probability: 非归一化的概率列表(log) encoding: 编码 Returns: """ assert len(passwords) == len(log_probability) n = len(passwords) with open(output, 'w', encoding=encoding) as f: for x, p in zip(passwords, log_probability): print("%s\t%f" % (x, p), file=f) if __name__ == '__main__': try: model_path = sys.argv[1] password_file = sys.argv[2] output_path = sys.argv[3] except: print("USAGE: model_path.h5 password_path.txt output_path.txt") sys.exit(1) passwords = read_passwords(password_file) charmap = myPickle.load(CHARMAP) model = tf.keras.models.load_model(model_path, compile=False) infer = Inference(model, charmap, MAX_LEN, BATCH_SIZE) logP = infer.applyBatch(passwords, TERMINAL_SYMBOL) # 计算概率 write_tsv(output_path, passwords, logP)