class FeatureContainer:
    def __init__(self, word_dict_path):
        # Load word list: the word is the second whitespace-separated field on each line
        self.word_dict_path = word_dict_path
        self.word_list = []
        with open(word_dict_path, 'r') as ins:
            for line in ins.readlines():
                self.word_list.append(line.split()[1])
        self.word_dict = {}
        for idx, ascword in enumerate(self.word_list):
            self.word_dict[ascword.decode('utf8')] = idx
        self.fb = FeatureBuilder(self.word_dict)
        self.smb = SimhashBuilder(self.word_list)
        print 'Loaded', len(self.word_list), 'words'

    def compute_feature(self, token_list):
        # Tokens not yet in the vocabulary are added to both builders before computing
        new_words = []
        for token in token_list:
            if token not in self.word_dict:
                new_words.append(token)
        if len(new_words) != 0:
            # Update word_list and word_dict
            self.fb.update_words(new_words)
            self.smb.update_words([word.encode('utf8') for word in new_words])
            self.word_dict = self.fb.word_dict
            self.word_list.extend([word.encode('utf8') for word in new_words])
        feature_vec = self.fb.compute(token_list)
        return feature_vec, self.smb.sim_hash(feature_vec)
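For orientation, here is a hedged usage sketch of FeatureContainer. The file name and token list are made up for illustration, and it assumes the FeatureBuilder and SimhashBuilder classes from earlier in this write-up are in scope; the dictionary file needs at least two whitespace-separated fields per line, with the word in the second field, since the constructor reads line.split()[1].

# Illustrative only: 'word_dict.txt' and the tokens below are hypothetical
fc = FeatureContainer('word_dict.txt')
tokens = [u'simhash', u'duplicate', u'detection']
feature_vec, fingerprint = fc.compute_feature(tokens)
print 'fingerprint:', fingerprint

The point of compute_feature is that unseen tokens grow the vocabulary on the fly, so word_dict, word_list and both builders stay consistent as new documents arrive.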
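SimhashBuilder itself is not shown in this section. As a reference for what sim_hash presumably does with the feature vector, the standard Charikar simhash scheme is sketched below in a generic, self-contained form; it is not claimed to be the exact implementation behind smb.sim_hash.

def sim_hash_sketch(feature_vec, word_hashes, f=64):
    # feature_vec: one weight per vocabulary word; word_hashes: one f-bit hash per word.
    v = [0] * f
    for word_idx, weight in enumerate(feature_vec):
        if weight == 0:
            continue
        h = word_hashes[word_idx]
        for i in range(f):
            # Add the weight where bit i of the word hash is set, subtract it elsewhere.
            if (h >> i) & 1:
                v[i] += weight
            else:
                v[i] -= weight
    # The fingerprint keeps a 1 in every position whose accumulated weight is positive.
    fingerprint = 0
    for i in range(f):
        if v[i] > 0:
            fingerprint |= (1 << i)
    return fingerprint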
# Excerpt from the detection script: sys/os are imported and jt, fb, smb,
# doc_list and fingerprint_list are built earlier in the full script.
with open(sys.argv[4], 'r') as ins:
    for line in ins.readlines():
        doc_list.append(line.strip())

# Detection process begins
min_sim = 64
min_docid = 0
with open(sys.argv[5], 'r') as ins:
    for lineidx, line in enumerate(ins.readlines()):
        if lineidx != 642:  # debug guard: only the document on line 642 is processed
            continue
        # Tokenize
        tokens = jt.tokens(line.strip().decode('utf8'))
        # Compute text feature
        feature = fb.compute(tokens)
        # Compute simhash
        fingerprint = smb.sim_hash(feature)
        result_list = []
        for idx, fp in enumerate(fingerprint_list):
            sim = hamming_distance(fingerprint, fp, 64)
            result_list.append((sim, idx))
        # Sort candidates by Hamming distance, closest first
        result_list = sorted(result_list, key=lambda x: x[0])
        if result_list[0][0] < min_sim:
            min_sim, min_docid = result_list[0][0], lineidx
        #'''
        with open(sys.argv[6], 'w') as outs:
            outs.write(line.strip() + os.linesep)
            for sim, idx in result_list:
                outs.write('%s\t%s%s' % (sim, doc_list[idx], os.linesep))
        #'''
        #if lineidx == 2:
        #    break
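hamming_distance is likewise defined outside this excerpt. Judging from the call hamming_distance(fingerprint, fp, 64), it counts the bits in which two 64-bit fingerprints differ; a minimal sketch consistent with that signature:

def hamming_distance(hash1, hash2, f=64):
    # XOR leaves a 1 wherever the two fingerprints disagree; count those bits.
    x = (hash1 ^ hash2) & ((1 << f) - 1)
    distance = 0
    while x:
        distance += 1
        x &= x - 1  # clear the lowest set bit
    return distance

Because result_list is sorted ascending by this distance, result_list[0] is always the closest known document, and min_sim / min_docid record the smallest distance seen and the input line that produced it.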