def generateTestFeatures(client_socket, infile, featurefile):
    """Generate SVM feature vectors for the test sentences of one document
    and record them in test_data for later display and analysis."""
    doc = Document(infile)
    #------------------------------------------------
    # Load the pickled gold labels for the test set
    picklefile = DIR['DATA'] + 'test-labels-pickle'
    global test_labels
    with open(picklefile, 'rb') as pfile:
        test_labels = pickle.load(pfile)
    #------------------------------------------------
    # File code, used as a key prefix for display and analysis
    dirname, filename = os.path.split(infile)
    fcode = re.match(r'(.+)-parscit-section\.xml', filename).group(1)
    #------------------------------------------------
    test_sents, sent_indices = getRankedSent(doc, fcode)
    #-----------------------------------------
    # Sectional Ranker: club the sentences of each section into one
    # text block and rank the blocks
    sections = []
    for sec, block in doc.document.items():
        sentences = ''
        for key in sorted(block.keys()):
            sentences += str(block[key])
        sections.append(sentences)
    sec_ranker = Ranker(sections)
    sec_indices = sent2Section(doc, sent_indices)
    #-----------------------------------------
    for sentence, sent_idx, sec_idx in zip(test_sents, sent_indices,
                                           sec_indices):
        key = fcode + '-' + str(sent_idx)
        feature_string = test_data[key]['reallbl']
        tree = parseTrees(getDepParse(client_socket, sentence))
        feature_string += processTree(tree, sec_ranker, sec_idx, False)
        test_data[key]['depparse'] = getTree(tree)
        test_data[key]['features'] = feature_string
        writeToFile(featurefile, feature_string + '\n', 'a')
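

# generateTestFeatures fills the module-level test_data dict for later
# display and analysis. A minimal sketch of persisting it afterwards;
# dumpTestData and the 'test-data-pickle' filename are assumptions, not
# part of the original module:
def dumpTestData(picklefile=None):
    """Hypothetical helper: pickle the test_data dict built by
    generateTestFeatures so a later analysis step can reload it."""
    if picklefile is None:
        picklefile = DIR['DATA'] + 'test-data-pickle'
    with open(picklefile, 'wb') as pfile:
        pickle.dump(test_data, pfile)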


def classifyDoc(document):
    """Classify the candidate sentences of a document with a trained
    SVM-light model and emit a summary of roughly 130 words."""
    featurefile = DIR['DATA'] + 'features_svm.txt'
    classify = DIR['BASE'] + 'lib/svm-light/svm_classify'
    model = DIR['DATA'] + 'sec-tfidf-model.txt'
    outfile = DIR['DATA'] + 'svm-out-sent.txt'
    #sumlength = 5
    client_socket = getConnection()
    doc = Document(document)
    #-----------------------------------------
    # Clubbing sentences in sections and passing to the ranker
    sections = []
    for sec, block in doc.document.items():
        sentences = ''
        for key in sorted(block.keys()):
            sentences += str(block[key])
        sections.append(sentences)
    sec_ranker = Ranker(sections)
    sents, offset = doc.all_sentences()
    ranker = TextRank(sents)
    ranker.rank()
    #-----------------------------------------
    sents, sent_indices = getSecRankedSent(doc)
    #-----------------------------------------
    # The sent_idx needs to be converted to reflect the corresponding
    # section index
    sec_indices = sent2Section(doc, sent_indices)
    summary = []
    classified = []
    sum_len = 0
    for sent, sec_idx in zip(sents, sec_indices):
        #-----------------------------------------
        # dependency parse
        tree = parseTrees(getDepParse(client_socket, sent))
        #-----------------------------------------
        deleteFiles([featurefile])
        feature_string = '+1'
        feature_string += processTree(tree, sec_ranker, sec_idx, False)
        writeToFile(featurefile, feature_string + '\n', 'a')
        deleteFiles([outfile])
        subprocess.call([classify, featurefile, model, outfile])
        with open(outfile, 'r') as ofile:
            sent_val = float(ofile.read().strip())
        classified.append((sent, sent_val))
    # Pick the highest-scoring sentences first (descending SVM margin)
    for sent, val in sorted(classified, key=itemgetter(1), reverse=True):
        summary.append(sent)
        sum_len += len(sent.split(' '))
        if sum_len > 130:
            break
    writeToFile(DIR['DATA'] + 'svm_summary.txt', '\n'.join(summary), 'w')
    print '\n'.join(summary)
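

# classifyDoc above invokes svm_classify once per sentence. SVM-light
# scores every line of its input file and writes one prediction per
# output line, so all candidates could also be scored in a single call.
# A minimal sketch under that assumption (batchClassify is hypothetical):
def batchClassify(classify, featurefile, model, outfile):
    """Hypothetical helper: run svm_classify once over a feature file
    and return one float score per input line."""
    deleteFiles([outfile])
    subprocess.call([classify, featurefile, model, outfile])
    with open(outfile, 'r') as ofile:
        return [float(line.strip()) for line in ofile if line.strip()]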


def classifyDoc(document):
    """Classify top TextRank sentences with a trained SVM-light model,
    keeping positively classified ones until ~130 words are collected."""
    featurefile = DIR['DATA'] + 'features_svm.txt'
    classify = DIR['BASE'] + 'lib/svm-light/svm_classify'
    model = DIR['DATA'] + 'sec-tfidf-model.txt'
    outfile = DIR['DATA'] + 'svm-out-sent.txt'
    #sumlength = 5
    client_socket = getConnection()
    doc = Document(document)
    #-----------------------------------------
    # Clubbing sentences in sections and passing to the ranker
    sections = []
    for sec, block in doc.document.items():
        sentences = ''
        for key in sorted(block.keys()):
            sentences += str(block[key])
        sections.append(sentences)
    sec_ranker = Ranker(sections)
    sents, offset = doc.all_sentences()
    ranker = TextRank(sents)
    ranker.rank()
    looper = 20
    num = 10
    x = 0
    summary = []
    sent_idx = [0]
    sum_len = 0
    while num > 0:
        idx = ranker.scores[x][0] + offset
        x += 1
        if not validSentence(doc[idx]):
            continue
        elif doc.get_section_name(idx) == 'abstract':
            continue
        sent_idx[0] = idx
        #-----------------------------------------
        # dependency parse
        tree = parseTrees(
            getDepParse(client_socket, doc[idx].sentence.encode('utf-8')))
        #-----------------------------------------
        # The sent_idx needs to be converted to reflect the corresponding
        # section index
        sec_idx = sent2Section(doc, sent_idx)
        #-----------------------------------------
        deleteFiles([featurefile])
        feature_string = '+1'
        feature_string += processTree(tree, sec_ranker, sec_idx[0], False)
        writeToFile(featurefile, feature_string + '\n', 'a')
        deleteFiles([outfile])
        subprocess.call([classify, featurefile, model, outfile])
        with open(outfile, 'r') as ofile:
            sent_val = float(ofile.read().strip())
        if sent_val > 0:
            summary.append(doc[idx].sentence.encode('utf-8'))
            num -= 1
            sum_len += len(doc[idx].sentence.encode('utf-8').split(' '))
            if sum_len > 130:
                break
        looper -= 1
        if looper == 0:
            print "Looper Done"
            break
    writeToFile(DIR['DATA'] + 'svm_summary.txt', '\n'.join(summary), 'w')
    print '\n'.join(summary)
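

# The feature strings built in these functions start with a label and
# are fed straight to SVM-light, so processTree presumably appends
# space-separated 'index:value' pairs, e.g. '+1 1:0.043 5:0.31'. A
# minimal sketch of that format (svmlight_line is illustrative only):
def svmlight_line(label, feats):
    """Format a label and an {index: value} dict as an SVM-light line.
    Feature indices must be ascending, which sorted() guarantees here."""
    pairs = ' '.join('%d:%g' % (i, v) for i, v in sorted(feats.items()))
    return '%s %s' % (label, pairs)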


def generateTrainFeatures(client_socket, infile, featurefile):
    """Generate labelled SVM feature vectors for one training document:
    abstract sentences are the positive examples and the lowest-ranked
    TextRank sentences the negative ones."""
    doc = Document(infile)
    all_sentences, all_offset = doc.all_sentences()
    #------------------------------------------------
    # File code, used as a key prefix for display and analysis
    dirname, filename = os.path.split(infile)
    fcode = re.match(r'(.+)-parscit-section\.xml', filename).group(1)
    #------------------------------------------------
    # Positive sentences
    pos_sents, offset = doc.section_sentences('abstract')
    sent_indices = range(offset, offset + len(pos_sents))
    #-----------------------------------------
    # Sectional Ranker
    sections = []
    for sec, block in doc.document.items():
        sentences = ''
        for key in sorted(block.keys()):
            sentences += str(block[key])
        sections.append(sentences)
    sec_ranker = Ranker(sections)
    sec_indices = sent2Section(doc, sent_indices)
    #-----------------------------------------
    # Count ranker
    #count_ranker = Ranker(all_sentences, tfidf=False)
    #-----------------------------------------
    for sentence, sent_idx, sec_idx in zip(pos_sents, sent_indices,
                                           sec_indices):
        key = fcode + '-' + str(sent_idx)
        feature_string = '+1'
        tree = parseTrees(getDepParse(client_socket, sentence))
        feature_string += processTree(tree, sec_ranker, sec_idx, 1, False)
        train_data[key] = {'sentence': doc[sent_idx].sentence.encode('utf-8'),
                           'reallbl': '+1',
                           'features': feature_string}
        writeToFile(featurefile, feature_string + '\n', 'a')
    #------------------------------------------------
    # Negative sentences: the lowest-ranked TextRank sentences
    neg_ranker = TextRank(all_sentences)
    neg_ranker.rank()
    num = 5
    x = -1
    neg_sents = []
    sent_indices = []
    while num > 0:
        idx = neg_ranker.scores[x][0] + all_offset
        x -= 1
        if not validSentence(doc[idx]):
            continue
        else:
            sent_indices.append(idx)
            neg_sents.append(doc[idx].sentence.encode('utf-8'))
            num -= 1
    sec_indices = sent2Section(doc, sent_indices)
    #------------------------------------------------
    for sentence, sent_idx, sec_idx in zip(neg_sents, sent_indices,
                                           sec_indices):
        key = fcode + '-' + str(sent_idx)
        feature_string = '-1'
        tree = parseTrees(getDepParse(client_socket, sentence))
        feature_string += processTree(tree, sec_ranker, sec_idx, 1, False)
        train_data[key] = {'sentence': doc[sent_idx].sentence.encode('utf-8'),
                           'reallbl': '-1',
                           'features': feature_string}
        writeToFile(featurefile, feature_string + '\n', 'a')
    #------------------------------------------------
    print "All input files processed to create feature vectors for training."
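

# A minimal usage sketch of driving feature generation over a corpus
# directory. trainAll is a hypothetical entry point, not part of the
# original module; it assumes the dependency-parser server behind
# getConnection() (used by classifyDoc above) is already running.
def trainAll(xmldir, featurefile):
    """Hypothetical driver: generate training features for every
    *-parscit-section.xml file under xmldir, appending to featurefile."""
    client_socket = getConnection()
    deleteFiles([featurefile])
    for filename in sorted(os.listdir(xmldir)):
        if filename.endswith('-parscit-section.xml'):
            generateTrainFeatures(client_socket,
                                  os.path.join(xmldir, filename),
                                  featurefile)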