def getgoldevents(): zparModel = ZPar('english-models') #tagger = zparModel.get_tagger() depparser = zparModel.get_depparser() #stemmer = PorterStemmer() wordnet_lemmatizer = WordNetLemmatizer() gevents = file("../ni_data/event_descriptions.tsv").readlines() gevents = [line.strip().split("\t")[1].strip("\"") for line in gevents] gold_events = [] for line in gevents: parsed_sent = depparser.dep_parse_sentence(line) items = parsed_sent.strip().split("\n") items = [item.strip().split("\t") for item in items] words = [item[0] for item in items] tags = [item[1].lower() for item in items] links = [int(item[2]) for item in items] deps = [item[3].lower() for item in items] valid_words = [ words[idx] for idx, tag in enumerate(tags) if tag[:2] in ["nn", "vb", "jj", "cd", "rb"] if deps[idx] in ["root", "sub", "obj", "vc", "vmod", "nmod", "pmod"] ] #stemmed_words = [stemmer.stem(word.lower()) for word in valid_words if word not in ["is", "are", "a", "an", "be", "had", "ha"]] stemmed_words = [ wordnet_lemmatizer.lemmatize(word.lower()) for word in valid_words if word not in ["is", "are", "a", "an", "be", "had", "ha"] ] print "-gold", stemmed_words gold_events.append(list(set(stemmed_words))) return gold_events
def setUp():
    """Load the shared ZPar dependency parser used by the tests.

    Requires the ``ZPAR_MODEL_DIR`` environment variable to point at a
    ZPar model directory; populates the module-level ``z`` and
    ``depparser`` globals.
    """
    global z, depparser
    assert 'ZPAR_MODEL_DIR' in os.environ
    z = ZPar(os.environ['ZPAR_MODEL_DIR'])
    depparser = z.get_depparser()
class StoppableServer(_baseclass):
    """RPC server exposing ZPar tagging/parsing methods, stoppable remotely.

    On construction it loads a ZPar model and registers only the methods
    for the models named in ``model_list`` ('tagger', 'parser',
    'depparser'), plus ``stop_server`` so a client can shut it down.
    """

    allow_reuse_address = True

    def __init__(self, addr, zpar_model_path, model_list, *args, **kwds):
        # Remember where we are serving from, for the shutdown message.
        self.myhost, self.myport = addr

        # Load the ZPar models once and keep the handle for cleanup.
        self.z = ZPar(zpar_model_path)

        _baseclass.__init__(self, addr, *args, **kwds)

        # Register only the methods for the requested models, in the
        # same order as before.
        if 'tagger' in model_list:
            tagger = self.z.get_tagger()
            for fn in (tagger.tag_sentence, tagger.tag_file):
                self.register_function(fn)

        if 'parser' in model_list:
            parser = self.z.get_parser()
            for fn in (parser.parse_sentence,
                       parser.parse_file,
                       parser.parse_tagged_sentence,
                       parser.parse_tagged_file):
                self.register_function(fn)

        if 'depparser' in model_list:
            parser = self.z.get_depparser()
            for fn in (parser.dep_parse_sentence,
                       parser.dep_parse_file,
                       parser.dep_parse_tagged_sentence,
                       parser.dep_parse_tagged_file):
                self.register_function(fn)

        # Allow clients to terminate the server remotely.
        self.register_function(self.stop_server)
        self.quit = False

    def serve_forever(self):
        """Serve requests until ``stop_server`` is called or Ctrl-C."""
        while not self.quit:
            try:
                self.handle_request()
            except KeyboardInterrupt:
                print("\nKeyboard interrupt received, exiting.")
                break
        # Release the ZPar models and the listening socket.
        self.z.close()
        self.server_close()

    def stop_server(self):
        """Flag the serve loop to exit; returns a (code, message) pair."""
        self.quit = True
        return 0, "Server terminated on host %r, port %r" % (self.myhost, self.myport)
def extractStockNews(stock_newsDir, symCompHash, sentNUM): snp_comp = symCompHash.values() zparModel = ZPar('english-models') #tagger = zparModel.get_tagger() depparser = zparModel.get_depparser() stemmer = PorterStemmer() dayNews = [] for dayDir in sorted(os.listdir(stock_newsDir)): if len(dayDir) != 10: continue #if int(dayDir[-2:]) > 5: continue #if dayDir != "2015-04-30": continue newsContents = set() for newsfile in sorted(os.listdir(stock_newsDir + dayDir)): #print "##############################################################" content = open(stock_newsDir + dayDir + "/" + newsfile, "r").read() printable = set(string.printable) content = filter(lambda x: x in printable, content) #print content #print "##############################################################" #sents = get_valid_news_content(content) sents = get_valid_1stpara_news(content) if sents is None: continue headline = re.sub("^(rpt )?update\s*\d+\s", "", "###".join(sents[:sentNUM]).lower()) headline = re.sub("\s+", " ", headline) newsContents.add(headline) oneDayNews = [] # [(matchedSNPComp, headline), ...] # matchedSNPComp: [(matchedPart, WholeCompName), ...] 
fullNameNum = 0 doubtCounter = Counter() if 0: print "\n".join(sorted(list(newsContents))) continue newsHash = {} headlineCompHash = {} for headline in newsContents: fullMatch = findComp_name(headline.replace("###", " "), snp_comp) #symMatch = [(word, symCompHash[word]) for word in headline.replace("###", " ").split() if word in symCompHash and word not in ["a", "an", "has"]] symMatch = [ word for word in headline.replace("###", " ").split() if word in [ "ge", "gt", "gm", "aig", "cvs", "oi", "adm", "jpm", "twc", "cvc", "se" ] ] symMatch = list([symCompHash[sym] for sym in set(symMatch)]) if fullMatch is not None or len(symMatch) > 0: if 0: print "---------------------------" print fullMatch, symMatch print headline continue if fullMatch is not None: symMatch.extend(fullMatch) headlineCompHash[headline] = symMatch # get valid words in headline parsed_sents = [ depparser.dep_parse_sentence(sent) for sent in headline.split("###") ] triples = frmTriple(parsed_sents, None) triples = [ stemmer.stem(word) for word in triples if word not in [":", "(", ")", ",", ".", "\"", "'"] ] sortedText = " ".join(sorted(triples)) if sortedText not in newsHash: newsHash[sortedText] = headline for impText, headline in newsHash.items(): fullNameNum += 1 oneDayNews.append( (headlineCompHash[headline], headline, impText.split())) #doubtMatch = [matchedComp[idx] for idx in range(len(matchedComp)) if matchScore[idx] > 0.33 and matchScore[idx] < 0.66] #wrongMatch = [matchedComp[idx] for idx in range(len(matchedComp)) if matchScore[idx] <= 0.33] #print "full", fullNameNum, len(newsContents), round(float(fullNameNum)/len(newsContents), 2) print "## Stock news extracting done in day", dayDir, " #snp_matched", fullNameNum, " out of all", len( newsContents), time.asctime() dayNews.append(oneDayNews) #break return dayNews
def extractStockNews(stock_newsDir, symCompHash, sentNUM): snp_comp = symCompHash.values() zparModel = ZPar('english-models') #tagger = zparModel.get_tagger() depparser = zparModel.get_depparser() stemmer = PorterStemmer() dayNews = [] for dayDir in sorted(os.listdir(stock_newsDir)): if len(dayDir) != 10: continue #if int(dayDir[-2:]) > 5: continue #if dayDir != "2015-04-30": continue newsContents = set() for newsfile in sorted(os.listdir(stock_newsDir + dayDir)): #print "##############################################################" content = open(stock_newsDir + dayDir + "/" + newsfile, "r").read() printable = set(string.printable) content = filter(lambda x:x in printable, content) #print content #print "##############################################################" #sents = get_valid_news_content(content) sents = get_valid_1stpara_news(content) if sents is None: continue headline = re.sub("^(rpt )?update\s*\d+\s", "", "###".join(sents[:sentNUM]).lower()) headline = re.sub("\s+", " ", headline) newsContents.add(headline) oneDayNews = [] # [(matchedSNPComp, headline), ...] # matchedSNPComp: [(matchedPart, WholeCompName), ...] 
fullNameNum = 0 doubtCounter = Counter() if 0: print "\n".join(sorted(list(newsContents))) continue newsHash = {} headlineCompHash = {} for headline in newsContents: fullMatch = findComp_name(headline.replace("###", " "), snp_comp) #symMatch = [(word, symCompHash[word]) for word in headline.replace("###", " ").split() if word in symCompHash and word not in ["a", "an", "has"]] symMatch = [word for word in headline.replace("###", " ").split() if word in ["ge", "gt", "gm", "aig", "cvs", "oi", "adm", "jpm", "twc", "cvc", "se"]] symMatch = list([symCompHash[sym] for sym in set(symMatch)]) if fullMatch is not None or len(symMatch) > 0: if 0: print "---------------------------" print fullMatch, symMatch print headline continue if fullMatch is not None: symMatch.extend(fullMatch) headlineCompHash[headline] = symMatch # get valid words in headline parsed_sents = [depparser.dep_parse_sentence(sent) for sent in headline.split("###")] triples = frmTriple(parsed_sents, None) triples = [stemmer.stem(word) for word in triples if word not in [":", "(", ")", ",", ".", "\"", "'"]] sortedText = " ".join(sorted(triples)) if sortedText not in newsHash: newsHash[sortedText] = headline for impText, headline in newsHash.items(): fullNameNum += 1 oneDayNews.append((headlineCompHash[headline], headline, impText.split())) #doubtMatch = [matchedComp[idx] for idx in range(len(matchedComp)) if matchScore[idx] > 0.33 and matchScore[idx] < 0.66] #wrongMatch = [matchedComp[idx] for idx in range(len(matchedComp)) if matchScore[idx] <= 0.33] #print "full", fullNameNum, len(newsContents), round(float(fullNameNum)/len(newsContents), 2) print "## Stock news extracting done in day", dayDir, " #snp_matched", fullNameNum, " out of all", len(newsContents), time.asctime() dayNews.append(oneDayNews) #break return dayNews