def tag_files():
    """Tag every file in untagged/ and write the tagged result.

    For each file, tagging passes are applied in order: end-time
    entities, named entities, sentences, then paragraphs. Each pass
    feeds its output into the next, and the fully tagged text is
    written out via FileWritingFuncts.writeTaggedFile.
    """
    # NOTE(review): the original also loaded the nltk punkt tokenizer
    # here, but the result was never used — removed as dead code.
    path = "untagged/"
    files = FileReadingFuncts.get_files(path)
    for file in files:
        data = FileReadingFuncts.read_file(path, file)
        # tags the times
        entities_to_tag = TimeFunct.get_end_time_examples(data)
        data = tag_entities(data, entities_to_tag)
        # tags the named entities
        data = tag_named_entities(data)
        # tags the sentences
        sentences = SentenceTaggingFuncts.get_sentences(data)
        entities_to_tag = get_in_dict(sentences, "sentence")
        data = tag_entities(data, entities_to_tag)
        # tags the paragraphs
        paragraphs = SentenceTaggingFuncts.get_paragraphs(data)
        entities_to_tag = get_in_dict(paragraphs, "paragraph")
        data = tag_entities(data, entities_to_tag)
        FileWritingFuncts.writeTaggedFile(data, file)
def read_in_tags():
    """Compare my tagging against the reference tagging.

    Reads every file in test_tagged/ (file names match those in
    my_tagged/), extracts each tag type from both versions, and
    accumulates the pairs into per-tag score objects.

    Returns a dict mapping tag name -> score accumulator.
    """
    tag_names = [
        "sentence",
        "paragraph",
        "speaker",
        "location",
        "etime",
        "stime",
    ]
    # dictionary holding one score accumulator per tag name
    tag_scores = TagScoreCalculator.create_tag_dict(tag_names)
    # (file names only need to be listed from one directory —
    # both directories contain the same names)
    for tagged_file in FileReadingFuncts.get_files("test_tagged/"):
        for name in tag_names:
            # pull this tag's occurrences from both versions of the file
            reference_tags = get_all_tags("test_tagged/", tagged_file, name)
            mine_tags = get_all_tags("my_tagged/", tagged_file, name)
            # fold the pair into the running score for this tag
            tag_scores[name].add_tags(reference_tags, mine_tags)
    return tag_scores
def createsFiles(tag_names):
    """Collect tag matches from training/ and write one file per tag.

    For each name in *tag_names*, every match found across all training
    files is gathered, deduplicated, and written to
    tagFiles/<tag_name>.txt.

    Args:
        tag_names: list of tag names to extract and write out.
    """
    # reads in the tags
    path = "training/"
    files = FileReadingFuncts.get_files(path)
    matches = getblank2d(len(tag_names))
    for file in files:
        data = FileReadingFuncts.read_file(path, file)
        for i, tag_name in enumerate(tag_names):
            matches[i] += TagExtractingFuncts.find_tag_matches(tag_name, data)
    # writes the deduplicated tags to the files
    for tag_name, tag_matches in zip(tag_names, matches):
        outputFile = "tagFiles/{}.txt".format(tag_name)
        writeFile(set(tag_matches), outputFile)