Example #1
import nltk.data

#project-local helper modules referenced below
import FileReadingFuncts
import FileWritingFuncts
import SentenceTaggingFuncts
import TimeFunct

def tag_files():
    path = "untagged/"
    #loads NLTK's pre-trained English Punkt sentence tokenizer
    #(loaded here but not used further in this snippet)
    tokenizer = nltk.data.load('tokenizers/punkt/PY3/english.pickle')
    files = FileReadingFuncts.get_files(path)
    for file in files:
        data = FileReadingFuncts.read_file(path, file)

        #tags the times
        entities_to_tag = TimeFunct.get_end_time_examples(data)
        data = tag_entities(data, entities_to_tag)

        #tags the named entities
        data = tag_named_entities(data)

        #tags the sentences
        sentences = SentenceTaggingFuncts.get_sentences(data)
        entities_to_tag = get_in_dict(sentences, "sentence")
        data = tag_entities(data, entities_to_tag)

        #tags the paragraphs
        paragraphs = SentenceTaggingFuncts.get_paragraphs(data)
        entities_to_tag = get_in_dict(paragraphs, "paragraph")
        data = tag_entities(data, entities_to_tag)

        FileWritingFuncts.writeTaggedFile(data, file)
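
For context, a minimal sketch of what the tag_entities helper called above might look like; the body and the dict keys ("tag", "start", "end") are assumptions inferred from the call sites, not the project's actual implementation.

#Hypothetical sketch of the assumed tag_entities helper: wraps each matched
#span of `data` in an XML-style tag. The dict keys ("tag", "start", "end")
#are assumptions inferred from the calls above.
def tag_entities(data, entities_to_tag):
    #splice in markup from the end backwards so earlier offsets stay valid
    for entity in sorted(entities_to_tag, key=lambda e: e["start"], reverse=True):
        start, end, tag = entity["start"], entity["end"], entity["tag"]
        data = (data[:start] + "<{}>".format(tag) +
                data[start:end] + "</{}>".format(tag) + data[end:])
    return data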
Example #2
import FileReadingFuncts
import TagExtractingFuncts

def get_all_tags(path, file, tag_name):
    #reads in the file
    data = FileReadingFuncts.read_file(path, file)

    #finds all the matches
    tagged_matches = TagExtractingFuncts.find_tag_matches(tag_name, data)

    #strips periods, then removes the tag markup from every match
    untagged_matches = []
    for match in tagged_matches:
        match = match.replace(".", "")
        untagged_matches.append(TagExtractingFuncts.get_rid_of_tags(match))

    return untagged_matches
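
A hedged usage sketch of get_all_tags; the directory, file name, and tag name below are illustrative values, not taken from the project.

#hypothetical call; "tagged/", "seminar01.txt", and "speaker" are made up
speakers = get_all_tags("tagged/", "seminar01.txt", "speaker")
print(speakers)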
Example #3
import FileReadingFuncts
import TagExtractingFuncts

def createsFiles(tag_names):

    #reads in the training files
    path = "training/"

    files = FileReadingFuncts.get_files(path)

    #one empty match list per tag name
    matches = getblank2d(len(tag_names))

    for file in files:
        data = FileReadingFuncts.read_file(path, file)
        for i in range(len(tag_names)):
            newMatches = TagExtractingFuncts.find_tag_matches(tag_names[i], data)
            matches[i] += newMatches


    #writes the deduplicated tags to the output files
    for i in range(len(tag_names)):
        outputFile = "tagFiles/{}.txt".format(tag_names[i])
        writeFile(set(matches[i]), outputFile)
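
The getblank2d helper is not shown in the snippet; a minimal sketch, assuming it simply builds one independent empty list per tag name (an inference from how matches[i] is used above):

#assumed implementation of getblank2d: n independent empty lists;
#inferred from usage above, not the project's actual code
def getblank2d(n):
    return [[] for _ in range(n)]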