Example #1
def run_crf(trainfile, testfile, model_file=None):

    maxlen = 100
    sents_train, tags_train, unique_words_train, unique_tags_train = \
        P.retrieve_sentences_tags(trainfile, maxlen=maxlen)
    sents_test, tags_test, unique_word_test, unique_tags_test = \
        P.retrieve_sentences_tags(testfile, maxlen=maxlen, allowedtags=unique_tags_train)

    train_data = []
    for n, st in enumerate(sents_train):
        s = []
        for m, _ in enumerate(st):
            s.append((unicode(sents_train[n][m], "utf-8")
                      , unicode(tags_train[n][m], "utf-8")))
        train_data.append(s)

    crf = CRFTagger()
    if model_file is None:
        crf.train(train_data, model_file='data/crf.mdl')
    else:
        crf.set_model_file(model_file)

    test_data = []
    for n, st in enumerate(sents_test):
        s = []
        for m, _ in enumerate(st):
            s.append((unicode(sents_test[n][m], "utf-8")
                      , unicode(tags_test[n][m], "utf-8")))
        test_data.append(s)

    print(crf.evaluate(test_data))
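
The snippet above is Python 2 (hence the unicode(...) calls). A minimal Python 3 sketch of the same train-then-evaluate flow with nltk's CRFTagger, using toy in-memory data, a hypothetical model path, and assuming python-crfsuite is installed:

from nltk.tag import CRFTagger

# Each training sentence is a list of (token, tag) tuples.
train_data = [[('dog', 'NN'), ('runs', 'VBZ')],
              [('cats', 'NNS'), ('sleep', 'VBP')]]
gold_data = [[('dog', 'NN'), ('sleeps', 'VBZ')]]

crf = CRFTagger()
crf.train(train_data, 'data/crf.mdl')     # hypothetical path; writes the model file
print(crf.tag_sents([['dog', 'runs']]))   # [[(token, tag), ...]]; tags depend on the trained model
print(crf.evaluate(gold_data))            # token accuracy (newer NLTK renames evaluate() to accuracy())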
Example #2
File: pos.py Project: j-duff/cltk
 def tag_crf(self, untagged_string: str):
     """Tag POS with CRF tagger.
     :type untagged_string: str
     :param : An untagged, untokenized string of text.
     :rtype tagged_text: str
     """
     untagged_tokens = wordpunct_tokenize(untagged_string)
     pickle_path = self.available_taggers['crf']
     tagger = CRFTagger()
     tagger.set_model_file(pickle_path)
     tagged_text = tagger.tag(untagged_tokens)
     return tagged_text
Example #3
def ExtractItemsFromJudgment(text,CodeTaggerFile,TitleTaggerFile):
        text = removeHTMLTags(text)
        tokenList = tokenizeTestData(text) 
        CodesTagger = CRFTagger()
        TitleTagger = CRFTagger()
        
        CodesTagger.set_model_file(CodeTaggerFile)
        TitleTagger.set_model_file(TitleTaggerFile)
      
        taggedCodes =  CodesTagger.tag_sents(tokenList)
        taggedTitles = TitleTagger.tag_sents(tokenList)
        
        return extract_entities(taggedCodes,taggedTitles)
Example #4
def ExtractItemsFromJudgment(text):

        text = removeHTMLTags(text)
        
        tokenList = tokenizeTestData(text) 
        CodesTagger = CRFTagger()
        titleTagger = CRFTagger()
        
        CodesTagger.set_model_file("models/CRF-Model-OnlyCodes")
        titleTagger.set_model_file("models/CRF-Model-OnlyTitles")
      
        taggedCodes =  CodesTagger.tag_sents(tokenList)
        taggedTitles = titleTagger.tag_sents(tokenList)
       
        return extract_entities(taggedCodes,taggedTitles)
Example #5
def chunking(sents, chunked_file):
    '''
    Chunking
    param sents: a list, e.g. [['dog', 'is', 'dog'], ['dog', 'good']]
    '''
	
    os.chdir('/home/zqr/code/chunk2vec/')

    start_time = time.time()
    #PoS
    print '\n-->Start PoS'
    #print '->Training PoS Tagger'
    #ct = CRFTagger()
    #ct.train(chunk_traindata(pos_trainfile), 'model.crf.tagger')
    #print '->Done'
    
    #pos_testdata_gold = chunk_traindata(pos_testfile)
    
    # pos corpus
    print '->Load CRF Tagger model'
    ct = CRFTagger()
    ### this model provides the PoS tags learned from the chunking task
    ct.set_model_file('model.crf.tagger')
    print '->Posing'
    tagged_sents = ct.tag_sents(sents)
    #print 'PoS acc.:', ct.evaluate(pos_testdata_gold)
    # write the PoS-tagged sentences to a file
    print '->Write posed file'
    pos_data(tagged_sents, 'tmp_for_chunking')
    end_time = time.time()
    print '-->Done, Time:', end_time - start_time, 's'
    # to save time, temporarily use the test corpus
    #pos_data(pos_testdata_gold, chunk_inputfile)
        
    start_time = time.time()
    ### Chunking; requires YamCha installed on the system; the CoNLL training corpus is used for training
    print '\n-->Start Chunking'
    os.system('yamcha-config --libexecdir')
    #os.chdir('/home/zqr/code/sent2vec/')
    os.system('cp /home/zqr/local/libexec/yamcha/Makefile .')
    # train the chunking model
    #os.system('make CORPUS=' + pos_trainfile +' MODEL=chunk_model train')
    os.system('yamcha -m chunk_model.model < tmp_for_chunking > ' + chunked_file)
    print '-->Done, Time:', time.time() - start_time, 's'
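Example #6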
from flask import Flask, request, jsonify
import os
import nltk
from nltk.tag import CRFTagger
import numpy as np

app = Flask(__name__)

ct = CRFTagger()
ct.set_model_file(
    os.path.dirname(os.path.abspath(__file__)) +
    '/all_indo_man_tag_corpus_model.crf.tagger')


@app.route('/', methods=['POST'])
def process():
    # Tokenize input text
    input_text = nltk.word_tokenize(request.form.get('input', ''))

    # Tag sentence
    result = ct.tag_sents([input_text])

    # Remove unwanted elements
    forbidden_tags = ['SC', 'IN', 'CC']
    for index, sentence in enumerate(result):
        result[index] = [
            word for word in sentence if word[1] not in forbidden_tags
        ]

    # Assemble output
    output = ''
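Example #7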
 def postag_sequence(self, data):
     ct = CRFTagger()
     ct.set_model_file(POSTAG_MODEL_DIR)
     data["postag_seq"] = ct.tag_sents([data["preprocessed_kalimat"]])[0]
Example #8
    # "CHARGE": "CHARGE",
    # "APERTURE": "APERTURE",
    # "STRENGTH": "STRENGTH",
    # "FRAGRANT": "FRAGRANT",
    # "KEY TYPE": "KEY TYPE",
    # "KEY BRAND": "KEY BRAND",
    # "KEY_TYPE": "KEY_TYPE",
    # "KEY_BRAND": "KEY_BRAND",
    # "KEY_NAME": "KEY_NAME",
    # "KEY_OS": "KEY_OS",
}

TAGGER3 = CRFTagger()

TAGGER3.set_model_file(
    os.path.abspath(
        'server/nlp/data/all_indo_man_tag_corpus_model.crf.tagger'))


def getPOSTag(_temporary_tokens):
    strin = []

    for token_tag in _temporary_tokens:
        if token_tag[0].encode('ascii', 'ignore').decode('utf8'):
            strin.append(token_tag[0].encode('ascii', 'ignore').decode('utf8'))
    return [(str(token.encode('ascii', 'ignore'),
                 'utf8'), str(tag.encode('ascii', 'ignore'), 'utf8'))
            for (token, tag) in TAGGER3.tag_sents([strin])[0]]


def getPOSTagTesting(_temporary_tokens):
Example #9
try:
    import pycrfsuite
except ImportError:
    pass
    print "ga ketemu"

MAP_ENTITY_TAG = {
    "ORGANIZATION": "organization",
    "LOCATION": "location",
    "PERSON": "person",
    "TIME": "time",
    "QUANTITY": "quantity"
}

TAGGER3 = CRFTagger()
TAGGER3.set_model_file('data/all_indo_man_tag_corpus_model.crf.tagger')


def getPOSTag(_temporary_tokens):
    strin = []
    for token_tag in _temporary_tokens:
        strin.append(unicode(token_tag[0].decode('utf-8')))
    return [(token.encode('ascii', 'ignore'), tag.encode('ascii', 'ignore'))
            for (token, tag) in TAGGER3.tag_sents([strin])[0]]


def parseEntityName(_sent):
    def getTypeData(_ne):
        """
        extract the Named Entity type
		"""
Example #10
def onsentencelist():

    ct = CRFTagger()
    """nertweetlist contains ner-tagged tweets"""
    nertweetlist = pickle.load(open("nertweetlist.pickle", "rb"))
    print(sorted(nertweetlist)[0:5])
    print(len(nertweetlist))
    """tweetlist contains the plain tweets """
    tweetlist = pickle.load(open('tweetlist.pickle', 'rb'))
    print(len(tweetlist))
    print(sorted(tweetlist)[0:5])
    """training size as percentage"""
    trainingsize = 0.9
    """ calculate where to split data """
    limit = round(trainingsize * len(nertweetlist))
    """train the data / choose one of the 2 blocks """
    #train_data = nertweetlist[:limit]
    #ct.train(train_data,'tweetmodel.crf.tagger')
    ct.set_model_file('tweetmodel.crf.tagger')
    """Test data and evaluate"""
    test_data = tweetlist[limit:]
    ct.tag_sents(test_data)  # tagging sentences
    gold_sentences = nertweetlist[limit:]
    print("\nAccuracy:", ct.evaluate(gold_sentences))
    """ TURN TRAINED TAGGED LIST AND TEST LIST INTO ONE LIST CONTAINING
        ONLY THE TRUE AND PREDTAGS"""
    pred_nerlist = []
    for sentence in tweetlist[:limit]:
        #print("DIT:", sentence)
        for (word, nertag) in ct.tag(sentence):
            pred_nerlist.append(nertag.lower())

    true_nerlist = []
    #ct_true = gold_sentences
    for sentence in nertweetlist[:limit]:
        for (word, nertag) in sentence:
            #true_nerlist.append((word,nertag))
            true_nerlist.append(nertag.lower())
    """ Print baseline """
    #TODO: calculate baseline
    """Print F-score and confusion matrix"""
    print("\nF-score (micro):",
          f1_score(true_nerlist, pred_nerlist, average='micro'))
    print("\nF-score (macro):",
          f1_score(true_nerlist, pred_nerlist, average='macro'))
    print("\nF-score (weigthed):",
          f1_score(true_nerlist, pred_nerlist, average='weighted'))
    print(
        "\nF-score (None):",
        f1_score(true_nerlist,
                 pred_nerlist,
                 average=None,
                 labels=[
                     "o", "b-per", "i-per", "b-loc", "i-loc", "b-org", "i-org",
                     "b-misc", "i-misc"
                 ]))

    print("\nConfusion matrix:\n")
    for item in [
            "O", "B-per", "I-per", "B-loc", "I-loc", "B-org", "I-org",
            "B-misc", "I-misc"
    ]:
        print("  ", item, end="")
    print(
        "\n",
        confusion_matrix(true_nerlist,
                         pred_nerlist,
                         labels=[
                             "o", "b-per", "i-per", "b-loc", "i-loc", "b-org",
                             "i-org", "b-misc", "i-misc"
                         ]))
Example #11
def get_sentimen(doc):
    ct = CRFTagger()
    ct.set_model_file('all_indo_man_tag_corpus_model.crf.tagger')

    # doc=remove_hashtag(doc)
    # doc=clean_tweet(doc)
    doc = utils.cleanAllTweet(doc)
    #print(doc)
    # split into sentences
    sentences = nltk.sent_tokenize(doc)
    # split each sentence into words
    stokens = [nltk.word_tokenize(sent) for sent in sentences]
    # Indonesian POS tagging
    taggedlist = ct.tag_sents(stokens)
    # print(taggedlist)

    # Indonesian sentiment lexicon
    barasa = pd.read_csv(
        'barasa-ID.txt',
        delimiter='\t',
        encoding='utf-8',
        header=0,
        names=['syn', 'goodness', 'lemma', 'pos', 'neg', 'dummy'])
    score_list = []
    negasi = ""
    for idx, taggedsent in enumerate(taggedlist):
        score_list.append([])
        for idx2, t in enumerate(taggedsent):
            newtag = ''

            if t[1].startswith('NN'):
                newtag = 'n'
            elif t[1].startswith('JJ'):
                newtag = 'a'
            elif t[1].startswith('VB'):
                newtag = 'v'
            elif t[1].startswith('R'):
                newtag = 'r'
            elif t[1].startswith('NEG'):
                negasi = t[0]
            else:
                newtag = ''
            if (newtag != ''):
                if (negasi != ""):
                    kalimat = negasi + ' ' + t[0]
                    negasi = ""
                else:
                    kalimat = t[0]
                lemmas = barasa[barasa['lemma'].str.contains(kalimat,
                                                             na=False)]
                score_list[idx].append(get_scores(lemmas))
    #sentence_sentiment=[]
    totalscore = 0.0
    for score_sent in score_list:
        scoresentnow = sum([word_score
                            for word_score in score_sent]) / len(score_sent)
        #sentence_sentiment.append(score_sent)
        totalscore = scoresentnow + totalscore
    sentimenScore = totalscore / len(score_list)
    print("Score sentimen = " + str(sentimenScore))
    return sentimenScore
Example #12
# -*- coding: utf-8 -*-
from nltk.tag import CRFTagger
from pythainlp.tokenize import word_tokenize
ct = CRFTagger()
ct.set_model_file('model.crf.tagger')
text = ""
while text != "exit":
    text = input("Text : ")
    post = word_tokenize(text, 'icu')
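    # ct.tag_sents returns one tagged sentence per input, i.e. [[(token, tag), ...]]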
    print(ct.tag_sents([post]))
Example #13
class Chunker:
    UNIQ = '_UNIQUE_STRING_'
    CHUNK_PARSER = None
    """
	"""
    def __init__(self):
        # Load the pre-trained POS tagger data
        uni, bi, tri, word = self.load_obj("tagger")
        self.TAGGER1 = Tagger(uni, bi, tri, word)

        # Load the pre-trained POS tagger data
        uni2, bi2, tri2, word2 = self.load_obj("tagger2")
        self.TAGGER2 = Tagger(uni2, bi2, tri2, word2)

        self.TAGGER3 = CRFTagger()
        self.TAGGER3.set_model_file(
            'dataset/all_indo_man_tag_corpus_model.crf.tagger')

        # Load the chunker grammar data
        self.load_chunker()

    """
	"""

    def load_obj(self, name):
        with open('obj/' + name + '.pkl', 'rb') as f:
            return pickle.load(f)

    """
    Format a string into a regex
	"""

    def format_to_re(self, format):
        parts = (format % MarkPlaceholders()).split(self.UNIQ)
        for i in range(0, len(parts), 2):
            parts[i] = re.escape(parts[i])

        return ' '.join(parts).replace('\\', '')

    """
    Convert a POS-tag tree into a chunk tree
	"""

    def tree_to_str(self, tree_data):
        ne_in_sent = []
        for subtree in tree_data:
            if type(subtree) == Tree:  # If subtree is a noun chunk, i.e. NE != "O"
                ne_label = subtree.label()
                ne_string = " ".join(
                    [token for token, pos in subtree.leaves()])
                ne_in_sent.append((ne_string, ne_label))
            else:
                ne_in_sent.append((subtree[0], subtree[1]))

        return ne_in_sent

    """
    Load the chunk rules
	"""

    def load_chunker(self):
        try:
            f = open('dataset/phrase_chunker_grammar_id.txt')
            files = self.format_to_re(f.read())
            grammars = files
            f.close()

            self.CHUNK_PARSER = nltk.RegexpParser(grammars)

        except Exception as e:
            print str(e)

    """
    Convert a chunk tree into a list of chunks
    in the form of a list of strings
	"""

    def get_only_str(self, tree_chunk):
        output = []
        for chunk, tag in tree_chunk:
            output.append(chunk)

        return output

    """
    Convert a list of chunk strings into a single string
    with the format: [chunk1] [chunk2] ... [chunkN]
	"""

    def beautify(self, chunks):
        strout = ""
        for s in chunks:
            strout += "[" + s + "] "

        return strout

    """
    POS-tag every word in the sentence
    Chunk the sentence
    Return the chunk Tree
	"""

    def chunk_me1(self, _str):
        return self.CHUNK_PARSER.parse(
            self.TAGGER1.tagSentence(_str.split(" ")))

    """
    POS-tag every word in the sentence
    Chunk the sentence
    Return the chunk Tree
	"""

    def chunk_me2(self, _str):
        return self.CHUNK_PARSER.parse(
            self.TAGGER2.tagSentence(_str.split(" ")))

    """
	"""

    def chunk_me3(self, _str):
        _strs = _str.split(" ")
        strs = []
        for s in _strs:
            strs.append(unicode(s))

        return self.CHUNK_PARSER.parse(self.TAGGER3.tag_sents([strs])[0])
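
For reference, chunk_me3 above POS-tags the tokens with the CRF model and hands the (token, tag) pairs to an nltk.RegexpParser built from the grammar file. A self-contained sketch of that parse step with a toy, hypothetical grammar (not the project's phrase_chunker_grammar_id.txt):

import nltk

grammar = "NP: {<DT>?<JJ>*<NN.*>+}"  # toy noun-phrase rule, for illustration only
parser = nltk.RegexpParser(grammar)

tagged = [('the', 'DT'), ('quick', 'JJ'), ('fox', 'NN'), ('runs', 'VBZ')]
tree = parser.parse(tagged)          # returns an nltk.Tree containing NP subtrees
print(tree)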
Example #14
    # test_sents = [[ele[0] for ele in sent] for sent in test_sents]
    # from dpattack.libs.luna import time_record
    # with time_record():
    #     for i in range(2048):
    #         tagger.tag_sents(test_sents[i:i+1])  # crf 0.915 seconds 0.723   0.526  0.283
    # with time_record():
    #     tagger.tag_sents(test_sents[:2048])      # crf 0.936 seconds 0.661   0.495  0.278

    # tagger = nltk.BigramTagger([[('the', 'dt'), ('work', 'nn'), ('of', 'in')]])
    # print(tagger.tag_sents([('the', 'end', 'of')]))

    sent = [
        "at <UNK> p.m. , at the throw of the `` cooling off '' period , the average was down <UNK> points .".split(" ")]

    tagger = CRFTagger()
    tagger.set_model_file(
        "/disks/sdb/zjiehang/zhou_data/saved_models/crftagger")
    print(tagger.tag_sents(sent))

    from dpattack.libs.luna import auto_create
    tagger = auto_create("trigram_tagger",
                         lambda: train_gram_tagger(
                             train_corpus, ngram=3),
                         cache=True, path='/disks/sdb/zjiehang/zhou_data/saved_vars')
    print(tagger.tag_sents(sent))


def gen_tag_dict(corpus: Corpus, vocab: Vocab,
                 threshold=3,
                 verbose=True):
    """
        Rule:
        "PRON VERB NOUN VERB ADP DET NOUN")
    """

    # Extract features from words in the given text
    features = generateUtterancesFeatures(text)
    # Predict tags for the given utterance
    tags = crf.predict(features)
    return ' '.join(str(t) for w in tags for t in w)


# CRF POS TAGGING - PRE-TRAINED POS-TAGGER
# Path to the pre-trained POS-tagger
TAGGER_PATH = "crfpostagger"
# Initialize tagger
tagger = CRFTagger()
tagger.set_model_file(TAGGER_PATH)

#def tag_text_CRF(text):
#    """
#    Function used for tagging the given text. This function uses a CRF predefined pos tagger.
#
#    :param text: text to be associated with the POS tags
#    :return: string containing POS tags for the given text; each tag refers to the word at the same index in the sentence
#        (eg. for the sentence "My dog likes running around the garden." the returned string with tags is
#        "PRP$ NN VBZ VBG IN DT NN")
#    """
#
#    tags= tagger.tag([word.lower() for word in text.split()])
#    return ' '.join(str(t) for w,t in tags)
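
# An active, hedged sketch of the commented-out helper above (uses the same pre-trained tagger object):
def tag_text_crf(text):
    # Tag a lower-cased, whitespace-split text and return its tag sequence as one string.
    tags = tagger.tag([word.lower() for word in text.split()])
    return ' '.join(tag for _word, tag in tags)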

# HMM POS TAGGING
Example #16
from nltk.tag import CRFTagger

ct = CRFTagger()
ct.set_model_file('pos-tagger-indonesia-model.tagger')

hasil = ct.tag_sents([['Saya', 'bekerja', 'di', 'Bandung'],
                      ['Nama', 'saya', 'Yudi']])
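# hasil holds one list of (token, tag) tuples per input sentence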
print(hasil)
Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Readers for the pke module."""

import xml.etree.ElementTree as etree
import spacy

from pke.data_structures import Document
from nltk.tag import CRFTagger
from nltk.tokenize import sent_tokenize, word_tokenize, TweetTokenizer
from nltk.corpus import stopwords
import string
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
ct = CRFTagger()
ct.set_model_file('./all_indo_man_tag_corpus_model.crf copy.tagger')
factory = StemmerFactory()
stemmer = factory.create_stemmer()
tokenizer_words = TweetTokenizer()


class Reader(object):
    def read(self, path):
        raise NotImplementedError


class MinimalCoreNLPReader(Reader):
    """Minimal CoreNLP XML Parser."""
    def __init__(self):
        self.parser = etree.XMLParser()

    def read(self, path, **kwargs):
Example #18
from nltk.tag import CRFTagger
import nltk

import credentials_var as cred

ct = CRFTagger()
ct.set_model_file('../references/all_indo_man_tag_corpus_model.crf.tagger')


def pos_tagger(tokens):
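    # tag_sents expects a list of token lists and returns a list of tagged sentences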
    return ct.tag_sents([tokens])


for element in list(cred.find_all):
    text = element['extended_tweet']['full_text'] if element['truncated'] is True else element['text']
    print(pos_tagger(nltk.tokenize.word_tokenize(text)))
Example #19
from nltk.tag import CRFTagger

crflan = CRFTagger()
crf = CRFTagger()

crflan.set_model_file('model.crf.tagger')
crf.set_model_file('model1.crf.tagger')

print "Give a sentence..."
# Test
test_sent = raw_input()
test_sent = test_sent.encode('utf-8').decode('utf-8').split(' ')
print test_sent
half_ans = crflan.tag(test_sent)
print half_ans

# print test_sent
print crf.tag(test_sent)
Example #20
        prefix = []
        for word, pos in zip(word_pos_data[speaker][1],
                             word_pos_data[speaker][2]):
            prefix.append(word.replace("$unc$", ""))
            sp_data.append((unicode(word.replace("$unc$", "")
                                    .encode("utf8")),
                            unicode(pos.encode("utf8"))))
        training_data.append(deepcopy(sp_data))
    print "training tagger..."
    ct.train(training_data, TAGGER_PATH)


if TEST:
    print "testing tagger..."
    ct = CRFTagger()  # initialize tagger
    ct.set_model_file(TAGGER_PATH)
    dialogue_speakers = []
    for disf_file in DISFLUENCY_TEST_FILES:
        IDs, mappings, utts, pos_tags, labels = \
            load_data_from_disfluency_corpus_file(disf_file)
        dialogue_speakers.extend(sort_into_dialogue_speakers(IDs,
                                                             mappings,
                                                             utts,
                                                             pos_tags,
                                                             labels))
    word_pos_data = {}  # map from the file name to the data
    for data in dialogue_speakers:
        dialogue, a, b, c, d = data
        word_pos_data[dialogue] = (a, b, c, d)
    ct.tag([unicode(w) for w in "uh my name is john".split()])
    # either gather training data or test data
Example #21
# ---------------------------------------------------------------|
stop_words = set(stopwords.words('indonesian'))
words = word_tokenize(text)

new_sentence = []

for word in words:
    if word not in stop_words:
        new_sentence.append(word)

new_sentence = [
    unicode(new_sentence[x], "utf-8") for x in range(len(new_sentence))
]

ct = CRFTagger()
ct.set_model_file('all_indo_man_tag_corpus_model.crf.tagger')
hasil = ct.tag_sents([new_sentence])

# ---------------------------------------------------------------|
# to see the frequency of the words that appear
# ---------------------------------------------------------------|
# fdist = FreqDist(new_sentence)
# print(fdist.most_common())

# ---------------------------------------------------------------|
# to see the frequency of the words that appear
# ---------------------------------------------------------------|
for tokenTag in hasil[0]:
    token, tag = tokenTag
    token_text = unicodedata.normalize(u'NFKD',
                                       token).encode(u'ascii', u'ignore')
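Example #22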
from nltk.tag import CRFTagger
from nltk.corpus import brown
from sklearn.metrics import classification_report as crf

ct = CRFTagger()

ct.set_model_file("model.crf.tagger")

brown_sents = brown.sents()
size = int(len(brown_sents) * 0.7)

test_sents = brown_sents[size:]

flat_list = []
for sublist in test_sents:
    for item in sublist:
        flat_list.append(item)

l = ct.tag(flat_list)
y_pred = []

for each in l:
    y_pred.append(each[1])

#print(y_pred[:10])

tagged_sents = brown.tagged_sents(tagset="universal")[size:]

y_true = []
for each in tagged_sents:
    for e in each:
Example #23
class DeepDisfluencyTagger(IncrementalTagger):
    """A deep-learning driven incremental disfluency tagger
    (and optionally utterance-segmenter).

    Tags each word with the following:
    <f/> - a fluent word
    <e/> - an edit term word, not necessarily inside a repair structure
    <rms id="N"/> - reparandum start word for repair with ID number N
    <rm id="N"/> - mid-reparandum word for repair N
    <i id="N"/> - interregnum word for repair N
    <rps id="N"/> - repair onset word for repair N
    <rp id="N"/> - mid-repair word for repair N
    <rpn id="N"/> - repair end word for substitution or repetition repair N
    <rpnDel id="N"/> - repair end word for a delete repair N

    If in joint utterance segmentation mode
    according to the config file,
    the following utterance segmentation tags are used:

    <cc/> - a word which continues the current utterance and whose
            following word will continue it
    <ct/> - a word which continues the current utterance and is the
            last word of it
    <tc/> - a word which is the beginning of an utterance and whose following
            word will continue it
    <tt/> - a word constituting an entire utterance
    """
    def __init__(self,
                 config_file=None,
                 config_number=None,
                 saved_model_dir=None,
                 pos_tagger=None,
                 language_model=None,
                 pos_language_model=None,
                 edit_language_model=None,
                 timer=None,
                 timer_scaler=None,
                 use_timing_data=False):

        if not config_file:
            config_file = os.path.dirname(os.path.realpath(__file__)) +\
                "/../experiments/experiment_configs.csv"
            config_number = 35
            print "No config file, using default", config_file, config_number

        super(DeepDisfluencyTagger, self).__init__(config_file, config_number,
                                                   saved_model_dir)
        print "Processing args from config number {} ...".format(config_number)
        self.args = process_arguments(config_file,
                                      config_number,
                                      use_saved=False,
                                      hmm=True)
        #  separate manual setting
        setattr(self.args, "use_timing_data", use_timing_data)
        print "Intializing model from args..."
        self.model = self.init_model_from_config(self.args)

        # load a model from a folder if specified
        if saved_model_dir:
            print "Loading saved weights from", saved_model_dir
            self.load_model_params_from_folder(saved_model_dir,
                                               self.args.model_type)
        else:
            print "WARNING no saved model params, needs training."
            print "Loading original embeddings"
            self.load_embeddings(self.args.embeddings)

        if pos_tagger:
            print "Loading POS tagger..."
            self.pos_tagger = pos_tagger
        elif self.args.pos:
            print "No POS tagger specified,loading default CRF switchboard one"
            self.pos_tagger = CRFTagger()
            tagger_path = os.path.dirname(os.path.realpath(__file__)) +\
                "/../feature_extraction/crfpostagger"
            self.pos_tagger.set_model_file(tagger_path)

        if self.args.n_language_model_features > 0 or \
                'noisy_channel' in self.args.decoder_type:
            print "training language model..."
            self.init_language_models(language_model, pos_language_model,
                                      edit_language_model)

        if timer:
            print "loading timer..."
            self.timing_model = timer
            self.timing_model_scaler = timer_scaler
        else:
            # self.timing_model = None
            # self.timing_model_scaler = None
            print "No timer specified, using default switchboard one"
            timer_path = os.path.dirname(os.path.realpath(__file__)) +\
                '/../decoder/timing_models/' + \
                'LogReg_balanced_timing_classifier.pkl'
            with open(timer_path, 'rb') as fid:
                self.timing_model = cPickle.load(fid)
            timer_scaler_path = os.path.dirname(os.path.realpath(__file__)) +\
                '/../decoder/timing_models/' + \
                'LogReg_balanced_timing_scaler.pkl'
            with open(timer_scaler_path, 'rb') as fid:
                self.timing_model_scaler = cPickle.load(fid)
                # TODO a hack
                # self.timing_model_scaler.scale_ = \
                #    self.timing_model_scaler.std_.copy()

        print "Loading decoder..."
        hmm_dict = deepcopy(self.tag_to_index_map)
        # add the interegnum tag
        if "disf" in self.args.tags:
            intereg_ind = len(hmm_dict.keys())
            interreg_tag = \
                "<i/><cc/>" if "uttseg" in self.args.tags else "<i/>"
            hmm_dict[interreg_tag] = intereg_ind  # add the interregnum tag

        # decoder_file = os.path.dirname(os.path.realpath(__file__)) + \
        #     "/../decoder/model/{}_tags".format(self.args.tags)
        noisy_channel = None
        if 'noisy_channel' in self.args.decoder_type:
            noisy_channel = SourceModel(self.lm,
                                        self.pos_lm,
                                        uttseg=self.args.do_utt_segmentation)
        self.decoder = FirstOrderHMM(
            hmm_dict,
            markov_model_file=self.args.tags,
            timing_model=self.timing_model,
            timing_model_scaler=self.timing_model_scaler,
            constraint_only=True,
            noisy_channel=noisy_channel)

        # getting the states in the right shape
        self.state_history = []
        self.softmax_history = []
        # self.convert_to_output_tags = get_conversion_method(self.args.tags)
        self.reset()

    def init_language_models(self,
                             language_model=None,
                             pos_language_model=None,
                             edit_language_model=None):
        clean_model_dir = os.path.dirname(os.path.realpath(__file__)) +\
            "/../data/lm_corpora"
        if language_model:
            self.lm = language_model
        else:
            print "No language model specified, using default switchboard one"
            lm_corpus_file = open(clean_model_dir +
                                  "/swbd_disf_train_1_clean.text")
            lines = [
                line.strip("\n").split(",")[1] for line in lm_corpus_file
                if "POS," not in line and not line.strip("\n") == ""
            ]
            split = int(0.9 * len(lines))
            lm_corpus = "\n".join(lines[:split])
            heldout_lm_corpus = "\n".join(lines[split:])
            lm_corpus_file.close()
            self.lm = KneserNeySmoothingModel(
                order=3,
                discount=0.7,
                partial_words=self.args.partial_words,
                train_corpus=lm_corpus,
                heldout_corpus=heldout_lm_corpus,
                second_corpus=None)
        if pos_language_model:
            self.pos_lm = pos_language_model
        elif self.args.pos:
            print "No pos language model specified, \
            using default switchboard one"

            lm_corpus_file = open(clean_model_dir +
                                  "/swbd_disf_train_1_clean.text")
            lines = [
                line.strip("\n").split(",")[1] for line in lm_corpus_file
                if "POS," in line and not line.strip("\n") == ""
            ]
            split = int(0.9 * len(lines))
            lm_corpus = "\n".join(lines[:split])
            heldout_lm_corpus = "\n".join(lines[split:])
            lm_corpus_file.close()
            self.pos_lm = KneserNeySmoothingModel(
                order=3,
                discount=0.7,
                partial_words=self.args.partial_words,
                train_corpus=lm_corpus,
                heldout_corpus=heldout_lm_corpus,
                second_corpus=None)
        if edit_language_model:
            self.edit_lm = edit_language_model
        else:
            edit_lm_corpus_file = open(clean_model_dir +
                                       "/swbd_disf_train_1_edit.text")
            edit_lines = [
                line.strip("\n").split(",")[1] for line in edit_lm_corpus_file
                if "POS," not in line and not line.strip("\n") == ""
            ]
            edit_split = int(0.9 * len(edit_lines))
            edit_lm_corpus = "\n".join(edit_lines[:edit_split])
            heldout_edit_lm_corpus = "\n".join(edit_lines[edit_split:])
            edit_lm_corpus_file.close()
            self.edit_lm = KneserNeySmoothingModel(
                train_corpus=edit_lm_corpus,
                heldout_corpus=heldout_edit_lm_corpus,
                order=2,
                discount=0.7)
            # TODO an object for getting the lm features incrementally
            # in the language model

    def init_model_from_config(self, args):
        # for feat, val in args._get_kwargs():
        #     print feat, val, type(val)
        if not test_if_using_GPU():
            print "Warning: not using GPU, might be a bit slow"
            print "\tAdjust Theano config file ($HOME/.theanorc)"
        print "loading tag to index maps..."
        label_path = os.path.dirname(os.path.realpath(__file__)) +\
            "/../data/tag_representations/{}_tags.csv".format(args.tags)
        word_path = os.path.dirname(os.path.realpath(__file__)) +\
            "/../data/tag_representations/{}.csv".format(args.word_rep)
        pos_path = os.path.dirname(os.path.realpath(__file__)) +\
            "/../data/tag_representations/{}.csv".format(args.pos_rep)
        self.tag_to_index_map = load_tags(label_path)
        self.word_to_index_map = load_tags(word_path)
        self.pos_to_index_map = load_tags(pos_path)
        self.model_type = args.model_type
        vocab_size = len(self.word_to_index_map.keys())
        emb_dimension = args.emb_dimension
        n_hidden = args.n_hidden
        n_extra = args.n_language_model_features + args.n_acoustic_features
        n_classes = len(self.tag_to_index_map.keys())
        self.window_size = args.window
        n_pos = len(self.pos_to_index_map.keys())
        update_embeddings = args.update_embeddings
        lr = args.lr
        print "Initializing model of type", self.model_type, "..."
        if self.model_type == 'elman':
            model = Elman(ne=vocab_size,
                          de=emb_dimension,
                          nh=n_hidden,
                          na=n_extra,
                          n_out=n_classes,
                          cs=self.window_size,
                          npos=n_pos,
                          update_embeddings=update_embeddings)
            self.initial_h0_state = model.h0.get_value()
            self.initial_c0_state = None

        elif self.model_type == 'lstm':
            model = LSTM(ne=vocab_size,
                         de=emb_dimension,
                         n_lstm=n_hidden,
                         na=n_extra,
                         n_out=n_classes,
                         cs=self.window_size,
                         npos=n_pos,
                         lr=lr,
                         single_output=True,
                         cost_function='nll')
            self.initial_h0_state = model.h0.get_value()
            self.initial_c0_state = model.c0.get_value()
        else:
            raise NotImplementedError('No model init for {0}'.format(
                self.model_type))
        return model

    def load_model_params_from_folder(self, model_folder, model_type):
        if model_type in ["lstm", "elman"]:
            self.model.load_weights_from_folder(model_folder)
            self.initial_h0_state = self.model.h0.get_value()
            if model_type == "lstm":
                self.initial_c0_state = self.model.c0.get_value()
        else:
            raise NotImplementedError(
                'No weight loading for {0}'.format(model_type))

    def load_embeddings(self, embeddings_name):
        # load pre-trained embeddings
        embeddings_dir = os.path.dirname(os.path.realpath(__file__)) +\
                                "/../embeddings/"
        pretrained = gensim.models.Word2Vec.load(embeddings_dir +
                                                 embeddings_name)
        print "emb shape", pretrained[pretrained.index2word[0]].shape
        # print pretrained[0].shape
        # assign and fill in the gaps
        emb = populate_embeddings(self.args.emb_dimension,
                                  len(self.word_to_index_map.items()),
                                  self.word_to_index_map, pretrained)
        self.model.load_weights(emb=emb)

    def standardize_word_and_pos(
            self,
            word,
            pos=None,
            proper_name_pos_tags=["NNP", "NNPS", "CD", "LS", "SYM", "FW"]):
        word = word.lower()
        if not pos and self.pos_tagger:
            pos = self.pos_tagger.tag([])  # TODO
        if pos:
            pos = pos.upper()
            if pos in proper_name_pos_tags and "$unc$" not in word:
                word = "$unc$" + word
            if self.pos_to_index_map.get(pos) is None:
                # print "unknown pos", pos
                pos = "<unk>"
        if self.word_to_index_map.get(word) is None:
            # print "unknown word", word
            word = "<unk>"
        return word, pos

    def tag_new_word(self,
                     word,
                     pos=None,
                     timing=None,
                     extra=None,
                     diff_only=True,
                     rollback=0):
        """Tag new incoming word and update the word and tag graphs.

        :param word: the word to consume/tag
        :param pos: the POS tag to consume/tag (optional)
        :param timing: the duration of the word (optional)
        :param diff_only: whether to output only the diffed suffix,
        if False, outputs entire output tags
        :param rollback: the number of words to rollback
        in the case of changed word hypotheses from an ASR
        """
        self.rollback(rollback)
        if pos is None and self.args.pos:
            # if no pos tag provided but there is a pos-tagger, tag word
            test_words = [
                unicode(x) for x in get_last_n_features(
                    "words", self.word_graph, len(self.word_graph) - 1, n=4)
            ] + [unicode(word.lower())]
            pos = self.pos_tagger.tag(test_words)[-1][1]
            # print "tagging", word, "as", pos
        # 0. Add new word to word graph
        word, pos = self.standardize_word_and_pos(word, pos)
        # print "New word:", word, pos
        self.word_graph.append((word, pos, timing))
        # 1. load the saved internal rnn state
        # TODO these nets aren't (necessarily) trained statefully
        # The internal state in training self.args.bs words back
        # are the inital ones in training, however here
        # They are the actual state reached.
        if self.state_history == []:
            c0_state = self.initial_c0_state
            h0_state = self.initial_h0_state
        else:
            if self.model_type == "lstm":
                c0_state = self.state_history[-1][0][-1]
                h0_state = self.state_history[-1][1][-1]
            elif self.model_type == "elman":
                h0_state = self.state_history[-1][-1]

        if self.model_type == "lstm":
            self.model.load_weights(c0=c0_state, h0=h0_state)
        elif self.model_type == "elman":
            self.model.load_weights(h0=h0_state)
        else:
            raise NotImplementedError("no history loading for\
                             {0} model".format(self.model_type))

        # 2. do the softmax output with converted inputs
        word_window = [
            self.word_to_index_map[x]
            for x in get_last_n_features("words",
                                         self.word_graph,
                                         len(self.word_graph) - 1,
                                         n=self.window_size)
        ]
        pos_window = [
            self.pos_to_index_map[x]
            for x in get_last_n_features("POS",
                                         self.word_graph,
                                         len(self.word_graph) - 1,
                                         n=self.window_size)
        ]
        # print "word_window, pos_window", word_window, pos_window
        if self.model_type == "lstm":
            h_t, c_t, s_t = self.model.\
                soft_max_return_hidden_layer([word_window], [pos_window])
            self.softmax_history.append(s_t)
            if len(self.state_history) == 20:  # just saving history
                self.state_history.pop(0)  # pop first one
            self.state_history.append((c_t, h_t))
        elif self.model_type == "elman":
            h_t, s_t = self.model.soft_max_return_hidden_layer([word_window],
                                                               [pos_window])
            self.softmax_history.append(s_t)
            if len(self.state_history) == 20:
                self.state_history.pop(0)  # pop first one
            self.state_history.append(h_t)
        else:
            raise NotImplementedError("no softmax implemented for\
                                 {0} model".format(self.model_type))
        softmax = np.concatenate(self.softmax_history)

        # 3. do the decoding on the softmax
        if "disf" in self.args.tags:
            edit_tag = "<e/><cc>" if "uttseg" in self.args.tags else "<e/>"
            # print self.tag_to_index_map[edit_tag]
            adjustsoftmax = np.concatenate(
                (softmax, softmax[:, self.tag_to_index_map[edit_tag]].reshape(
                    softmax.shape[0], 1)), 1)
        else:
            adjustsoftmax = softmax
        last_n_timings = None if ((not self.args.use_timing_data) or
                                  not timing) \
            else get_last_n_features("timings", self.word_graph,
                                     len(self.word_graph)-1,
                                     n=3)
        new_tags = self.decoder.viterbi_incremental(
            adjustsoftmax,
            a_range=(len(adjustsoftmax) - 1, len(adjustsoftmax)),
            changed_suffix_only=True,
            timing_data=last_n_timings,
            words=[word])
        # print "new tags", new_tags
        prev_output_tags = deepcopy(self.output_tags)
        self.output_tags = self.output_tags[:len(self.output_tags) -
                                            (len(new_tags) - 1)] + new_tags

        # 4. convert to standardized output format
        if "simple" in self.args.tags:
            for p in range(
                    len(self.output_tags) - (len(new_tags) + 1),
                    len(self.output_tags)):
                rps = self.output_tags[p]
                self.output_tags[p] = rps.replace('rm-0',
                                                  'rps id="{}"'.format(p))
                if "<i" in self.output_tags[p]:
                    self.output_tags[p] = self.output_tags[p].\
                        replace("<e/>", "").replace("<i", "<e/><i")
        else:
            # new_words = [word]
            words = get_last_n_features("words",
                                        self.word_graph,
                                        len(self.word_graph) - 1,
                                        n=len(self.word_graph) -
                                        (self.window_size - 1))
            self.output_tags = convert_from_inc_disfluency_tags_to_eval_tags(
                self.output_tags,
                words,
                start=len(self.output_tags) - (len(new_tags)),
                representation=self.args.tags)
        if diff_only:
            for i, old_new in enumerate(zip(prev_output_tags,
                                            self.output_tags)):
                old, new = old_new
                if old != new:
                    return self.output_tags[i:]
            return self.output_tags[len(prev_output_tags):]
        return self.output_tags

    def tag_utterance(self, utterance):
        """Tags entire utterance, only possible on models
        trained on unsegmented data.
        """
        if not self.args.utts_presegmented:
            raise NotImplementedError("Tagger trained on unsegmented data,\
            please call tag_prefix(words) instead.")
        # non segmenting
        self.reset()  # always starts in initial state
        if not self.args.pos:  # no pos tag model
            utterance = [(w, None, t) for w, p, t in utterance]
            # print "Warning: not using pos tags as not pos tag model"
        if not self.args.use_timing_data:
            utterance = [(w, p, None) for w, p, t in utterance]
            # print "Warning: not using timing durations as no timing model"
        for w, p, t in utterance:
            if self.args.pos:
                self.tag_new_word(w, pos=p, timing=t)
        return self.output_tags

    def rollback(self, backwards):
        super(DeepDisfluencyTagger, self).rollback(backwards)
        self.state_history = self.state_history[:len(self.state_history) -
                                                backwards]
        self.softmax_history = self.softmax_history[:len(self.softmax_history
                                                         ) - backwards]
        self.decoder.rollback(backwards)

    def init_deep_model_internal_state(self):
        if self.model_type == "lstm":
            self.model.load_weights(c0=self.initial_c0_state,
                                    h0=self.initial_h0_state)
        elif self.model_type == "elman":
            self.model.load_weights(h0=self.initial_h0_state)

    def reset(self):
        super(DeepDisfluencyTagger, self).reset()
        self.word_graph = [("<s>", "<s>", 0)] * \
            (self.window_size - 1)
        self.state_history = []
        self.softmax_history = []
        self.decoder.viterbi_init()
        self.init_deep_model_internal_state()

    def evaluate_fast_from_matrices(self, validation_matrices, tag_file,
                                    idx_to_label_dict):
        output = []
        true_y = []
        for v in validation_matrices:
            words_idx, pos_idx, extra, y, indices = v
            if extra:
                output.extend(
                    self.model.classify_by_index(words_idx, indices, pos_idx,
                                                 extra))
            else:
                output.extend(
                    self.model.classify_by_index(words_idx, indices, pos_idx))
            true_y.extend(y)
        p_r_f_tags = precision_recall_fscore_support(true_y,
                                                     output,
                                                     average='macro')
        tag_summary = classification_report(
            true_y,
            output,
            labels=[i for i in xrange(len(idx_to_label_dict.items()))],
            target_names=[
                idx_to_label_dict[i]
                for i in xrange(len(idx_to_label_dict.items()))
            ])
        print tag_summary
        results = {
            "f1_rmtto": p_r_f_tags[2],
            "f1_rm": p_r_f_tags[2],
            "f1_tto1": p_r_f_tags[2],
            "f1_tto2": p_r_f_tags[2]
        }

        results.update({'f1_tags': p_r_f_tags[2], 'tag_summary': tag_summary})
        return results

    def train_net(self,
                  train_dialogues_filepath=None,
                  validation_dialogues_filepath=None,
                  model_dir=None,
                  tag_accuracy_file_path=None):
        """Train the internal deep learning model
        from a list of dialogue matrices.
        """
        tag_accuracy_file = open(tag_accuracy_file_path, "a")
        print "Verifying files..."
        for filepath in [
                train_dialogues_filepath, validation_dialogues_filepath
        ]:
            if not verify_dialogue_data_matrices_from_folder(
                    filepath,
                    word_dict=self.word_to_index_map,
                    pos_dict=self.pos_to_index_map,
                    tag_dict=self.tag_to_index_map,
                    n_lm=self.args.n_language_model_features,
                    n_acoustic=self.args.n_acoustic_features):
                raise Exception("Dialogue vectors in wrong format!\
                See README.md.")
        lr = self.args.lr  # even if decay, start with specific lr
        n_extra = self.args.n_language_model_features + \
            self.args.n_acoustic_features
        # validation matrices filepath much smaller so can store these
        # and preprocess them all:
        validation_matrices = [
            np.load(validation_dialogues_filepath + "/" + fp)
            for fp in os.listdir(validation_dialogues_filepath)
        ]
        validation_matrices = [
            dialogue_data_and_indices_from_matrix(
                d_matrix,
                n_extra,
                pre_seg=self.args.utts_presegmented,
                window_size=self.window_size,
                bs=self.args.bs,
                tag_rep=self.args.tags,
                tag_to_idx_map=self.tag_to_index_map,
                in_utterances=self.args.utts_presegmented)
            for d_matrix in validation_matrices
        ]
        idx_2_label_dict = {v: k for k, v in self.tag_to_index_map.items()}
        if not os.path.exists(model_dir):
            os.mkdir(model_dir)
        start = 1  # by default start from the first epoch
        best_score = 0
        best_epoch = 0
        print "Net training started..."
        for e in range(start, self.args.n_epochs + 1):
            tic = time.time()
            epoch_folder = model_dir + "/epoch_{}".format(e)
            if not os.path.exists(epoch_folder):
                os.mkdir(epoch_folder)
            train_loss = 0
            # TODO IO is slow, where the memory allows do in one
            load_separately = True
            test = False
            if load_separately:
                for i, dialogue_f in enumerate(
                        os.listdir(train_dialogues_filepath)):
                    if test and i > 3:
                        break
                    print dialogue_f
                    d_matrix = np.load(train_dialogues_filepath + "/" +
                                       dialogue_f)
                    word_idx, pos_idx, extra, y, indices = \
                        dialogue_data_and_indices_from_matrix(
                                          d_matrix,
                                          n_extra,
                                          window_size=self.window_size,
                                          bs=self.args.bs,
                                          pre_seg=self.args.utts_presegmented
                                                              )
                    # for i in range(len(indices)):
                    #     print i, word_idx[i], pos_idx[i], \
                    #     y[i], indices[i]
                    train_loss += self.model.fit(word_idx,
                                                 y,
                                                 lr,
                                                 indices,
                                                 pos_idx=pos_idx,
                                                 extra_features=extra)
                    print '[learning] file %i >>' % (i+1),\
                        'completed in %.2f (sec) <<\r' % (time.time() - tic)
            # save the initial states we've learned to override the random
            self.initial_h0_state = self.model.h0.get_value()
            if self.args.model_type == "lstm":
                self.initial_c0_state = self.model.c0.get_value()
            # reset and evaluate simply
            self.reset()
            results = self.evaluate_fast_from_matrices(
                validation_matrices,
                tag_accuracy_file,
                idx_to_label_dict=idx_2_label_dict)
            val_score = results['f1_tags']  #TODO get best score type
            print "epoch training loss", train_loss
            print '[learning] epoch %i >>' % (e),\
                'completed in %.2f (sec) <<\r' % (time.time() - tic)
            print "validation score", val_score
            tag_accuracy_file.write(
                str(e) + "\n" + results['tag_summary'] + "\n%%%%%%%%%%\n")
            tag_accuracy_file.flush()
            print "saving model..."
            self.model.save(epoch_folder)  # Epoch file dump
            # checking patience and decay, if applicable
            # stopping criterion
            if val_score > best_score:
                self.model.save(model_dir)
                best_score = val_score
                print 'NEW BEST raw labels at epoch ', e, 'best valid',\
                    best_score
                best_epoch = e
            # stopping criteria = if no improvement in 10 epochs
            if e - best_epoch >= 10:
                print "stopping, no improvement in 10 epochs"
                break
            if self.args.decay and (e - best_epoch) > 1:
                # just a steady decay if things aren't improving for 2 epochs
                # a hidden hyperparameter
                decay_rate = 0.85
                lr *= decay_rate
                print "learning rate decayed, now ", lr
            if lr < 1e-5:
                print "stopping, below learning rate threshold"
                break
            print '[learning and testing] epoch %i >>' % (e),\
                'completed in %.2f (sec) <<\r' % (time.time()-tic)

        print 'BEST RESULT: epoch', best_epoch, 'valid score', best_score
        tag_accuracy_file.close()
        return best_epoch

    def incremental_output_from_file(self,
                                     source_file_path,
                                     target_file_path=None,
                                     is_asr_results_file=False):
        """Return the incremental output in an increco style
        given the incoming words + POS. E.g.:

        Speaker: KB3_1

        Time: 1.50
        KB3_1:1    0.00    1.12    $unc$yes    NNP    <f/><tc/>

        Time: 2.10
        KB3_1:1    0.00    1.12    $unc$yes    NNP    <rms id="1"/><tc/>
        KB3_1:2    1.12    2.00     because    IN    <rps id="1"/><cc/>

        Time: 2.5
        KB3_1:2    1.12    2.00     because    IN    <rps id="1"/><rpndel id="1"/><cc/>

        from an ASR increco style input without the POStags:

        or a normal style disfluency dectection ground truth corpus:

        Speaker: KB3_1
        KB3_1:1    0.00    1.12    $unc$yes    NNP    <rms id="1"/><tc/>
        KB3_1:2    1.12    2.00     $because    IN    <rps id="1"/><cc/>
        KB3_1:3    2.00    3.00    because    IN    <f/><cc/>
        KB3_1:4    3.00    4.00    theres    EXVBZ    <f/><cc/>
        KB3_1:6    4.00    5.00    a    DT    <f/><cc/>
        KB3_1:7    6.00    7.10    pause    NN    <f/><cc/>


        :param source_file_path: str, file path to the input file
        :param target_file_path: str, file path to output in the above format
        :param is_asr_results_file: bool, whether the input is increco style
        """
        if target_file_path:
            target_file = open(target_file_path, "w")
        if not self.args.do_utt_segmentation:
            print "not doing utt seg, using pre-segmented file"
        if is_asr_results_file:
            return NotImplementedError
        if 'timings' in source_file_path:
            print "input file has timings"
            if not is_asr_results_file:
                dialogues = []
                IDs, timings, words, pos_tags, labels = \
                    get_tag_data_from_corpus_file(source_file_path)
                for dialogue, a, b, c, d in zip(IDs, timings, words, pos_tags,
                                                labels):
                    dialogues.append((dialogue, (a, b, c, d)))
        else:
            print "no timings in input file, creating fake timings"
            raise NotImplementedError

        for speaker, speaker_data in dialogues:
            # if "4565" in speaker: quit()
            print speaker
            self.reset()  # reset at the beginning of each dialogue
            if target_file_path:
                target_file.write("Speaker: " + str(speaker) + "\n\n")
            timing_data, lex_data, pos_data, labels = speaker_data
            # iterate through the utterances
            # utt_idx = -1
            current_time = 0
            for i in range(0, len(timing_data)):
                # print i, timing_data[i]
                _, end = timing_data[i]
                if (not self.args.do_utt_segmentation) \
                        and "<t" in labels[i]:
                    self.reset()  # reset after each utt if non pre-seg
                # utt_idx = frames[i]
                timing = None
                if 'timings' in source_file_path and self.args.use_timing_data:
                    timing = end - current_time
                word = lex_data[i]
                pos = pos_data[i]
                diff = self.tag_new_word(word,
                                         pos,
                                         timing,
                                         diff_only=True,
                                         rollback=0)
                current_time = end
                if target_file_path:
                    target_file.write("Time: " + str(current_time) + "\n")
                    new_words = lex_data[i - (len(diff) - 1):i + 1]
                    new_pos = pos_data[i - (len(diff) - 1):i + 1]
                    new_timings = timing_data[i - (len(diff) - 1):i + 1]
                    for t, w, p, tag in zip(new_timings, new_words, new_pos,
                                            diff):
                        target_file.write("\t".join(
                            [str(t[0]), str(t[1]), w, p, tag]))
                        target_file.write("\n")
                    target_file.write("\n")
            target_file.write("\n")

    def train_decoder(self, tag_file):
        raise NotImplementedError

    def save_decoder_model(self, dir_path):
        raise NotImplementedError
Example #24
class SimpleSLU:
    def __init__(self):
        self.__semantic_instance_list = []
        self.__speech_act_instance_list = []

        self.__semantic_model = None
        self.__speech_act_model = None

        self.__speech_act_lb = None

    def load_model(self, modelfile):
        with open('%s.act.model' % modelfile, 'r') as f:
            self.__speech_act_model, self.__speech_act_lb = pickle.load(f)

        self.__semantic_model = CRFTagger(verbose=True)
        self.__semantic_model.set_model_file('%s.semantic.model' % modelfile)

        return True

    def add_instance(self, utter, speech_act, semantic_tagged):
        tokenized = self.__tokenize(utter, semantic_tagged)
        if tokenized is None:
            return False

        semantic_instance = []
        for word, (bio, tag, attrs) in tokenized:
            if bio is None:
                sem_label = 'O'
            else:
                cat = None
                for attr, val in attrs:
                    if attr == 'cat':
                        cat = val
                sem_label = '%s-%s_%s' % (bio, tag, cat)
            semantic_instance.append((unicode(word.lower()), unicode(sem_label)))
        self.__semantic_instance_list.append(semantic_instance)

        sa_label_list = []
        for sa in speech_act:
            sa_labels = ['%s_%s' % (sa['act'], attr) for attr in sa['attributes']]
            sa_label_list += sa_labels

        sa_label_list = sorted(set(sa_label_list))

        word_feats = ' '.join([word.lower() for word, _ in tokenized])
        self.__speech_act_instance_list.append((word_feats, sa_label_list))

        return True

    def train(self, modelfile):
        sa_feats = [x for x, _ in self.__speech_act_instance_list]
        sa_labels = [y for _, y in self.__speech_act_instance_list]

        self.__speech_act_lb = preprocessing.MultiLabelBinarizer()
        sa_labels = self.__speech_act_lb.fit_transform(sa_labels)

        self.__speech_act_model = Pipeline([
            ('vectorizer', CountVectorizer()),
            ('tfidf', TfidfTransformer()),
            ('clf', OneVsRestClassifier(LinearSVC(verbose=True)))])

        self.__speech_act_model.fit(sa_feats, sa_labels)

        with open('%s.act.model' % modelfile, 'wb') as f:
            pickle.dump((self.__speech_act_model, self.__speech_act_lb), f)

        self.__semantic_model = CRFTagger(verbose=True)
        self.__semantic_model.train(self.__semantic_instance_list, '%s.semantic.model' % modelfile)

    def pred(self, utter):
        tokenized = self.__tokenize(utter)
        word_feats = ' '.join([word.lower() for word, _ in tokenized])

        pred_act = self.__speech_act_lb.inverse_transform(self.__speech_act_model.predict([word_feats]))
        pred_semantic = self.__semantic_model.tag([word.lower() for word, _ in tokenized])

        return (pred_act, pred_semantic)

    def __tokenize(self, utter, semantic_tagged=None):
        result = None
        if semantic_tagged is None:
            result = [(word, None) for word in nltk.word_tokenize(utter)]
        else:
            parser_raw = SemanticTagParser(False)
            parser_tagged = SemanticTagParser(False)

            segmented = ' '.join(nltk.word_tokenize(utter))
            tagged = ' '.join(semantic_tagged)

            parser_raw.feed(segmented)
            parser_tagged.feed(tagged)

            raw_chr_seq = parser_raw.get_chr_seq()
            raw_space_seq = parser_raw.get_chr_space_seq()

            tagged_chr_seq = parser_tagged.get_chr_seq()
            tagged_space_seq = parser_tagged.get_chr_space_seq()

            if raw_chr_seq == tagged_chr_seq:
                merged_space_seq = [
                    x or y for x, y in zip(raw_space_seq, tagged_space_seq)]

                word_seq = parser_tagged.tokenize(merged_space_seq)
                tag_seq = parser_tagged.get_word_tag_seq()

                result = [(word, tag) for word, tag in zip(word_seq, tag_seq)]

        return result
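
A hedged usage sketch of the SimpleSLU class above, assuming a pre-trained model saved under a hypothetical prefix (so that 'models/slu.act.model' and 'models/slu.semantic.model' exist):

slu = SimpleSLU()
slu.load_model('models/slu')                       # hypothetical model prefix
acts, semantics = slu.pred('i need a cheap restaurant')
# acts: decoded speech-act labels; semantics: (word, semantic-tag) pairs from the CRF tagger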
Example #25
# -*- coding: utf-8 -*-
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.tag import CRFTagger

ct = CRFTagger()
ct.set_model_file('./input/tagger/indonesian_tagger')


# insert a feature into the dictionary
def insert_dict(kata, fitur):
    if kata not in fitur:
        fitur[kata] = 1
    else:
        fitur[kata] += 1
    return fitur


# assign tags to the input
def beri_tag(kata):
    kata_tag = ct.tag_sents([word_tokenize(kata)])
    return kata_tag


def tf_baseline(kalimat):
    fitur = {}
    clean_punct = RegexpTokenizer(r'\w+')
    arr = [clean_punct.tokenize(kalimat.lower())]
    hasil = ct.tag_sents(arr)
    for sentence in hasil:
        for word_tag in sentence: