Example #1
def setUp(self):
    # Build the ASL database, derive training/test sets from the selected
    # FEATURES, then train one model per word using SelectorConstant.
    self.asl = AslDb()
    self.training_set = self.asl.build_training(FEATURES)
    self.test_set = self.asl.build_test(FEATURES)
    self.models = train_all_words(self.training_set, SelectorConstant)
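For context, a minimal sketch (not part of the source) of a test method that could sit next to this setUp; the recognize helper and its (probabilities, guesses) return value are assumptions here:

def test_recognize_returns_one_result_per_test_item(self):
    # Hypothetical check: the recognizer yields one probability dict and
    # one guess per item in the test set.
    probabilities, guesses = recognize(self.models, self.test_set)
    self.assertEqual(len(probabilities), len(self.test_set.wordlist))
    self.assertEqual(len(guesses), len(self.test_set.wordlist))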
Example #2
import json

# Statistical language model built from the corpus sentences (used in the
# results report and in the commented-out re-scoring step below).
test_SLM = BasicSLM("SLM_data/corpus_sentences.txt", verbose=False)

feature_set = features_custom
selector = SelectorCV
training_set = asl.build_training(feature_set)
testing_set = asl.build_test(feature_set)
train_words = training_set.words
test_words = testing_set.wordlist
# train_words = ['FISH', 'BOOK', 'VEGETABLE']
# test_words  = ['FISH', 'BOOK', 'VEGETABLE']

# Collect the per-video lists of test-item indices into a plain list of sentences.
sentences = testing_set.sentences_index
sentences = [sentences[i] for i in sentences]

# Train one model per word with the chosen selector and feature set.
models_dict = train_all_words(training_set,
                              selector,
                              train_words,
                              verbose=False,
                              features=feature_set)

# Score every test item against the word models and take the best guesses.
test_probs, test_guesses = recognize_words(models_dict,
                                           testing_set,
                                           test_words,
                                           verbose=False)
acc_before = report_recognizer_results(test_words, test_probs, test_guesses,
                                       selector, test_SLM, feature_set)

# Persist the raw recognizer output so later re-scoring can run separately.
with open("recognizer_results/raw_results.txt", 'w') as file:
    json.dump((test_probs, test_guesses, test_words, sentences), file)

#test_SLM_probs = get_SLM_probs(test_guesses, test_probs, test_SLM)
#new_probs, new_guesses = normalise_and_combine(test_words, test_probs, test_SLM_probs, test_guesses, 1)
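
As a follow-up note (not part of the source): because the results are dumped as a JSON array, they can be restored later with json.load, which returns the four elements as plain lists ready to unpack.

import json

# Reload the raw recognizer output saved above (hypothetical later step,
# e.g. before applying the SLM re-scoring that is commented out above).
with open("recognizer_results/raw_results.txt") as f:
    test_probs, test_guesses, test_words, sentences = json.load(f)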