Example No. 1
def grid_search_models():
    model_selectors = {
        "CV": SelectorCV,
        "BIC": SelectorBIC,
        "DIC": SelectorDIC
    }
    features_dict = {
        "GROUND": features_ground,
        "NORM": features_norm,
        "POLAR": features_polar,
        "DELTA": features_delta,
        "CUSTOM": features_norm
    }
    wers = {}
    grid = {}
    for feat_name, features in features_dict.items():
        test_set = asl.build_test(features)
        words_data = asl.build_training(features)
        for sel_name, selector in model_selectors.items():
            models = train_all_words2(words_data, selector)
            probabilities, guesses = recognize(models, test_set)
            key = sel_name + "-" + feat_name
            print(key)
            wer = show_errors(guesses, test_set)
            wers[key] = wer
            grid[key] = (wer, models)
    s = [(k, wers[k]) for k in sorted(wers, key=wers.get, reverse=False)]
    print(s)
    pickle.dump(grid, open("data/grid_models.pkl", "wb"))
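The train_all_words2 helper called above is not shown in this listing. A minimal sketch of what it presumably looks like, assuming it mirrors the train_all_words defined in Example No. 7 but takes an already-built training set instead of a feature list:

def train_all_words2(training, model_selector):
    # Train one HMM per word from a pre-built training set
    sequences = training.get_all_sequences()
    Xlengths = training.get_all_Xlengths()
    model_dict = {}
    for word in training.words:
        model_dict[word] = model_selector(sequences, Xlengths, word,
                                          n_constant=3).select()
    return model_dict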
Example No. 2
def run_test(name, features, model_selector):
    print("running ", name)

    models = train_all_words(features, model_selector)
    test_set = asl.build_test(features)
    probabilities, guesses = recognize(models, test_set)
    show_errors(guesses, test_set)
Example No. 3
 def test_recognize_guesses_interface(self):
     _, guesses = recognize(self.models, self.test_set)
     # print(guesses)
     self.assertEqual(len(guesses), self.test_set.num_items,
                      "Number of test items in guesses list incorrect.")
     self.assertIsInstance(guesses[0], str, "The guesses are not strings")
     self.assertIsInstance(guesses[-1], str, "The guesses are not strings")
Example No. 4
def experiment(asl, feature_name, feature, selector_name, selector):
    start = timeit.default_timer()
    models = train_all_words(asl, feature, selector)
    t_time = timeit.default_timer()-start
    test_set = asl.build_test(feature)
    probabilities, guesses = recognize(models, test_set)
    r_time = timeit.default_timer()-start
    show_results(feature_name, selector_name, t_time, r_time, guesses, test_set)
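show_results is not defined in this listing. A hypothetical sketch (the name and signature come from the call above; the body is an assumption) that prints the timings measured in experiment and then reuses show_errors for the WER table:

def show_results(feature_name, selector_name, t_time, r_time, guesses, test_set):
    # Hypothetical reporting helper: both timers share the same start point,
    # so r_time is the cumulative time including training
    print("{} + {}: trained in {:.1f}s, recognized by {:.1f}s".format(
        feature_name, selector_name, t_time, r_time))
    show_errors(guesses, test_set)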
Example No. 5
 def test_recognize_probabilities_interface(self):
     probs, _ = recognize(self.models, self.test_set)
     self.assertEqual(len(probs), self.test_set.num_items, "Number of test items in probabilities list incorrect.")
     self.assertEqual(len(probs[0]), self.training_set.num_items,
                      "Number of training word probabilities in test item dictionary incorrect.")
     self.assertEqual(len(probs[-1]), self.training_set.num_items,
                      "Number of training word probabilities in test item dictionary incorrect.")
     self.assertIn('FRANK', probs[0], "Dictionary of probabilities does not contain correct keys")
     self.assertIn('CHICKEN', probs[-1], "Dictionary of probabilities does not contain correct keys")
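The unit tests above pin down the recognize interface: given the dictionary of trained word models and a test set, it returns one {word: log-likelihood} dictionary per test item plus a parallel list of best-guess word strings. A minimal sketch that satisfies this interface (not necessarily the author's implementation):

def recognize(models, test_set):
    # Score every test item against every trained word model
    probabilities = []
    guesses = []
    for X, lengths in test_set.get_all_Xlengths().values():
        word_logls = {}
        for word, model in models.items():
            try:
                word_logls[word] = model.score(X, lengths)
            except Exception:
                # The model may be None or unable to score this sequence
                word_logls[word] = float("-inf")
        probabilities.append(word_logls)
        guesses.append(max(word_logls, key=word_logls.get))
    return probabilities, guesses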
Example No. 7
def customRec(asl):
    # autoreload for automatically reloading changes made in my_model_selectors and my_recognizer
    from my_model_selectors import SelectorConstant
    from my_model_selectors import SelectorBIC
    from my_model_selectors import SelectorDIC
    from my_model_selectors import SelectorCV

    features_ground = ['grnd-rx', 'grnd-ry', 'grnd-lx', 'grnd-ly']
    features_polar = ['polar-rr', 'polar-rtheta', 'polar-lr', 'polar-ltheta']
    features_norm = ['norm-rx', 'norm-ry', 'norm-lx', 'norm-ly']
    features_delta = ['delta-rx', 'delta-ry', 'delta-lx', 'delta-ly']
    features_custom = [
        'grnd-rx', 'grnd-ry', 'grnd-lx', 'grnd-ly', 'delta-rx', 'delta-ry',
        'delta-lx', 'delta-ly'
    ]

    def train_all_words(features, model_selector):
        # Experiment here with different feature sets defined in part 1
        training = asl.build_training(features)
        sequences = training.get_all_sequences()
        Xlengths = training.get_all_Xlengths()
        model_dict = {}
        for word in training.words:
            model = model_selector(sequences, Xlengths, word,
                                   n_constant=3).select()
            model_dict[word] = model
        return model_dict

    models = train_all_words(features_ground, SelectorConstant)
    print("Number of word models returned = {}".format(len(models)))

    from my_recognizer import recognize
    from asl_utils import show_errors
    import timeit

    features_list = [
        features_ground, features_norm, features_polar, features_delta,
        features_custom
    ]
    model_selector_list = [
        SelectorConstant, SelectorBIC, SelectorDIC, SelectorCV
    ]
    #model_selector_list = [SelectorBIC]
    for features in features_list:
        for model_selector in model_selector_list:
            print("\n", features, model_selector.__name__)
            start = timeit.default_timer()
            models = train_all_words(features, model_selector)
            test_set = asl.build_test(features)
            probabilities, guesses = recognize(models, test_set)
            end = timeit.default_timer() - start
            print("Training and test took {} seconds".format(end))
            show_errors(guesses, test_set)

    return
Example No. 8
 def run(self):
     print("Start recognition...")
     probabilities_guesses_list = [
         recognize(m, self.test_set) for m in self.models
     ]
     print("Show errors for {} with {}".format(self.selector_name_list,
                                               self.features))
     self.show_errors_for([p_g[1] for p_g in probabilities_guesses_list],
                          self.selector_name_list, self.test_set)
     print("Train time: {}".format(self.end_train - self.start_train))
Example No. 9
def guess_with_cv_polar_and_slm():
    training = asl.build_training(features_polar)
    models_cv_polar = train_all_words2(training, SelectorCV)
    test_set = asl.build_test(features_polar)
    probabilities, guesses = recognize(models_cv_polar, test_set)
    print("With cv_polar models")
    show_errors(guesses, test_set)

    pguesses = guess_by_combination(test_set, probabilities)
    guesses = [w for (w, p) in pguesses]
    print("With cv_polar models and language model")
    show_errors(guesses, test_set)
Example No. 10
def recognize_ngram(lm, models, test_set):
    probabilities, guesses = recognize(models, test_set, num_guesses=3)
    # train for these values?
    alpha = 1
    beta = 25

    # Takes a long time to run With 3 guesses per word, longest sentence with 8
    # words has 3^8=6561 possibilities -> Reduce somehow?
    sentence_guesses = {}
    for video_num in test_set.sentences_index:
        sentence_word_indexes = test_set.sentences_index[video_num]
        hmm_max_l_words = [guesses[i] for i in sentence_word_indexes]
        possible_sentences = itertools.product(*hmm_max_l_words)
        best_sentence = None
        best_l = float("-inf")
        for s in possible_sentences:
            try:
                # Pair each candidate word with its own test-item index so the
                # HMM log-likelihood comes from the right item in the sentence
                hmm_sentence_l = sum(probabilities[i][w]
                                     for i, w in zip(sentence_word_indexes, s))
                lm_sentence_l = lm.log_s(' '.join(s))
                total_sentence_l = alpha * hmm_sentence_l + beta * lm_sentence_l
                if total_sentence_l > best_l:
                    best_l = total_sentence_l
                    best_sentence = s
            except Exception:
                continue
        if best_sentence is not None:
            sentence_guesses[video_num] = best_sentence

    errors = 0
    for video_num in sentence_guesses:
        correct_sentence = [
            test_set.wordlist[i] for i in test_set.sentences_index[video_num]
        ]
        recognised_sentence = sentence_guesses[video_num]
        for c, r in zip(correct_sentence, list(recognised_sentence)):
            if c != r:
                errors += 1
        # print('Correct {}'.format(correct_sentence))
        # print('Recognised {}'.format(recognised_sentence))
        # print()
    # WER over all test items (178 words in this test set)
    print(float(errors) / test_set.num_items)
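One way to address the "Reduce somehow?" comment above is beam pruning: extend the sentence one word position at a time and keep only the best-scoring partial sentences instead of enumerating the full itertools.product. A sketch under that assumption (beam_prune_sentence and beam_width are illustrative names, not part of the original code):

def beam_prune_sentence(probabilities, guesses, sentence_word_indexes, lm,
                        alpha=1, beta=25, beam_width=50):
    # Each beam entry is (partial sentence tuple, HMM log-likelihood so far)
    beam = [((), 0.0)]
    for i in sentence_word_indexes:
        candidates = []
        for partial, score in beam:
            for w in guesses[i]:  # the top-N guesses for this word position
                candidates.append((partial + (w,), score + probabilities[i][w]))
        # Keep only the best partial sentences before adding the next word
        candidates.sort(key=lambda c: c[1], reverse=True)
        beam = candidates[:beam_width]
    # Re-rank the surviving complete sentences with the language model
    best = max(beam, key=lambda c: alpha * c[1] + beta * lm.log_s(' '.join(c[0])))
    return best[0]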
Example No. 11
def ensemble_models():
    features_data = [
        ("ground", features_ground, SelectorBIC),
        ("norm", features_norm, SelectorDIC),
        ("polar", features_polar, SelectorBIC),
        ("delta", features_delta, SelectorCV),
        ("custom", features_custom, SelectorDIC),
    ]
    features_probs = {}
    features_success_rates = {}
    test_set = None
    for name, features, selector in features_data:
        test_set = asl.build_test(features)
        words_data = asl.build_training(features)
        models = train_all_words2(words_data, selector)
        # probabilities is a list of dictionaries, one per test sequence,
        # mapping each candidate word to its log-likelihood: [{word: prob}]
        probabilities, guesses = recognize(models, test_set)
        # TODO: use each feature set's relative WER as its weight in the ensemble output
        features_probs[name] = probabilities
        print("{} features:".format(name))
        wer = show_errors(guesses, test_set)
        features_success_rates[name] = 1 - wer

    sm = sum(features_success_rates.values())
    features_weights = [(k, v / sm)
                        for (k, v) in features_success_rates.items()]
    pickle.dump(
        {
            "features_probs": features_probs,
            "features_weights": features_weights
        }, open("data/feature_models_data.pkl", "wb"))

    ensemble_guess(features_probs, features_weights, test_set)
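ensemble_guess is not defined in this listing. A minimal sketch of one possible implementation, assuming features_probs maps feature-set names to the per-item probability dictionaries returned by recognize and features_weights is the (name, weight) list built above: for each test item, sum the weighted log-likelihoods across feature sets and guess the best-scoring word.

def ensemble_guess(features_probs, features_weights, test_set):
    # Weighted ensemble over feature sets: combine log-likelihoods per word
    weights = dict(features_weights)
    guesses = []
    for i in range(test_set.num_items):
        combined = {}
        for name, probs in features_probs.items():
            for word, logl in probs[i].items():
                combined[word] = combined.get(word, 0.0) + weights[name] * logl
        guesses.append(max(combined, key=combined.get))
    show_errors(guesses, test_set)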
Example No. 12
    print("Number of test set items: {}".format(test_set.num_items))
    print("Number of test set sentences: {}".format(
        len(test_set.sentences_index)))

    # TODO implement the recognize method in my_recognizer
    from my_recognizer import recognize
    from asl_utils import show_errors

    # TODO Choose a feature set and model selector
    features = features_ground  # change as needed
    model_selector = SelectorDIC  # change as needed

    # TODO Recognize the test set and display the result with the show_errors method
    models = train_all_words(features, model_selector)
    test_set = asl.build_test(features)
    probabilities, guesses = recognize(models, test_set)
    show_errors(guesses, test_set)

    # In[ ]:

    # TODO Choose a feature set and model selector
    # TODO Recognize the test set and display the result with the show_errors method

    # In[ ]:

    # TODO Choose a feature set and model selector
    # TODO Recognize the test set and display the result with the show_errors method

    # **Question 3:**  Summarize the error results from three combinations of features and model selectors.  What was the "best" combination and why?  What additional information might we use to improve our WER?  For more insight on improving WER, take a look at the introduction to Part 4.
    #
    # **Answer 3:**
Example No. 13
 def test_recognize_guesses_interface(self):
     _, guesses = recognize(self.models, self.test_set)
     self.assertEqual(len(guesses), self.test_set.num_items, "Number of test items in guesses list incorrect.")
     self.assertIsInstance(guesses[0], str, "The guesses are not strings")
     self.assertIsInstance(guesses[-1], str, "The guesses are not strings")
 def print_out(self):
     probs, guesses = recognize(self.models, self.test_set)
     print(probs)
     print(guesses)
Example No. 15
def run_my_recognizer(asl, features):
    from my_model_selectors import SelectorConstant
    from my_model_selectors import SelectorCV
    from my_model_selectors import SelectorBIC
    from my_model_selectors import SelectorDIC

    from my_recognizer import recognize
    from asl_utils import show_errors

    import timeit
    import numpy as np

    def train_all_words(features, model_selector, index):
        training = asl.build_training(features)

        print("Available Training words - words: ", training.words)
        print("Quantity of Training words - num_items: ", training.num_items)
        print("Chosen Training words: All words")

        # Take every fourth element, since the chosen features may combine
        # more than one feature set (each feature set contributes four columns)
        features_abbrev = features[::4]
        chosen_abbrev_list = [abbrev.split('-')[0] for abbrev in features_abbrev]
        chosen = ', '.join(chosen_abbrev_list)
        print("Chosen Features: ", chosen)

        # print("Chosen Features: ", features[index].split('-')[0])

        print("Chosen Model Selector: ", model_selector.__name__)

        sequences = training.get_all_sequences()

        # Note:
        #
        # training.get_all_Xlengths()
        #
        # returns the entire database of words as a dictionary of
        # (X, lengths) tuples, one entry per word:
        # - X is a numpy array of feature rows: the concatenation of all
        #   the sequences of frames for that word, where each row holds
        #   the feature values (e.g. grnd-rx, grnd-lx, ...) for one frame.
        #   A sequence of frames corresponds to one signing of the word.
        # - lengths lists the length of each sequence concatenated into X.
        #
        # e.g. {'FRANK': (
        #                   array(
        #                       [[ 87, 225],[ 87, 225], ...
        #                       [ 87, 225,  62, 127], [ 87, 225,  65, 128]]
        #                   ),
        #                   [14, 18]
        #                ),
        #      }
        #
        # Here the first sequence is 14 frames long and the second is 18
        # frames long; only a couple of rows from each are shown.

        Xlengths = training.get_all_Xlengths()
        words_to_train = training.words
        model_dict = {}

        timeframes = []
        states = []

        for word in words_to_train:
            start = timeit.default_timer()
            model = model_selector(sequences, Xlengths, word,
                                   n_constant=3).select()
            model_dict[word] = model
            end = timeit.default_timer() - start
            if model is not None:
                timeframes.append(end)
                states.append(model.n_components)

                print(
                    "Training complete for {} with {} states with time {} seconds"
                    .format(word, model.n_components, end))
            else:
                print("Training failed for {}".format(word))

        if timeframes:
            print("Average Timeframe: ", np.mean(timeframes))
        if states:
            print("Average States used: ", np.mean(states))

        return model_dict

    # features_ground   # Difference between hand and nose locations
    # features_norm     # Corrects for different signer heights and arm lengths
    # features_polar    # Fixes the discontinuity in the signing area so it does not interfere with results
    # features_delta    # Speed and direction of the hands
    # features_rescaled # Rescaled values make plotting, interpretation, and analysis easier

    # All features
    features_all = (features[0] + features[1] + features[2] +
                    features[3] + features[4])

    # All features without norm and polar (i.e. without fixes)
    features_without_norm_and_polar = features[0] + features[3] + features[4]

    # All features without rescaled
    features_without_rescaled = (features[0] + features[1] +
                                 features[2] + features[3])

    # All features without delta
    features_without_delta = (features[0] + features[1] +
                              features[2] + features[4])

    # CHOSEN COMBOS - choose feature set and model selector combos
    # (the project asks for 3; 36 combinations are run here all together)

    # Note: Ensure chosen_features and chosen_model_selectors have the same length
    chosen_features = [
        features_all, features_all, features_all, features_all,
        features_without_norm_and_polar, features_without_norm_and_polar,
        features_without_norm_and_polar, features_without_norm_and_polar,
        features_without_rescaled, features_without_rescaled,
        features_without_rescaled, features_without_rescaled,
        features_without_delta, features_without_delta, features_without_delta,
        features_without_delta, features[0], features[0], features[0],
        features[0], features[1], features[1], features[1], features[1],
        features[2], features[2], features[2], features[2], features[3],
        features[3], features[3], features[3], features[4], features[4],
        features[4], features[4]
    ]
    chosen_model_selectors = [
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC,
        SelectorConstant, SelectorCV, SelectorBIC, SelectorDIC
    ]

    # Iterate through the chosen combos: recognize the test set and
    # display the result with the show_errors method
    for index, chosen_feature in enumerate(chosen_features):
        models = train_all_words(chosen_feature, chosen_model_selectors[index],
                                 index)
        print("Number of word models returned = {}".format(len(models)))

        test_set = asl.build_test(chosen_feature)
        print("Number of test set items: {}".format(test_set.num_items))
        print("Number of test set sentences: {}".format(
            len(test_set.sentences_index)))

        probabilities, guesses = recognize(models, test_set)

        # Show the Word Error Rate (WER) and sentence differences in tabular form
        try:
            result = show_errors(guesses, test_set)
        except Exception as e:
            print("Error showing errors: ", e)
        print("Finished processing combo... Trying others if any exist.")
 def lmfun(self):
     probs, guesses = recognize(self.models, self.test_set)