def _create_lm_from_counts(self, smoothing):
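    """Build a LanguageModel from the accumulated n-gram counts.

    Probabilities are estimated with add-k smoothing, where `smoothing` is k
    and the note vocabulary is assumed to span roughly 4 octaves (48 notes);
    an "<UNK>" entry holds the mass reserved for unseen notes.
    """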
    lm = LanguageModel(part=self._part)
    lm.set_ngram_size(self._ngram_size)
    for context, note_counts in self._lm_counts.items():
        # Only keep contexts that were followed by more than two distinct notes.
        if len(note_counts) > 2:
            total_notes_after_context = sum(note_counts.values())
            # Add-k smoothing: approximately 4 octaves in our vocabulary (48 notes).
            denominator = float(total_notes_after_context + 48 * smoothing)
            for note, count in note_counts.items():
                lm.add_to_model(context, note, (count + smoothing) / denominator)
            # Reserve the remaining smoothed mass for unseen notes after this context.
            lm.add_to_model(context, "<UNK>", smoothing / denominator)
    return lm