def test_equally_probable(self):
    """Test generation of equally probable values."""

    # no overrides
    alp = ['A', 'B', 'C', 'D']
    probs = equally_probable(alp)
    self.assertEqual(len(alp), len(probs))
    for prob in probs:
        self.assertEqual(0.25, prob)

    # test with override
    alp = ['A', 'B', 'C', 'D']
    probs = equally_probable(alp, {'A': 0.4})
    self.assertEqual(len(alp), len(probs))
    self.assertAlmostEqual(1.0, sum(probs))
    self.assertEqual(probs[0], 0.4)
    self.assertAlmostEqual(probs[1], 0.2)
    self.assertAlmostEqual(probs[2], 0.2)
    self.assertAlmostEqual(probs[3], 0.2)

    # test with 0.0 override
    alp = ['A', 'B', 'C', 'D', 'E']
    probs = equally_probable(alp, {'E': 0.0})
    self.assertEqual(len(alp), len(probs))
    self.assertAlmostEqual(1.0, sum(probs))
    self.assertEqual(probs[0], 0.25)
    self.assertAlmostEqual(probs[1], 0.25)
    self.assertAlmostEqual(probs[2], 0.25)
    self.assertAlmostEqual(probs[3], 0.25)
    self.assertAlmostEqual(probs[4], 0.0)

    # test with multiple overrides
    alp = ['A', 'B', 'C', 'D']
    probs = equally_probable(alp, {'B': 0.2, 'D': 0.3})
    self.assertEqual(len(alp), len(probs))
    self.assertAlmostEqual(1.0, sum(probs))
    self.assertEqual(probs[0], 0.25)
    self.assertAlmostEqual(probs[1], 0.2)
    self.assertAlmostEqual(probs[2], 0.25)
    self.assertAlmostEqual(probs[3], 0.3)

    # test with an override that's not in the alphabet
    alp = ['A', 'B', 'C', 'D']
    probs = equally_probable(alp, {'F': 0.4})
    self.assertEqual(len(alp), len(probs))
    self.assertAlmostEqual(1.0, sum(probs))
    for prob in probs:
        self.assertEqual(0.25, prob)
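
# For reference, a minimal sketch of the contract that `equally_probable`
# would have to satisfy to pass the assertions above. This is an assumption
# inferred from the test, not the implementation under test (hence the
# `_sketch` suffix): overridden symbols keep their given probability, keys
# outside the alphabet are ignored, and the remaining mass is split evenly.
def _equally_probable_sketch(alp, overrides=None):
    """Returns a list of probabilities, in alphabet order."""
    # Keep only overrides that name symbols actually in the alphabet.
    overrides = {sym: prob for sym, prob in (overrides or {}).items()
                 if sym in alp}
    # Split the unassigned probability mass evenly over the rest.
    remaining = 1.0 - sum(overrides.values())
    n_uniform = len(alp) - len(overrides)
    uniform = remaining / n_uniform if n_uniform else 0.0
    return [overrides.get(sym, uniform) for sym in alp]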
def initialize_epoch(self):
    """If a decision is made, initializes the next epoch."""
    try:
        # First, reset the history for this new epoch
        self.conjugator.reset_history()

        # If there is no language model specified, mock the LM prior.
        # TODO: is the probability domain correct? ERP evidence is in
        # the log domain; the LM by default returns the negative log domain.
        if not self.lmodel:
            # mock probabilities to be equally likely for all letters.
            overrides = {BACKSPACE_CHAR: self.backspace_prob}
            prior = equally_probable(self.alp, overrides)

        # Otherwise, query the lmodel for priors.
        else:
            # Get the displayed state.
            # TODO: for oclm this should be a list of (sym, prob)
            update = self.decision_maker.displayed_state

            # update the lmodel and get back the priors
            lm_prior = self.lmodel.state_update(update)

            # normalize to the probability domain if needed
            if getattr(self.lmodel, 'normalized', False):
                lm_letter_prior = lm_prior['letter']
            else:
                lm_letter_prior = norm_domain(lm_prior['letter'])

            if BACKSPACE_CHAR in self.alp:
                # Append backspace if missing.
                sym = (BACKSPACE_CHAR, self.backspace_prob)
                lm_letter_prior = sym_appended(lm_letter_prior, sym)

            # convert to the format needed for evidence fusion:
            # probability values only, in alphabet order.
            # TODO: ensure that probabilities still sum to 1.0
            prior = [
                prior_prob
                for alp_letter in self.alp
                for prior_sym, prior_prob in lm_letter_prior
                if alp_letter == prior_sym
            ]

        # Try fusing the lmodel evidence
        try:
            prob_dist = self.conjugator.update_and_fuse(
                {'LM': np.array(prior)})
        except Exception:
            print("Error updating language model!")
            raise

        # Get the decision maker to give us back some decisions and stimuli
        is_accepted, sti = self.decision_maker.decide(prob_dist)
    except Exception as init_exception:
        print("Error in initialize_epoch: %s" % init_exception)
        raise

    return is_accepted, sti
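
# For reference, plausible sketches of the two helpers called above, inferred
# from their call sites. The bodies (and the `_sketch` names) are assumptions,
# not the canonical `norm_domain` / `sym_appended` implementations.
import math

def _norm_domain_sketch(sym_probs):
    """Convert (symbol, log-probability) pairs to the probability domain.

    Assumes the LM returns log-domain values (negative numbers); if it
    instead returns negative log-likelihoods, exp(-log_prob) would be
    needed here.
    """
    return [(sym, math.exp(log_prob)) for sym, log_prob in sym_probs]

def _sym_appended_sketch(sym_probs, sym_prob):
    """Append a (symbol, probability) pair if the symbol is missing,
    rescaling the existing probabilities so the result still sums to 1.0."""
    if any(sym == sym_prob[0] for sym, _ in sym_probs):
        return sym_probs
    # Scale existing mass down to (1 - p) so the appended pair fits.
    scale = 1.0 - sym_prob[1]
    return [(sym, prob * scale) for sym, prob in sym_probs] + [sym_prob]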