def update_posterior(posterior, signal, context):
    """
    Update the learner's log-posterior over language/perspective hypotheses
    after observing a single (signal, context) pair.

    For each hypothesis (a language paired with a perspective), marginalize
    over the speaker's intended referent: the speaker chooses a referent
    according to their mental state (perspective + context) and then produces
    a signal from the language, with total probability mass `noise` reserved
    for signals the language does not license.

    :param posterior: array of current log-posterior values, one per
        hypothesis in `lp_pairs`
    :param signal: the observed signal
    :param context: the context the signal was produced in
    :return: normalized updated log-posterior (via utilities.normalize_logprobs)
    """
    new_posterior = np.zeros(len(posterior))
    for i in range(len(posterior)):  # for each hypothesis
        language = lp_pairs[i][0]
        perspective = lp_pairs[i][1]
        ref_distribution = calc_mental_state(perspective, context)
        marginalize = np.zeros(len(meanings))
        for m in meanings:
            # Signals the language licenses for meaning m.
            # BUGFIX: the original iterated range(len(meanings)) here even
            # though `s` indexes the signal axis of the lexicon; that is only
            # correct when len(signals) == len(meanings). Iterate signals.
            signals_for_r = [
                signals[s] for s in range(len(signals)) if language[m][s] == '1'
            ]
            num_signals_for_r = len(signals_for_r)
            # log( P(referent = m) * P(signal | m, language) )
            if signal in signals_for_r:
                if num_signals_for_r == num_signals:
                    # Language licenses every signal: no mass left for noise.
                    in_language = log(1 / num_signals_for_r)
                else:
                    in_language = log((1 - noise) / num_signals_for_r)
                marginalize[m] = ref_distribution[m] + in_language
            else:
                # Observed signal is outside the language for this meaning:
                # noise mass is split evenly over the unlicensed signals.
                out_of_language = log(noise / (num_signals - num_signals_for_r))
                marginalize[m] = ref_distribution[m] + out_of_language
        new_posterior[i] = posterior[i] + logsumexp(marginalize)
    return utilities.normalize_logprobs(new_posterior)
def list1_perception_matrix(language, ref_distribution):
    """
    Build the level-1 listener's perception matrix: one row per signal,
    holding normalized log-probabilities over all meanings.
    """
    mat = np.zeros((len(signals), len(meanings)))
    for s_idx, sig in enumerate(signals):
        for m in meanings:
            mat[s_idx, m] = list1_lit_spkr(sig, m, language, ref_distribution)
        mat[s_idx] = utilities.normalize_logprobs(mat[s_idx])
    return mat
def calc_mental_state(perspective, context):
    """
    Given the speaker's perspective and the context, compute a probability
    distribution over the referents: how likely the speaker is to speak
    about each one.
    p. 88 Equation 3.1
    """
    # Closer objects (smaller |perspective - object|) get more probability.
    scores = np.array([log(1 - abs(perspective - obj)) for obj in context])
    return utilities.normalize_logprobs(scores)
def list1_perception_matrix(language, ref_distribution):
    """
    Turn the level-1 learner's model of the literal speaker into a
    listener's perception matrix: one normalized log-probability row
    per signal, over all meanings.
    """
    mat = np.zeros((len(signals), len(meanings)))
    for s_idx, sig in enumerate(signals):
        for m in meanings:
            mat[s_idx, m] = learn1_lit_spkr(sig, m, language, ref_distribution)
        mat[s_idx] = utilities.normalize_logprobs(mat[s_idx])
    return mat
def update_posterior(posterior, signal, context):
    """
    Update the log-posterior over lexicon/perspective hypotheses from one
    observed (signal, context) pair, modeling the speaker as a level-1
    pragmatic speaker (level-2 listener).
    """
    updated = np.zeros(len(posterior))
    for h in range(len(posterior)):  # one hypothesis per posterior entry
        lang = languages[lp_pairs[h][0]]
        persp = perspectives[lp_pairs[h][1]]
        ref_dist = calc_mental_state(persp, context)
        # Marginalize over the speaker's intended meaning in log space.
        marginal = np.zeros(len(meanings))
        for m in meanings:
            marginal[m] = list2_spkr1(signal, m, lang, ref_dist)
        updated[h] = posterior[h] + logsumexp(marginal)
    return utilities.normalize_logprobs(updated)
def update_posterior(posterior, signal, context):
    """
    Update the posterior probabilities the learner has assigned to each
    (lexicon, perspective, pragmatic level) hypothesis based on the
    observed signal and context.
    """
    updated = np.zeros(len(posterior))
    for h in range(len(posterior)):  # one hypothesis per posterior entry
        lex = languages[hypotheses[h][0]]
        persp = perspectives[hypotheses[h][1]]
        level = pragmatic_levels[hypotheses[h][2]]
        ref_dist = calc_mental_state(persp, context)
        marginal = np.zeros(len(meanings))
        if level == 0:
            # Level-1 learner: speaker modeled as literal.
            for m in meanings:
                marginal[m] = learn1_lit_spkr(signal, m, lex, ref_dist)
        elif level == 1:
            # Level-2 learner: speaker modeled as level-1 pragmatic.
            for m in meanings:
                marginal[m] = learn2_spkr1(signal, m, lex, ref_dist)
        updated[h] = posterior[h] + logsumexp(marginal)
    return utilities.normalize_logprobs(updated)
def calc_mental_state(perspective, context):
    """
    Compute a normalized log-probability distribution over the referents in
    `context`, based on how close each object is to the speaker's perspective.
    """
    dist = np.empty(len(context))
    for idx, obj in enumerate(context):
        dist[idx] = log(1 - abs(perspective - obj))
    return utilities.normalize_logprobs(dist)