# Shared imports for the examples below. The import paths are an assumption:
# Lexica and Pragmod are taken from the pypragmods modules lexica.py and
# pragmods.py; adjust them to match your installation. BASELEXICON,
# GENERAL_MSG, SPECIFIC_MSG, and the other upper-case names used below are
# module-level constants defined elsewhere in the original source.
import copy

import numpy as np

from lexica import Lexica
from pragmods import Pragmod


def Q_implicature_simulation_datapoint(specific_cost, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs={GENERAL_MSG: 0.0, SPECIFIC_MSG: specific_cost}, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.repeat(1.0/len(lexica.states), len(lexica.states))
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    general_msg_index = lexica.messages.index(GENERAL_MSG)
    general_only_state = lexica.states.index(GENERAL_ONLY_REF)
    disj_state_index = lexica.states.index(DISJ_REF)
    disj_msg_index = lexica.messages.index(DISJ_MSG)
    speaker_val = speaker[disj_state_index, disj_msg_index]
    listener_val = listener[general_msg_index, general_only_state]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
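
# Illustrative usage sketch (not part of the original source): sweep the cost
# of the specific message and collect the listener/speaker values returned
# above. It assumes the module-level constants referenced in the function
# (BASELEXICON, GENERAL_MSG, SPECIFIC_MSG, etc.) are defined.
#
# for specific_cost in np.arange(0.0, 5.0, 1.0):
#     listener_val, speaker_val, is_max = Q_implicature_simulation_datapoint(specific_cost)
#     print(specific_cost, listener_val, speaker_val, is_max)
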
def I_implicature_simulation_datapoint(common_ref_prob, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs=LEXICAL_COSTS, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.array([common_ref_prob, (1.0-common_ref_prob)/2.0, (1.0-common_ref_prob)/2.0])
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    superkind_term_index = mod.messages.index(SUPERKIND_MSG)
    common_state_index = mod.states.index(COMMON_REF)
    disj_term_index = mod.messages.index(DISJ_MSG)
    disj_state_index = mod.states.index(DISJ_REF)
    # Fill in listener_val and speaker_val:
    listener_val = listener[superkind_term_index, common_state_index]
    speaker_val = speaker[disj_state_index, disj_term_index]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
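
# Illustrative usage sketch (not part of the original source): vary the prior
# probability of the common referent and inspect how the listener and speaker
# values respond. Assumes the constants used above (LEXICAL_COSTS,
# SUPERKIND_MSG, COMMON_REF, DISJ_MSG, DISJ_REF) are defined.
#
# for common_ref_prob in np.arange(0.1, 1.0, 0.1):
#     listener_val, speaker_val, is_max = I_implicature_simulation_datapoint(common_ref_prob)
#     print(common_ref_prob, listener_val, speaker_val, is_max)
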
def disjunction(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    # bl = {'p': [r'$w_1$', r'$w_2$'], 'q':[r'$w_2$', r'$w_3$']}
    bl = {'p': ['1', '2'], 'q':['1', '3'], 'p & q': ['1'], 'p v q': ['1', '2', '3']}
    lexica = Lexica(baselexicon=bl,
                    atomic_states=['1', '2', '3'],
                    disjunction_cost=1.0,
                    conjunction_cost=0.0,
                    null_cost=5.0,                    
                    join_closure=True,
                    meet_closure=False,
                    block_trivial_messages=True,
                    block_ineffability=False)
    
    lexica.display()
    for key, val in lexica.lexica[8].items():
        print(key, len(val), val)
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=1.0)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalardisj-expertise-listener-marginalized.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
    mod.plot_expertise_listener(n=n)
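
# Note: build() below is a method of a larger wrapper class whose definition is
# not shown in this excerpt. Judging from the body, the instance is expected to
# provide at least: baselexicon, disjunction_cost, null_cost, lexical_costs,
# unknown_word, prior, lexprior, temperature, alpha, beta, and n.
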
    def build(self):
        lex = Lexica(
            baselexicon=self.baselexicon,
            join_closure=True,
            disjunction_cost=self.disjunction_cost,
            nullsem=True,
            null_cost=self.null_cost,
            costs=copy.copy(self.lexical_costs),
            unknown_word=self.unknown_word,
        )

        self.lexica = lex.lexica2matrices()
        self.states = lex.states
        self.messages = lex.messages

        if self.prior is None:
            self.prior = np.repeat(1.0 / len(self.states), len(self.states))

        if self.lexprior is None:
            self.lexprior = np.repeat(1.0 / len(self.lexica), len(self.lexica))

        self.model = Pragmod(
            lexica=self.lexica,
            messages=self.messages,
            meanings=self.states,
            costs=lex.cost_vector(),
            prior=self.prior,
            lexprior=self.lexprior,
            temperature=self.temperature,
            alpha=self.alpha,
            beta=self.beta,
        )

        self.langs = self.model.run_expertise_model(n=self.n, display=True)
def scalars(n=3):
    """Scalar example without and with disjunction; compare with Bergen et al.'s figure 5 and figure 9"""
    baselexicon = {'some': [r'$w_{\exists\neg\forall}$', r'$w_{\forall}$'], 'all': [r'$w_{\forall}$']}
    basic_lexica = Lexica(baselexicon=baselexicon)
    lexica = Lexica(baselexicon=baselexicon, join_closure=True, disjunction_cost=0.1)
    lexica.display()    
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0)
    #mod.run_expertise_model(n=n, display=True, digits=4)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalar-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/scalar-expertise-speaker.pdf', n=n)
def manner(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    lexica = Lexica(baselexicon={'SHORT': [r'$w_{RARE}$', r'$w_{FREQ}$'], r'long': [r'$w_{RARE}$', r'$w_{FREQ}$']},
                    costs={'SHORT':1.0, r'long':2.0},
                    null_cost=5.0,
                    join_closure=False,
                    disjunction_cost=0.1)
    lexica.display()   
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.array([2.0/3.0, 1.0/3.0]),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=3.0)
    mod.plot_expertise_listener(output_filename='../paper/fig/manner-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
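
# Illustrative entry point (not part of the original source): reproduce the
# example plots by calling the functions defined above.
#
# if __name__ == '__main__':
#     scalars(n=3)
#     manner(n=3)
#     disjunction(n=3)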