Example #1
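All of these snippets rely on module-level definitions that the excerpts don't show: the Lexica and Pragmod classes, NULL_MSG, numpy, and short names for the messages and worlds. A minimal setup sketch for the first three examples; the import paths and symbol values are assumptions based on the surrounding codebase's conventions, not part of this listing:

import numpy as np

# Assumed import paths:
from lexica import Lexica, NULL_MSG
from pragmods import Pragmod

# Assumed message and world names for Examples #1 and #2:
p, q, pandq = 'p', 'q', 'p&q'
w1, w2, w3 = 'w1', 'w2', 'w3'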
def compositional_disjunction():
    # Base lexicon: p true in w1/w2, q true in w1/w3, and the conjunction
    # 'p&q' true only in w1. join_closure=True adds disjunctive messages
    # (interpreted as unions).
    lexica = Lexica(
        baselexicon={p: [w1, w2], q: [w1, w3], pandq: [w1]},
        costs={p: 0.0, q: 0.0, pandq: 1.0},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=1.0)
    lexica.display()
    # Flat priors over states and over the generated lexica:
    mod = Pragmod(
        lexica=lexica.lexica2matrices(),
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
        lexprior=np.repeat(1.0/len(lexica), len(lexica)),
        temperature=1.0,
        alpha=1.0,
        beta=1.0)
    mod.run_expertise_model(n=2, display=True, digits=2)
Example #2
def basic_scalar():
    # Three candidate lexica, each a messages-by-states truth-value matrix.
    # Rows: 'cheap', 'free', NULL_MSG; columns: states w1 and w2, with
    # 'free' true only in w1. The lexica differ solely in how 'cheap' is
    # interpreted: true in both worlds, only in w1, or only in w2.
    lexica = [
        np.array([[1.0, 1.0], [1.0, 0.0], [1.0, 1.0]]),
        np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
        np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    ]
    mod = Pragmod(lexica=lexica,
                  messages=['cheap', 'free', NULL_MSG],
                  states=[w1, w2],
                  costs=np.array([0.0, 0.0, 5.0]),
                  prior=np.repeat(1.0/2.0, 2),
                  lexprior=np.repeat(1.0/3.0, 3),
                  temperature=1.0,
                  alpha=1.0,
                  beta=2.0)
    # Show the base (fixed-lexicon) dynamics for each lexicon separately:
    for lex in lexica:
        print("=" * 70)
        print(mod.lex2str(lex))
        mod.run_base_model(lex, n=2, display=True, digits=2)
    # Then run the full expertise model over the whole lexicon set:
    mod.run_expertise_model(n=3, display=True, digits=2)
Example #3
def generic_disjunction_example(alpha=1.0,
                                beta=1.0,
                                disjunction_cost=1.0,
                                n=2,
                                fulldisplay=False,
                                unknown_word=None):
    """Common code for our two illustrative examples, which
    differ only in the above keyword parameters. Increase n to see
    greater depths of recursion. use fulldisplay=True to see more
    details."""
    # Use the lexicon generation convenience function to
    # generate all the join-closure lexica and calculate
    # the necessary message costs:
    lexica = Lexica(
        baselexicon={'A': ['1'], 'B': ['2'], 'X': ['1', '2']},
        costs={'A': 0.0, 'B': 0.0, 'X': 0.0},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=disjunction_cost,
        unknown_word=unknown_word)
    # Lexical matrices:
    lexmats = lexica.lexica2matrices()
    # Pragmatic models for the above lexical space.
    mod = Pragmod(
        lexica=lexmats,
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
        lexprior=np.repeat(1.0/len(lexmats), len(lexmats)),
        temperature=1.0,
        alpha=alpha,
        beta=beta)
    if fulldisplay:
        lexica.display()
        # Run the base model on the individual lexica so we can show
        # those lower steps:
        for lex in lexmats:
            print("=" * 70)
            print(mod.lex2str(lex))
            mod.run_base_model(lex, n=2, display=True, digits=2)
    # Run the anxious-experts model; fulldisplay=True gives a fuller picture:
    langs = mod.run_expertise_model(n=n, display=fulldisplay, digits=2)
    # Look at the specific table we care about:
    msg_index = mod.messages.index('A v X')
    final_listener = langs[-1]
    mod.display_joint_listener(final_listener[msg_index], digits=2)
    return langs
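A possible driver for this function. The docstring's two illustrative examples presumably differ along these lines (a costly disjunction with all words known versus a cheap disjunction with 'X' unknown), but the exact settings here are assumptions:

# Hypothetical usage:
hurford_langs = generic_disjunction_example(disjunction_cost=1.0, n=2)
definitional_langs = generic_disjunction_example(disjunction_cost=0.01, n=2,
                                                 unknown_word='X')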
Example #4
# Requires: from collections import defaultdict; from itertools import product
def explore_listener_parameters(self):
    """Explore a large parameter space, classifying the parameter vectors
    based on the max listener <world,lex> inference given self.msg"""
    results = defaultdict(list)
    for dcost, alpha, beta, depth in product(self.dcosts, self.alphas,
                                             self.betas, self.depths):
        params = {'alpha': alpha,
                  'beta': beta,
                  'depth': depth,
                  'disjunction_cost': dcost}
        lexica = Lexica(
            baselexicon=self.baselexicon,
            costs=self.lexical_costs,
            join_closure=True,
            nullsem=True,
            nullcost=5.0,
            disjunction_cost=dcost,
            unknown_word=self.unknown_word)
        lexmats = lexica.lexica2matrices()
        mod = Pragmod(
            lexica=lexmats,
            messages=lexica.messages,
            states=lexica.states,
            costs=lexica.cost_vector(),
            prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
            lexprior=np.repeat(1.0/len(lexmats), len(lexmats)),
            temperature=1.0,
            alpha=alpha,
            beta=beta)
        # Run the model:
        langs = mod.run_expertise_model(n=depth, display=False)
        # Get the listener's joint probability table for this message:
        msg_index = mod.messages.index(self.msg)
        prob_table = langs[-1][msg_index]
        sorted_probs = sorted(prob_table.flatten())
        max_pair = None
        max_prob = sorted_probs[-1]
        # No ties allowed!
        if max_prob != sorted_probs[-2]:
            for i, j in product(range(prob_table.shape[0]),
                                range(prob_table.shape[1])):
                if prob_table[i, j] == max_prob:
                    max_pair = (i, mod.states[j])
        # Add the target probability:
        params['prob'] = max_prob
        # Print to show progress:
        print(max_pair, params)
        # Store the parameters plus the predicted probability;
        # max_pair is a (lexicon index, state name) pair.
        results[max_pair].append(params)
    return results
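This is a method: the search grid and the fixed problem data come off self. A minimal sketch of a host object with the attributes the method reads (attribute names taken from the method body; the example grid values are made up):

# Hypothetical host class; in the source, explore_listener_parameters
# is defined inside something like this.
class Explorer:
    def __init__(self, baselexicon, lexical_costs, msg, unknown_word=None,
                 dcosts=(0.0, 1.0, 2.0), alphas=(1.0, 2.0, 3.0),
                 betas=(1.0, 2.0, 3.0), depths=(2, 3)):
        self.baselexicon = baselexicon      # e.g., {'A': ['1'], 'B': ['2'], 'X': ['1', '2']}
        self.lexical_costs = lexical_costs  # e.g., {'A': 0.0, 'B': 0.0, 'X': 0.0}
        self.msg = msg                      # message whose listener table is classified
        self.unknown_word = unknown_word
        self.dcosts = dcosts                # disjunction costs to try
        self.alphas = alphas
        self.betas = betas
        self.depths = depths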
Example #5
def I_implicature_simulation_datapoint(common_ref_prob, dcost=1.0, alpha=2.0):
    # BASELEXICON, LEXICAL_COSTS, and the *_MSG/*_REF names are module-level
    # constants defined alongside this function.
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(
        baselexicon=BASELEXICON,
        costs=LEXICAL_COSTS,
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=dcost)
    lexmats = lexica.lexica2matrices()
    # The common referent gets common_ref_prob; the rest is split evenly:
    ref_probs = np.array([common_ref_prob,
                          (1.0 - common_ref_prob) / 2.0,
                          (1.0 - common_ref_prob) / 2.0])
    lexprior = np.repeat(1.0 / len(lexmats), len(lexmats))
    # Run the model:
    mod = Pragmod(
        lexica=lexmats,
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        lexprior=lexprior,
        prior=ref_probs,
        alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    superkind_term_index = mod.messages.index(SUPERKIND_MSG)
    common_state_index = mod.states.index(COMMON_REF)
    disj_term_index = mod.messages.index(DISJ_MSG)
    disj_state_index = mod.states.index(DISJ_REF)
    # Fill in listener_val and speaker_val:
    listener_val = listener[superkind_term_index, common_state_index]
    speaker_val = speaker[disj_state_index, disj_term_index]
    # Determine whether max, with a bit of rounding to avoid
    # spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
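A sweep sketch, assuming the module constants above are in scope; the grid of priors is illustrative:

# Hypothetical sweep over the prior probability of the common referent:
for common_ref_prob in np.linspace(0.1, 0.9, 9):
    listener_val, speaker_val, is_max = \
        I_implicature_simulation_datapoint(common_ref_prob)
    print(common_ref_prob, listener_val, speaker_val, is_max)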
Example #6
def Q_implicature_simulation_datapoint(specific_cost, dcost=1.0, alpha=2.0):
    # BASELEXICON and the *_MSG/*_REF names are module-level constants
    # defined alongside this function.
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up: only the specific message's cost varies across datapoints.
    lexica = Lexica(
        baselexicon=BASELEXICON,
        costs={GENERAL_MSG: 0.0, SPECIFIC_MSG: specific_cost},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=dcost)
    lexmats = lexica.lexica2matrices()
    # Flat priors over referents and lexica:
    ref_probs = np.repeat(1.0 / len(lexica.states), len(lexica.states))
    lexprior = np.repeat(1.0 / len(lexmats), len(lexmats))
    # Run the model:
    mod = Pragmod(
        lexica=lexmats,
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        lexprior=lexprior,
        prior=ref_probs,
        alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    general_msg_index = lexica.messages.index(GENERAL_MSG)
    general_only_state = lexica.states.index(GENERAL_ONLY_REF)
    disj_state_index = lexica.states.index(DISJ_REF)
    disj_msg_index = lexica.messages.index(DISJ_MSG)
    speaker_val = speaker[disj_state_index, disj_msg_index]
    listener_val = listener[general_msg_index, general_only_state]
    # Determine whether max, with a bit of rounding to avoid spurious
    # mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
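A matching sweep sketch for the Q-implicature datapoint; the cost grid is illustrative:

# Hypothetical sweep over the cost of the specific message:
for specific_cost in np.arange(0.0, 5.5, 0.5):
    listener_val, speaker_val, is_max = \
        Q_implicature_simulation_datapoint(specific_cost)
    print(specific_cost, listener_val, speaker_val, is_max)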