# NB: assumed context for this snippet: numpy, plus the Lexica and Pragmod
# classes (as in the pypragmods codebase) and the BASELEXICON / message /
# referent constants defined elsewhere in the module.
import numpy as np

from lexica import Lexica
from pragmods import Pragmod


def Q_implicature_simulation_datapoint(specific_cost, dcost=1.0, alpha=2.0):
     # Values to obtain:
     is_max = False
     listener_val = None
     speaker_val = None
     # Set-up:
    lexica = Lexica(
        baselexicon=BASELEXICON,
        costs={GENERAL_MSG: 0.0, SPECIFIC_MSG: specific_cost},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=dcost)
     ref_probs = np.repeat(1.0/len(lexica.states), len(lexica.states))
     lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
     # Run the model:
    mod = Pragmod(
        lexica=lexica.lexica2matrices(),
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        lexprior=lexprior,
        prior=ref_probs,
        alpha=alpha)
     langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need. run_expertise_model ends with a listener step,
    # so langs[-1] is the final listener and langs[-2] the final speaker:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-1])
     general_msg_index = lexica.messages.index(GENERAL_MSG)
     general_only_state = lexica.states.index(GENERAL_ONLY_REF)
     disj_state_index = lexica.states.index(DISJ_REF)
     disj_msg_index = lexica.messages.index(DISJ_MSG)
    # Fill in listener_val and speaker_val:
    speaker_val = speaker[disj_state_index, disj_msg_index]
    listener_val = listener[general_msg_index, general_only_state]
     # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
     maxspkval = np.max(speaker[disj_state_index])
     is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
     # Return values:
     return (listener_val, speaker_val, is_max)
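

# A hypothetical usage sketch (not part of the original module): sweep the
# cost of the specific message and print one Q-implicature datapoint per cost.
# The helper name and the cost grid are illustrative assumptions only.
def _demo_Q_implicature_simulation(costs=(0.0, 1.0, 2.0, 3.0)):
    for cost in costs:
        listener_val, speaker_val, is_max = Q_implicature_simulation_datapoint(cost)
        print("specific_cost=%s listener=%s speaker=%s speaker-max=%s" % (
            cost, listener_val, speaker_val, is_max))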


def I_implicature_simulation_datapoint(common_ref_prob, dcost=1.0, alpha=2.0):
     # Values to obtain:
     is_max = False
     listener_val = None
     speaker_val = None
     # Set-up:
    lexica = Lexica(
        baselexicon=BASELEXICON,
        costs=LEXICAL_COSTS,
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=dcost)
     ref_probs = np.array([common_ref_prob, (1.0-common_ref_prob)/2.0, (1.0-common_ref_prob)/2.0])
     lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
     # Run the model:
    mod = Pragmod(
        lexica=lexica.lexica2matrices(),
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        lexprior=lexprior,
        prior=ref_probs,
        alpha=alpha)
     langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need. As above, langs[-1] is the final listener and
    # langs[-2] the final speaker:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-1])
     superkind_term_index = mod.messages.index(SUPERKIND_MSG)
     common_state_index = mod.states.index(COMMON_REF)
     disj_term_index = mod.messages.index(DISJ_MSG)
     disj_state_index = mod.states.index(DISJ_REF)
     # Fill in listener_val and speaker_val:
     listener_val = listener[superkind_term_index, common_state_index]
     speaker_val = speaker[disj_state_index, disj_term_index]
     # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
     maxspkval = np.max(speaker[disj_state_index])
     is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
     # Return values:
     return (listener_val, speaker_val, is_max)
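

# A hypothetical usage sketch (not part of the original module): sweep the
# prior probability of the common referent and print one I-implicature
# datapoint per setting. The helper name and the probability grid are
# illustrative assumptions; the function itself builds the three-state prior.
def _demo_I_implicature_simulation(probs=(0.4, 0.5, 0.6, 0.7, 0.8)):
    for p in probs:
        listener_val, speaker_val, is_max = I_implicature_simulation_datapoint(p)
        print("common_ref_prob=%s listener=%s speaker=%s speaker-max=%s" % (
            p, listener_val, speaker_val, is_max))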