Пример #1
0
def generic_disjunction_example(alpha=1.0,
                                beta=1.0,
                                disjunction_cost=1.0,
                                n=2,
                                fulldisplay=False,
                                unknown_word=None):
    """Shared driver for the two illustrative disjunction examples.

    The examples differ only in these keyword arguments. Raise `n` to
    explore deeper recursion; pass `fulldisplay=True` for step-by-step
    output. Returns the sequence of languages from the expertise model.
    """
    # Build the join-closed lexicon space, which also derives the
    # message costs we need for the pragmatic model.
    base = {'A': ['1'], 'B': ['2'], 'X': ['1', '2']}
    lexica = Lexica(
        baselexicon=base,
        costs={'A': 0.0, 'B': 0.0, 'X': 0.0},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=disjunction_cost,
        unknown_word=unknown_word)
    # Matrix form of each lexicon:
    matrices = lexica.lexica2matrices()
    state_count = len(lexica.states)
    lex_count = len(matrices)
    # Pragmatic model over the whole lexical space, with flat priors
    # over both states and lexica.
    model = Pragmod(
        lexica=matrices,
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        prior=np.repeat(1.0 / state_count, state_count),
        lexprior=np.repeat(1.0 / lex_count, lex_count),
        temperature=1.0,
        alpha=alpha,
        beta=beta)
    if fulldisplay:
        lexica.display()
        # Also run the base model per individual lexicon so the lower
        # recursion steps are visible.
        for matrix in matrices:
            print("=" * 70)
            print(model.lex2str(matrix))
            model.run_base_model(matrix, n=2, display=True, digits=2)
    ## Anxious-experts run; fulldisplay=True gives the fuller picture:
    langs = model.run_expertise_model(n=n, display=fulldisplay, digits=2)
    # Show just the joint-listener table for the message of interest.
    target_idx = model.messages.index('A v X')
    final_listener = langs[-1]
    model.display_joint_listener(final_listener[target_idx], digits=2)
    return langs
Пример #2
0
def simple_disjunction():
    """Run the base pragmatic model on a single hand-built lexicon.

    Rows of `lexicon` are the messages [p, q, pandq, porq, NULL_MSG];
    columns are the states [w1, w2, w3]. Displays two recursion steps.
    """
    lexicon = np.array([[1.0, 1.0, 0.0],
                        [1.0, 0.0, 1.0],
                        [1.0, 0.0, 0.0],
                        [1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0]])
    mod = Pragmod(lexica=None,
                  messages=[p, q, pandq, porq, NULL_MSG],
                  states=[w1, w2, w3],
                  costs=np.array([0.0, 0.0, 1.0, 1.0, 5.0]),
                  # BUGFIX: was np.repeat(1.0/4.0, 3), which gives a
                  # 3-state prior summing to 0.75 (likely copied from a
                  # 4-state example). Use a proper uniform prior.
                  prior=np.repeat(1.0 / 3.0, 3),
                  lexprior=None,
                  temperature=1.0,
                  alpha=1.0,
                  beta=0.0)
    mod.run_base_model(lexicon, n=2, display=True, digits=2)
Пример #3
0
def generic_disjunction_example(
        alpha=1.0,
        beta=1.0,
        disjunction_cost=1.0,
        n=2,
        fulldisplay=False,
        unknown_word=None):
    """Shared driver for the two illustrative disjunction examples.

    The two examples differ only in these keyword parameters. Increase
    `n` for deeper recursion; set `fulldisplay=True` for more detail.
    Returns the languages produced by the expertise model.
    """
    # Convenience construction of all join-closure lexica plus the
    # associated message costs.
    lexica = Lexica(
        baselexicon={'A': ['1'], 'B': ['2'], 'X': ['1', '2']},
        costs={'A': 0.0, 'B': 0.0, 'X': 0.0},
        join_closure=True,
        nullsem=True,
        nullcost=5.0,
        disjunction_cost=disjunction_cost,
        unknown_word=unknown_word)
    # Lexical matrices:
    mats = lexica.lexica2matrices()
    n_states, n_lex = len(lexica.states), len(mats)
    # Pragmatic model for this lexical space; flat state/lexicon priors.
    model = Pragmod(
        lexica=mats,
        messages=lexica.messages,
        states=lexica.states,
        costs=lexica.cost_vector(),
        prior=np.repeat(1.0 / n_states, n_states),
        lexprior=np.repeat(1.0 / n_lex, n_lex),
        temperature=1.0,
        alpha=alpha,
        beta=beta)
    if fulldisplay:
        lexica.display()
        # Base-model runs on each lexicon expose the lower steps.
        for mat in mats:
            print("=" * 70)
            print(model.lex2str(mat))
            model.run_base_model(mat, n=2, display=True, digits=2)
    ## Anxious-experts model; fulldisplay=True for the fuller picture:
    langs = model.run_expertise_model(n=n, display=fulldisplay, digits=2)
    # Only the joint-listener table for 'A v X' is of interest here.
    idx = model.messages.index('A v X')
    model.display_joint_listener(langs[-1][idx], digits=2)
    return langs
Пример #4
0
def simple_disjunction():
    """Run the base pragmatic model on a single hand-built lexicon.

    Rows of `lexicon` are the messages [p, q, pandq, porq, NULL_MSG];
    columns are the states [w1, w2, w3]. Displays two recursion steps.
    """
    lexicon = np.array([[1.0, 1.0, 0.0],
                        [1.0, 0.0, 1.0],
                        [1.0, 0.0, 0.0],
                        [1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0]])
    mod = Pragmod(
        lexica=None,
        messages=[p, q, pandq, porq, NULL_MSG],
        states=[w1, w2, w3],
        costs=np.array([0.0, 0.0, 1.0, 1.0, 5.0]),
        # BUGFIX: was np.repeat(1.0/4.0, 3), a 3-state prior summing to
        # 0.75 (apparently copied from a 4-state example). Use a proper
        # uniform prior over the three states.
        prior=np.repeat(1.0 / 3.0, 3),
        lexprior=None,
        temperature=1.0,
        alpha=1.0,
        beta=0.0)
    mod.run_base_model(lexicon, n=2, display=True, digits=2)
Пример #5
0
def basic_scalar():
    """Scalar-implicature demo over three candidate lexica.

    Runs the base model for two steps on each lexicon, then the
    expertise model for three steps over the whole space. Messages are
    ['cheap', 'free', NULL_MSG]; states are [w1, w2].
    """
    lexica = [
        np.array([[1.0, 1.0], [1.0, 0.0], [1.0, 1.0]]),
        np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
        np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]),
    ]
    # Flat priors over the two states and the three lexica.
    model = Pragmod(
        lexica=lexica,
        messages=['cheap', 'free', NULL_MSG],
        states=[w1, w2],
        costs=np.array([0.0, 0.0, 5.0]),
        prior=np.repeat(1.0 / 2.0, 2),
        lexprior=np.repeat(1.0 / 3.0, 3),
        temperature=1.0,
        alpha=1.0,
        beta=2.0)
    # Per-lexicon base-model runs expose the lower recursion steps.
    for candidate in lexica:
        print("=" * 70)
        print(model.lex2str(candidate))
        model.run_base_model(candidate, n=2, display=True, digits=2)
    model.run_expertise_model(n=3, display=True, digits=2)
Пример #6
0
def basic_scalar():
    """Scalar-implicature demo over three candidate lexica.

    For each lexicon the base model runs two steps; then the expertise
    model runs three steps on the full space. Messages are
    ['cheap', 'free', NULL_MSG]; states are [w1, w2].
    """
    first = np.array([[1.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    second = np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
    third = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    lexica = [first, second, third]
    # Uniform priors over both the state space and the lexicon space.
    model = Pragmod(lexica=lexica,
                    messages=['cheap', 'free', NULL_MSG],
                    states=[w1, w2],
                    costs=np.array([0.0, 0.0, 5.0]),
                    prior=np.repeat(1.0 / 2.0, 2),
                    lexprior=np.repeat(1.0 / 3.0, 3),
                    temperature=1.0,
                    alpha=1.0,
                    beta=2.0)
    # Show the lower-level base-model runs for each lexicon.
    for candidate in lexica:
        print("=" * 70)
        print(model.lex2str(candidate))
        model.run_base_model(candidate, n=2, display=True, digits=2)
    model.run_expertise_model(n=3, display=True, digits=2)