Example #1
def disjunction(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    # bl = {'p': [r'$w_1$', r'$w_2$'], 'q': [r'$w_2$', r'$w_3$']}
    bl = {'p': ['1', '2'], 'q':['1', '3'], 'p & q': ['1'], 'p v q': ['1', '2', '3']}
    lexica = Lexica(baselexicon=bl,
                    atomic_states=['1', '2', '3'],
                    disjunction_cost=1.0,
                    conjunction_cost=0.0,
                    null_cost=5.0,                    
                    join_closure=True,
                    meet_closure=False,
                    block_trivial_messages=True,
                    block_ineffability=False)
    
    lexica.display()
    # Inspect one of the generated lexica (index 8 is arbitrary):
    for key, val in lexica.lexica[8].items():
        print(key, len(val), val)
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=1.0)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalardisj-expertise-listener-marginalized.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker.pdf', n=n)
    #mod.plot_expertise_speaker(output_filename='../paper/fig/scalardisj-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
    mod.plot_expertise_listener(n=n)
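
For a numeric view in place of the plot, the same mod object supports the printed iteration that appears (commented out) in the scalars example below; a sketch of one optional extra line at the end of disjunction:

    # Optional sketch: print the iterated listener/speaker tables instead of plotting.
    mod.run_expertise_model(n=n, display=True, digits=4)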
Example #2

def Q_implicature_simulation_datapoint(specific_cost, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs={GENERAL_MSG: 0.0, SPECIFIC_MSG: specific_cost}, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.repeat(1.0/len(lexica.states), len(lexica.states))
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    general_msg_index = lexica.messages.index(GENERAL_MSG)
    general_only_state = lexica.states.index(GENERAL_ONLY_REF)
    disj_state_index = lexica.states.index(DISJ_REF)
    disj_msg_index = lexica.messages.index(DISJ_MSG)
    speaker_val = speaker[disj_state_index, disj_msg_index]
    listener_val = listener[general_msg_index, general_only_state]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
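
A hypothetical driver for this function: sweep the cost of the specific message and print the values it returns. The cost grid is illustrative, not from the original.

# Hypothetical driver; the cost grid is illustrative.
for specific_cost in (0.0, 1.0, 2.0, 4.0):
    listener_val, speaker_val, is_max = Q_implicature_simulation_datapoint(specific_cost)
    print(specific_cost, listener_val, speaker_val, is_max)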
Example #3

def I_implicature_simulation_datapoint(common_ref_prob, dcost=1.0, alpha=2.0):
    # Values to obtain:
    is_max = False
    listener_val = None
    speaker_val = None
    # Set-up:
    lexica = Lexica(baselexicon=BASELEXICON, costs=LEXICAL_COSTS, join_closure=True, nullsem=True, nullcost=5.0, disjunction_cost=dcost)
    ref_probs = np.array([common_ref_prob, (1.0-common_ref_prob)/2.0, (1.0-common_ref_prob)/2.0])
    lexprior = np.repeat(1.0/len(lexica.lexica2matrices()), len(lexica.lexica2matrices()))
    # Run the model:
    mod = Pragmod(lexica=lexica.lexica2matrices(), messages=lexica.messages, states=lexica.states, costs=lexica.cost_vector(), lexprior=lexprior, prior=ref_probs, alpha=alpha)
    langs = mod.run_expertise_model(n=3, display=False, digits=2)
    # Get the values we need:
    speaker = mod.speaker_lexical_marginalization(langs[-2])
    listener = mod.listener_lexical_marginalization(langs[-3])
    superkind_term_index = mod.messages.index(SUPERKIND_MSG)
    common_state_index = mod.states.index(COMMON_REF)
    disj_term_index = mod.messages.index(DISJ_MSG)
    disj_state_index = mod.states.index(DISJ_REF)
    # Fill in listener_val and speaker_val:
    listener_val = listener[superkind_term_index, common_state_index]
    speaker_val = speaker[disj_state_index, disj_term_index]
    # Determine whether max, with a bit of rounding to avoid spurious mismatch diagnosis:
    maxspkval = np.max(speaker[disj_state_index])
    is_max = np.round(speaker_val, 10) == np.round(maxspkval, 10)
    # Return values:
    return (listener_val, speaker_val, is_max)
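
A matching hypothetical driver: sweep the prior probability of the common referent. Probabilities must lie in [0, 1]; the grid is illustrative.

# Hypothetical driver; the probability grid is illustrative.
for common_ref_prob in (0.1, 1.0/3, 0.5, 0.9):
    listener_val, speaker_val, is_max = I_implicature_simulation_datapoint(common_ref_prob)
    print(common_ref_prob, listener_val, speaker_val, is_max)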
Example #4
    def build(self):
        lex = Lexica(
            baselexicon=self.baselexicon,
            join_closure=True,
            disjunction_cost=self.disjunction_cost,
            nullsem=True,
            null_cost=self.null_cost,
            costs=copy.copy(self.lexical_costs),
            unknown_word=self.unknown_word,
        )

        self.lexica = lex.lexica2matrices()
        self.states = lex.states
        self.messages = lex.messages

        if self.prior is None:
            self.prior = np.repeat(1.0 / len(self.states), len(self.states))

        if self.lexprior is None:
            self.lexprior = np.repeat(1.0 / len(self.lexica), len(self.lexica))

        self.model = Pragmod(
            lexica=self.lexica,
            messages=self.messages,
            meanings=self.states,
            costs=lex.cost_vector(),
            prior=self.prior,
            lexprior=self.lexprior,
            temperature=self.temperature,
            alpha=self.alpha,
            beta=self.beta,
        )

        self.langs = self.model.run_expertise_model(n=self.n, display=True)
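
build() reads everything from instance attributes, and the enclosing class is not shown in this example. A minimal sketch of what that class would have to define (the name Experiment and all default values here are assumptions):

# Hypothetical enclosing class: every attribute below is read by build().
class Experiment:
    def __init__(self, baselexicon, n=3):
        self.baselexicon = baselexicon
        self.disjunction_cost = 1.0
        self.null_cost = 5.0
        self.lexical_costs = {}
        self.unknown_word = None
        self.prior = None       # build() defaults this to a uniform state prior
        self.lexprior = None    # build() defaults this to a uniform lexicon prior
        self.temperature = 1.0
        self.alpha = 1.0
        self.beta = 1.0
        self.n = n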
Example #5
def scalars(n=3):
    """Scalar example without and with disjunction; compare with Bergen et al.'s figure 5 and figure 9"""
    baselexicon = {'some': [r'$w_{\exists\neg\forall}$', r'$w_{\forall}$'], 'all': [r'$w_{\forall}$']}
    basic_lexica = Lexica(baselexicon=baselexicon)  # closure-free version, for comparison; not used below
    lexica = Lexica(baselexicon=baselexicon, join_closure=True, disjunction_cost=0.1)
    lexica.display()    
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.repeat(1.0/len(lexica.states), len(lexica.states)),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0)
    #mod.run_expertise_model(n=n, display=True, digits=4)
    #mod.plot_expertise_listener(output_filename='../paper/fig/scalar-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/scalar-expertise-speaker.pdf', n=n)
Example #6

def main():
    if len(sys.argv) < 2:
        print("ERRO - Por Favor Informe Um Arquivo .tpp")
    else:
        with open(sys.argv[1], 'r', encoding='utf-8') as file:
            codigo = file.read()
        sin = Sintatica(codigo, Lexica().tokens)
        dot = Digraph(comment='TREE')
        Tree().printTree(sin.ast, dot)
        #dot.render('out/sintatica/normal.gv', view=True)
        Tree().poda(sin.ast)
        dot = Digraph(comment='TREE')
        Tree().printTreeCut(sin.ast, dot)
        #dot.render('out/semantica/poda.gv', view=True)
        sema = Semantica()
        sema.percorrerTree(sin.ast)
        sema.verificacoes()
        #sema.printTabSymbols()
        llvm.initialize()
        llvm.initialize_all_targets()
        llvm.initialize_native_target()
        llvm.initialize_native_asmparser()
        modulo = ir.Module(sys.argv[1])
        modulo.triple = llvm.get_process_triple()
        target = llvm.Target.from_triple(modulo.triple)
        targetMachine = target.create_target_machine()
        modulo.data_layout = targetMachine.target_data
        Geracao().percorrer(sin.ast, modulo)
        with open('teste.ll', 'w') as arquivo:
            arquivo.write(str(modulo))
        print(modulo)
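
The function above only writes the textual IR to teste.ll. A minimal follow-up sketch, using llvmlite's binding layer (already imported as llvm here), that verifies the module and emits native object code; the output filename teste.o is an assumption:

        # Sketch: verify the IR and emit a native object file. parse_assembly,
        # verify, and emit_object are llvmlite.binding APIs.
        mod_ref = llvm.parse_assembly(str(modulo))
        mod_ref.verify()
        with open('teste.o', 'wb') as obj:
            obj.write(targetMachine.emit_object(mod_ref))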
Example #7
    def __init__(self, code):
        lex = Lexica()
        self.tokens = lex.tokens
        self.precedence = (
            ('left', 'IGUALDADE', 'NEGACAO', 'MENOR_IGUAL', 'MAIOR', 'MAIOR_IGUAL', 'MENOR'),
            ('left', 'SOMA', 'SUBTRACAO'),
            ('left', 'MULTIPLICACAO', 'DIVISAO'),
        )
        parser = yacc.yacc(debug=True, module=self, optimize=False)
        self.ast = parser.parse(code)
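
Hypothetical usage, assuming this __init__ belongs to the parser class that the other examples on this page call Sintatica:

# Hypothetical usage; the class name Sintatica and the input file are assumptions.
with open('programa.tpp', 'r', encoding='utf-8') as f:
    sin = Sintatica(f.read())
print(sin.ast)  # the AST built by ply.yacc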
Example #8
def manner(n=3):
    """Settings for Bergen et al.'s figure 6. Seems to reproduce the effects they report."""
    lexica = Lexica(baselexicon={'SHORT': [r'$w_{RARE}$', r'$w_{FREQ}$'], r'long': [r'$w_{RARE}$', r'$w_{FREQ}$']},
                    costs={'SHORT':1.0, r'long':2.0},
                    null_cost=5.0,
                    join_closure=False,
                    disjunction_cost=0.1)
    lexica.display()   
    mod = Pragmod(lexica=lexica.lexica2matrices(),
                  messages=lexica.messages,
                  meanings=lexica.states,
                  costs=lexica.cost_vector(),
                  prior=np.array([2.0/3.0, 1.0/3.0]),
                  lexprior=np.repeat(1.0/len(lexica), len(lexica)),
                  temperature=1.0,
                  alpha=3.0)
    mod.plot_expertise_listener(output_filename='../paper/fig/manner-expertise-listener-marginalized.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker.pdf', n=n)
    mod.plot_expertise_speaker(output_filename='../paper/fig/manner-expertise-speaker-lexsum.pdf', n=n, lexsum=True)
Example #9
    def __init__(self, code):
        lexer = Lexica()
        self.tokens = lexer.tokens

        self.precedence = (
            ('left', 'IGUAL', 'MAIOR_IGUAL', 'MAIOR', 'MENOR_IGUAL', 'MENOR'),
            ('left', 'MAIS', 'MENOS'),
            ('left', 'MULTIPLICACAO', 'DIVISAO'),
        )
        parser = yacc.yacc(debug=False, module=self, optimize=False)

        self.ast = parser.parse(code)
Example #10

def main():
    if len(sys.argv) < 2:
        print("ERRO - Por Favor Informe Um Arquivo .tpp")
    else:
        try:
            with open(sys.argv[1], 'r', encoding='utf-8') as file:
                arvore = Sintatica(file.read(), Lexica().tokens)
            w = Digraph('G', filename='out/teste.gv')
            Tree().printTree(arvore.ast, '', '', w, i=0)
            w.view()
        except Exception as e:
            print(e)
            print(
                "ERRO - Nao Foi Possivel Executar a Funcao, Por Favor Tente Novamente"
            )
Example #11
def main():
    if len(sys.argv) < 2:
        print("ERRO - Por Favor Informe Um Arquivo .tpp")
    else:
        with open(sys.argv[1], 'r', encoding='utf-8') as file:
            sin = Sintatica(file.read(), Lexica().tokens)
        #dot = Digraph(comment='TREE')
        #Tree().printTree(sin.ast, dot)
        #dot.render('out/sintatica/normal.gv', view=True)
        Tree().poda(sin.ast)
        #dot = Digraph(comment='TREE')
        #Tree().printTreeCut(sin.ast, dot)
        #dot.render('out/semantica/poda.gv', view=True)
        sema = Semantica()
        sema.percorrerTree(sin.ast)
        sema.verificacoes()
Example #12
def main():
    if len(sys.argv) < 2:
        print("ERRO - Por Favor Informe Um Arquivo .tpp")
    else:
        try:
            lexer = Lexica()
            lexer.readFile(sys.argv[1])
            lexer.printTokens()
        except Exception as e:
            print(e)
            print(
                "ERRO - Nao Foi Possivel Executar a Função, Por Favor Tente Novamente"
            )
Example #13
# Yacc example

import ply.yacc as yacc
import sys

# Get the token map from the lexer.  This is required.
from lexica import Lexica

lexica = Lexica()
tokens = lexica.tokens

def p_expression_plus(p):
    'expression : expression MAIS term'
    p[0] = p[1] + p[3]
 
def p_expression_minus(p):
    'expression : expression MENOS term'
    p[0] = p[1] - p[3]
 
def p_expression_term(p):
    'expression : term'
    p[0] = p[1]
 
def p_term_times(p):
    'term : term MULTIPLICACAO factor'
    p[0] = p[1] * p[3]
 
def p_term_div(p):
    'term : term DIVISAO factor'
    p[0] = p[1] / p[3]
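
The example breaks off before any rule for factor is defined, so yacc.yacc() would reject the grammar as shown. A minimal hypothetical completion (the token name NUMERO and the error hook are assumptions, not part of the original):

# Hypothetical completion: give factor a base case and add an error hook.
def p_factor_num(p):
    'factor : NUMERO'  # NUMERO is an assumed numeric token from Lexica
    p[0] = p[1]

def p_error(p):
    # Called by ply.yacc when no grammar rule matches the next token.
    print("Erro de sintaxe:", p)

parser = yacc.yacc()
# result = parser.parse(source_text)  # source_text: program text for the Lexica lexer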