import numpy as np
import simpy

import pyactr as actr

#`counting` (the ACT-R model), `dd` (its initial declarative memory) and
#`size` (the number of data points to match) are assumed to be defined at
#module level in the original script.


def model(rule_firing, lf):
    """
    Create a model with two rules and let pyMC find the best values for the
    rule firing and latency factor parameters.
    """
    counting.decmems = {
    }  #clean all memories first, because each loop adds chunks into memory and we want to ignore those
    counting.set_decmem(
        dd)  #then add only the memory chunks present at the beginning
    #add the starting chunk to the goal buffer
    counting.goal.add(actr.chunkstring(string="isa countFrom start 2 end 4"))
    counting.model_parameters["latency_factor"] = lf
    counting.model_parameters["rule_firing"] = rule_firing
    sim = counting.simulation(trace=False)
    last_time = 0
    while True:
        if last_time > 10:  #break if the value is unreasonably high
            last_time = 10.0
            break
        try:
            sim.step()
            last_time = sim.show_time()
        except simpy.core.EmptySchedule:
            last_time = 10.0  #some high value, to make clear that this run did not end the right way
            break
        if not counting.goal:  #goal cleared: the task finished correctly
            break
    return np.repeat(np.array(last_time), size)
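
#A sketch of how `model` might be handed over to pyMC for estimation,
#following the common pattern of wrapping a slow simulation in a Theano op.
#Everything here is illustrative: `observed_times` and the prior bounds are
#stand-ins, not values from the original script.
import pymc3 as pm
import theano.tensor as tt
from theano.compile.ops import as_op

@as_op(itypes=[tt.dscalar, tt.dscalar], otypes=[tt.dvector])
def model_op(rule_firing, lf):
    #wrap the simulation so pyMC can treat it as a deterministic node
    return model(rule_firing, lf)

with pm.Model():
    rf = pm.Uniform('rule_firing', lower=0.01, upper=0.2)
    lf = pm.Uniform('lf', lower=0.01, upper=0.5)
    mu = model_op(rf, lf)
    pm.Normal('obs', mu=mu, sd=0.1, observed=observed_times)
    #a gradient-free sampler is needed, since the wrapped op has no gradient
    trace = pm.sample(draws=1000, step=pm.Metropolis())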
Example #2
def model(lf):
    """
    Create a model with two rules and let pyMC find the best value for the
    latency factor.
    """
    counting.decmems = {
    }  #clean all memories first, because each loop adds chunks into memory and we want to ignore those
    counting.set_decmem(
        dd)  #then add only the memory chunks present at the beginning
    #add the starting chunk to the goal buffer
    counting.goal.add(actr.chunkstring(
        string="isa countFrom start 2 end 4"))  #starting goal chunk
    counting.model_parameters["latency_factor"] = lf
    sim = counting.simulation(trace=False)
    last_time = 0
    while True:
        if last_time > 10:  #break if the value is unreasonably high, which might happen with weird proposed estimates
            last_time = 10.0
            break
        try:
            sim.step()  # run one step ahead in simulation
            last_time = sim.show_time()
        except simpy.core.EmptySchedule:  #if the schedule runs out of actions, break
            last_time = 10.0  #some high value, to make clear that this run did not end the right way
            break
        if not counting.goal:  #if the goal was cleared (as happens when the task finishes correctly and reaches stop), break
            break

    return np.repeat(np.array(1000 * last_time), size)  # we return time in ms
def model(rule_firing):
    """
    Create a model with two rules and let pyMC find the best value for the
    rule firing parameter.
    """
    counting.decmems = {}
    counting.set_decmem(dd)
    #adding stuff to goal buffer
    counting.goal.add(actr.chunkstring(string="isa countFrom start 2 end 4"))
    counting.model_parameters["rule_firing"] = rule_firing
    sim = counting.simulation(trace=False)
    last_time = 0
    while True:
        try:
            sim.step()
            last_time = sim.show_time()
        except simpy.core.EmptySchedule:
            break
        if not counting.goal:
            break
    return np.repeat(
        np.array(last_time),
        size)  #the output: an nparray of simulated time points
Example #4
    #parser.productionstring(name="finished: no visual input", string="""
    #=g>
    #isa             parsing_goal
    #task            reading_word
    #=visual_location>
    #isa _visuallocation
    #screen_y =ypos
    #screen_x    """ + str(last_pos_word) + """
    #==>
    #~g>
    #~imaginal>""")
    parser.goals["g"].add(
        actr.chunkstring(string="""
        isa             parsing_goal
        task            reading_word
        stack1          'S'
        right_frontier_stack1  'S'
        right_frontier_stack2  None
    """))

    if simulate:
        parser_sim = parser.simulation(
            realtime=True,
            gui=True,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=10)
    else:
        parser_sim = parser.simulation(
            realtime=False,
            gui=False,  #assumed: the remaining arguments mirror the realtime branch above (the snippet is cut off here)
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=10)
def read(parser,
         sentence=None,
         pos=None,
         critical=None,
         actions=actions,
         blind_actions=actions,
         word_freq=word_freq,
         label_freq=label_freq,
         strength_of_association={},
         decmem={},
         lexical=True,
         visual=True,
         syntactic=True,
         reanalysis=True,
         prints=True):
    """
    Read a sentence.

    :param sentence: what sentence should be read (list).
    :param pos: what pos should be used (list, matching in length with sentence).
    :param actions: dataframe of actions
    :param lexical - should lexical information affect reading time?
    :param visual - should visual information affect reading time?
    :param syntactic - should syntactic information affect reading time?
    :param reanalysis - should reanalysis of parse affect reading time?
    """

    parser.set_decmem(decmem)  # TODO: don't remove??
    tobe_removed = {
        i
        for i in range(len(sentence))
        if (re.match("[:]+", sentence[i]) or sentence[i] == "'s")
        and i != len(sentence) - 1
    }  #indices of non-words ('s, diacritics) that should not get a reading time
    if prints:
        print(sentence)
        for x in tobe_removed:
            print(sentence[x])

    critical_rules = dict()
    # for critical sentences you can assume that specific rules apply to ensure that parsing of gp sentences proceeds correctly
    # for example for sentence 'the horse raced past the barn fell', the following would work on the noun horse (uncomment if using)
    #critical_rules = {'1': [['reduce_unary', 'NP_BAR'], ['reduce_binary', 'NP'], ['shift', "''"]]}

    if not lexical:
        for x in parser.decmem:
            parser.decmem.activations[
                x] = 100  #set activations very high to nullify the effect of word retrieval (retrieval time close to 0)

    parser.retrievals = {}
    parser.set_retrieval("retrieval")
    parser.visbuffers = {}
    parser.goals = {}
    parser.set_goal("g")
    parser.set_goal(name="imaginal", delay=0)
    parser.set_goal(name="imaginal_reanalysis", delay=0)
    parser.set_goal("word_info")

    stimuli = [{} for i in range(len(sentence))]
    pos_word = 10
    environment.current_focus = (pos_word + 7 +
                                 7 * visual_effect(sentence[0], visual), 180)
    pos_words = []
    for x in range(41):
        #this overwrites any 'move eyes' rules created previously; we assume that no sentence is longer than 40 words
        parser.productionstring(name="move eyes" + str(x),
                                string="""
        =g>
        isa         reading
        state       dummy
        ==>
        =g>
        isa         reading
        state       dummy""")

    # create fixed rules for eye movements
    for i, word in enumerate(sentence):
        pos_word += 7 + 7 * visual_effect(word, visual)
        pos_words.append((pos_word, 180))
        for j in range(len(stimuli)):
            stimuli[j].update({
                i: {
                    'text': word,
                    'position': (pos_word, 180),
                    'vis_delay': visual_effect(word, visual)
                }
            })

        if i < len(sentence) - 2:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        processor       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        """ + '"' + str(sentence[i + 2]) + '"' + """
        WORD_NEXT0_POS        """ + str(pos[i + 2]) + """
        =g>
        isa             reading
        state   reading_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")
        elif i < len(sentence) - 1:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        None
        =g>
        isa             reading
        state   reading_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")

    if prints:
        print(sentence)

    parser.goals["g"].add(
        actr.chunkstring(string="""
    isa             reading
    state           reading_word
    position        0
    tag             """ + str(pos[0])))

    parser.goals["imaginal"].add(
        actr.chunkstring(string="""
    isa             action_chunk
    TREE1_LABEL         NOPOS
    TREE1_HEAD          noword
    TREE2_LABEL         xxx
    TREE2_HEAD          xxx
    TREE3_LABEL         xxx
    TREE3_HEAD          xxx
    ANTECEDENT_CARRIED  NO
    WORD_NEXT0_LEX   """ + '"' + str(sentence[1]) + '"' + """
    WORD_NEXT0_POS   """ + str(pos[1])))

    # start a dictionary that will collect all created structures, and a list of built constituents
    constituents = {}
    built_constituents = [(Tree("xxx", []), (None, "xxx")),
                          (Tree("xxx", []), (None, "xxx")),
                          (Tree("NOPOS", []), (None, "noword"))]
    final_tree = Tree("X", [])

    if prints:
        parser_sim = parser.simulation(
            realtime=False,
            gui=False,
            trace=True,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)
    else:
        parser_sim = parser.simulation(
            realtime=False,
            gui=True,
            trace=False,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)

    antecedent_carried = "NO"
    what_antecedent_carried = None

    eyemove_times = []  #reaction times per word
    #per-sentence collectors for total rule activation, agreeing actions,
    #matching features, fan etc. (used to find out what plays a role in
    #syntactic parsing for RTs)
    reanalysis_list, words_list, activations_list, agreeing_actions_list, matching_fs_list, total_fan_list, actions_list = [], [], [], [], [], [], []
    wh_gaps_list = []

    word_parsed = 0
    last_time = 0

    #per-word collectors for total rule activation, agreeing actions,
    #matching features and fan
    activations, agreeing_actions, matching_fs, total_fan = [], [], [], []

    retrieve_wh_reanalysis = None

    while True:
        try:
            parser_sim.step()
            #print(parser_sim.current_event)
        except simpy.core.EmptySchedule:
            eyemove_times = [
                10 for _ in sentence
            ]  #if something goes wrong, the model probably got stuck somewhere; report a time-out value (10 s) per word
            break
        if parser_sim.show_time() > 60:
            eyemove_times = [
                10 for _ in sentence
            ]  #this takes care of looping or excessive time spent: break after 60 s, which should definitely be enough to move on
            break
        if re.search("^SHIFT COMPLETE", str(parser_sim.current_event.action)):
            current_word_focused = pos_words.index(
                tuple(environment.current_focus))
            extra_rule_time = parser.model_parameters[
                "latency_factor"] * np.exp(
                    -parser.model_parameters["latency_exponent"] *
                    np.mean(activations) / 10)
            # two things play a role - number of matching features; fan of each matching feature; explore these two separately
            if len(eyemove_times) not in tobe_removed:
                eyemove_times.append(parser_sim.show_time() + extra_rule_time -
                                     last_time)
            else:
                tobe_removed.remove(len(eyemove_times))
            for i in range(word_parsed + 1, current_word_focused):
                eyemove_times.append(0)
                #eyemove_times.append((parser_sim.show_time() + extra_rule_time - last_time)/(current_word_focused-word_parsed))
            last_time = parser_sim.show_time()
            word_parsed = current_word_focused
        if word_parsed >= len(sentence):
            if len(eyemove_times) not in tobe_removed:
                eyemove_times.append(parser_sim.show_time() - last_time)
            break
        #this below implements carrying out an action

        if re.search("^RULE FIRED: recall action", parser_sim.current_event.action) or\
                                re.search("^RULE FIRED: move to last action", parser_sim.current_event.action):
            postulated_gaps, reduced_unary = 0, 0
            postulate_gaps, reduce_unary = True, True
            parser_sim.steps(2)  #exactly enough steps to make imaginal full
            if prints:
                print(parser.goals["imaginal"])
            #add new word to the list of used words
            built_constituents.append(
                (Tree(
                    str(parser.goals["imaginal"].copy().pop().TREE0_LABEL),
                    (str(parser.goals["imaginal"].copy().pop().TREE0_HEAD), )),
                 (None,
                  str(parser.goals["imaginal"].copy().pop().TREE0_HEAD))))
            built_constituents_reanalysis = built_constituents.copy()
            parser.goals["imaginal_reanalysis"].add(
                parser.goals["imaginal"].copy().pop())
            recently_retrieved = set()

            #set retrieve_wh to None or, if the reanalysis already postulated a gap, to "yes"
            retrieve_wh = retrieve_wh_reanalysis
            retrieve_wh_reanalysis = None
            #reset the per-word collectors for activation, agreeing actions,
            #matching features and fan
            activations, agreeing_actions, matching_fs, total_fan = [], [], [], []

            #antecedent_carried is temporarily updated in the blind analysis;
            #we record the original value in antecedent_carried_origo and
            #re-use it after the blind analysis; what_antecedent_carried
            #specifies the category of the antecedent
            antecedent_carried_origo = antecedent_carried

            first_action = True

            if word_parsed not in tobe_removed:
                reanalysis_list.append("no")  #by default, no reanalysis recorded
            #this loop runs the actual blind analysis (blind to upcoming-word information)
            while True:
                parser_retrievals, number_of_agreeing_actions, number_of_matching_fs, fan_size = ut.recall_action(
                    blind_actions,
                    parser.goals["imaginal"],
                    parser.goals["word_info"],
                    None,
                    recently_retrieved,
                    built_constituents,
                    word_freq,
                    label_freq,
                    prints=False,
                    strength_of_association=strength_of_association,
                    postulate_gaps=postulate_gaps,
                    reduce_unary=reduce_unary,
                    blind={"WORD_NEXT0_LEX", "WORD_NEXT0_POS"})

                # the activation for the first word comes only from the blind analysis
                if word_parsed == 0:
                    activations.append(parser_retrievals[0])
                    agreeing_actions.append(number_of_agreeing_actions)
                    matching_fs.append(number_of_matching_fs)
                    total_fan.append(fan_size)

                ut.collect_parse(parser_retrievals[1], built_constituents)
                tree0_label = built_constituents[-1][0].label()
                tree1_label = built_constituents[-2][0].label()
                tree2_label = built_constituents[-3][0].label()
                tree3_label = built_constituents[-4][0].label()
                children = {
                    "".join(["tree", str(x)]): ["NOPOS", "NOPOS"]
                    for x in range(4)
                }
                for x, subtree in enumerate(built_constituents[-1][0]):
                    if isinstance(subtree,
                                  Tree) and subtree.label() != ut.EMPTY:
                        children["tree0"][x] = subtree.label()
                if re.search("_BAR", children["tree0"][1]):
                    if built_constituents[-1][0][1][1].label(
                    ) == ut.EMPTY or re.search(
                            "_BAR", built_constituents[-1][0][1][1].label()):
                        children["tree0"][1] = built_constituents[-1][0][1][
                            0].label()
                    else:
                        children["tree0"][1] = built_constituents[-1][0][1][
                            1].label()
                for x, subtree in enumerate(built_constituents[-2][0]):
                    if isinstance(subtree,
                                  Tree) and subtree.label() != ut.EMPTY:
                        children["tree1"][x] = subtree.label()
                if re.search("_BAR", children["tree1"][1]):
                    if built_constituents[-2][0][1][1].label(
                    ) == ut.EMPTY or re.search(
                            "_BAR", built_constituents[-2][0][1][1].label()):
                        children["tree1"][1] = built_constituents[-2][0][1][
                            0].label()
                    else:
                        children["tree1"][1] = built_constituents[-2][0][1][
                            1].label()

                # block looping through reduce_unary (at most 2 reduce_unary allowed)
                if parser_retrievals[1] and parser_retrievals[1][
                        "action"] == 'reduce_unary':
                    reduced_unary += 1
                    if reduced_unary == 2:
                        reduce_unary = False
                        reduced_unary = 0
                else:
                    reduced_unary = 0
                    reduce_unary = True
                if parser_retrievals[1] and parser_retrievals[1][
                        "action"] == 'postulate_gap':
                    if antecedent_carried == "YES" and syntactic and re.search(
                            "t",
                            str(parser_retrievals[1]["action_result_label"]
                                [0])):
                        retrieve_wh = "yes"
                    if re.search(
                            "t",
                            str(parser_retrievals[1]["action_result_label"]
                                [0])):
                        antecedent_carried = "NO"
                    #at most 3 gaps allowed
                    if postulated_gaps > 1:
                        postulate_gaps = False
                    postulated_gaps += 1
                    ci = parser.goals["imaginal"].pop()

                    string = """
    isa             action_chunk
    WORD_NEXT0_LEX   """ + '"' + str(ci.WORD_NEXT0_LEX) + '"' + """
    WORD_NEXT0_POS   '""" + str(ci.WORD_NEXT0_POS) + """'
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    TREE0_HEAD       """ + '"' + str(parser_retrievals[1]
                                     ["action_result_label"][0]) + '"' + """
    TREE0_LEFTCHILD    """ + children["tree0"][0] + """
    TREE0_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE0_LABEL       '-NONE-'
    TREE1_LEFTCHILD    """ + children["tree1"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree1"][1] + """
    TREE0_HEADPOS     """ + str(built_constituents[-1][1][0]) + """
    TREE1_LABEL     """ + '"' + tree1_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents[-2][1][0]) + """
    TREE1_HEAD     """ + '"' + str(built_constituents[-2][1][1]) + '"' + """
    TREE2_LABEL     """ + '"' + tree2_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents[-3][1][0]) + """
    TREE2_HEAD     """ + '"' + str(built_constituents[-3][1][1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree3_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents[-4][1][1]) + '"' + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal"].add(
                        actr.chunkstring(string=string))
                    parser.goals["word_info"].add(
                        actr.chunkstring(string="""
                    isa         word
                    form       '""" + str(parser_retrievals[1]
                                          ["action_result_label"][0]) + """'
                    cat         '-NONE-'"""))

                elif parser_retrievals[1]:
                    ci = parser.goals["imaginal"].pop()

                    string = """
    isa             action_chunk
    WORD_NEXT0_LEX   """ + '"' + str(ci.WORD_NEXT0_LEX) + '"' + """
    WORD_NEXT0_POS   '""" + str(ci.WORD_NEXT0_POS) + """'
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    TREE0_LABEL     """ + '"' + str(
                        built_constituents[-1][0].label()) + '"' + """
    TREE0_HEADPOS     """ + str(built_constituents[-1][1][0]) + """
    TREE0_HEAD     """ + '"' + str(built_constituents[-1][1][1]) + '"' + """
    TREE0_LEFTCHILD    """ + children["tree0"][0] + """
    TREE0_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE1_LABEL     """ + '"' + tree1_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents[-2][1][0]) + """
    TREE1_HEAD     """ + '"' + str(built_constituents[-2][1][1]) + '"' + """
    TREE1_LEFTCHILD    """ + children["tree1"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree1"][1] + """
    TREE2_LABEL     """ + '"' + tree2_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents[-3][1][0]) + """
    TREE2_HEAD     """ + '"' + str(built_constituents[-3][1][1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree3_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents[-4][1][1]) + '"' + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal"].add(
                        actr.chunkstring(string=string))
                else:
                    break
                if parser_retrievals[1]["action"] == 'shift':
                    #sometimes the parser would stop at BAR and shift; in reality, this is not possible since BARs are artificial categories
                    if re.search("_BAR", built_constituents[-1][0].label()):
                        built_constituents[-1][0].set_label(
                            re.split("_BAR",
                                     built_constituents[-1][0].label())[0])
                    ci = parser.goals["imaginal"].pop()

                    string = """
    isa             action_chunk
    TREE1_LABEL     """ + '"' + tree0_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents[-1][1][0]) + """
    TREE1_HEAD     """ + '"' + str(built_constituents[-1][1][1]) + '"' + """
    TREE1_LEFTCHILD    """ + children["tree0"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE2_LABEL     """ + '"' + tree1_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents[-2][1][0]) + """
    TREE2_HEAD     """ + '"' + str(built_constituents[-2][1][1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree2_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents[-3][1][1]) + '"' + """
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal"].add(
                        actr.chunkstring(string=string))
                    break

            postulated_gaps, reduced_unary = 0, 0
            postulate_gaps, reduce_unary = True, True

            antecedent_carried = antecedent_carried_origo

            # the activation for the first word comes only from the blind analysis
            if word_parsed == 0:
                activations_list.append(np.mean(activations) / 10)
                agreeing_actions_list.append(np.mean(agreeing_actions))
                matching_fs_list.append(np.mean(matching_fs))
                total_fan_list.append(np.mean(total_fan))
                critical.pop(0)

            #this loop runs the potential reanalysis
            while True:
                if critical[0] != "no":
                    try:
                        critical_rule = critical_rules[critical[0]].pop(0)
                    except KeyError:
                        critical_rule = None
                    parser_retrievals, number_of_agreeing_actions, number_of_matching_fs, fan_size = ut.recall_action(
                        actions,
                        parser.goals["imaginal_reanalysis"],
                        parser.goals["word_info"],
                        critical_rule,
                        recently_retrieved,
                        built_constituents_reanalysis,
                        word_freq,
                        label_freq,
                        prints=False,
                        strength_of_association=strength_of_association,
                        number_retrieved=3,
                        postulate_gaps=postulate_gaps,
                        reduce_unary=reduce_unary,
                        blind={})
                else:
                    parser_retrievals, number_of_agreeing_actions, number_of_matching_fs, fan_size = ut.recall_action(
                        actions,
                        parser.goals["imaginal_reanalysis"],
                        parser.goals["word_info"],
                        None,
                        recently_retrieved,
                        built_constituents_reanalysis,
                        word_freq,
                        label_freq,
                        prints=False,
                        strength_of_association=strength_of_association,
                        number_retrieved=3,
                        postulate_gaps=postulate_gaps,
                        reduce_unary=reduce_unary,
                        blind={})

                activations.append(parser_retrievals[0])
                agreeing_actions.append(number_of_agreeing_actions)
                matching_fs.append(number_of_matching_fs)
                total_fan.append(fan_size)

                if first_action:
                    actions_list.append(str(parser_retrievals[1]["action"]))
                    first_action = False

                ut.collect_parse(parser_retrievals[1],
                                 built_constituents_reanalysis)
                tree0_label = built_constituents_reanalysis[-1][0].label()
                tree1_label = built_constituents_reanalysis[-2][0].label()
                tree2_label = built_constituents_reanalysis[-3][0].label()
                tree3_label = built_constituents_reanalysis[-4][0].label()
                children = {
                    "".join(["tree", str(x)]): ["NOPOS", "NOPOS"]
                    for x in range(4)
                }
                for x, subtree in enumerate(built_constituents[-1][0]):
                    if isinstance(subtree,
                                  Tree) and subtree.label() != ut.EMPTY:
                        children["tree0"][x] = subtree.label()
                if re.search("_BAR", children["tree0"][1]):
                    if built_constituents[-1][0][1][1].label(
                    ) == ut.EMPTY or re.search(
                            "_BAR", built_constituents[-1][0][1][1].label()):
                        children["tree0"][1] = built_constituents[-1][0][1][
                            0].label()
                    else:
                        children["tree0"][1] = built_constituents[-1][0][1][
                            1].label()
                for x, subtree in enumerate(built_constituents[-2][0]):
                    if isinstance(subtree,
                                  Tree) and subtree.label() != ut.EMPTY:
                        children["tree1"][x] = subtree.label()
                if re.search("_BAR", children["tree1"][1]):
                    if built_constituents[-2][0][1][1].label(
                    ) == ut.EMPTY or re.search(
                            "_BAR", built_constituents[-2][0][1][1].label()):
                        children["tree1"][1] = built_constituents[-2][0][1][
                            0].label()
                    else:
                        children["tree1"][1] = built_constituents[-2][0][1][
                            1].label()

                if re.search("-TPC", tree0_label) or (re.search(
                        "^W", tree0_label)):
                    antecedent_carried = "YES"
                    what_antecedent_carried = str(tree0_label)

                # block looping through reduce_unary (at most 2 reduce_unary allowed)
                if parser_retrievals[1] and parser_retrievals[1][
                        "action"] == 'reduce_unary':
                    reduced_unary += 1
                    if reduced_unary == 2:
                        reduce_unary = False
                        reduced_unary = 0
                else:
                    reduced_unary = 0
                    reduce_unary = True
                if parser_retrievals[1] and parser_retrievals[1][
                        "action"] == 'postulate_gap':
                    if antecedent_carried_origo == "YES" and syntactic and re.search(
                            "t",
                            str(parser_retrievals[1]["action_result_label"]
                                [0])) and retrieve_wh != "yes":
                        retrieve_wh_reanalysis = "yes"  #record that based on the upcoming word info, trace should be postulated; only if the original structure did not postulate it
                    if re.search(
                            "t",
                            str(parser_retrievals[1]["action_result_label"]
                                [0])):
                        antecedent_carried = "NO"
                    #at most 3 gaps allowed
                    if postulated_gaps > 1:
                        postulate_gaps = False
                    postulated_gaps += 1
                    ci = parser.goals["imaginal_reanalysis"].pop()
                    parser.decmem.add(ci, time=parser_sim.show_time())

                    string = """
    isa             action_chunk
    WORD_NEXT0_LEX   """ + '"' + str(ci.WORD_NEXT0_LEX) + '"' + """
    WORD_NEXT0_POS   """ + '"' + str(ci.WORD_NEXT0_POS) + '"' + """
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    TREE0_HEAD       """ + '"' + str(
                        parser_retrievals[1]["action_result_label"][0]
                    ) + '"' + """
    TREE0_LABEL       '-NONE-'
    TREE0_HEADPOS     """ + str(built_constituents_reanalysis[-1][1][0]) + """
    TREE0_LEFTCHILD    """ + children["tree0"][0] + """
    TREE0_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE1_LABEL     """ + '"' + tree1_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents_reanalysis[-2][1][0]) + """
    TREE1_HEAD     """ + '"' + str(
                        built_constituents_reanalysis[-2][1][1]) + '"' + """
    TREE1_LEFTCHILD    """ + children["tree1"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree1"][1] + """
    TREE2_LABEL     """ + '"' + tree2_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents_reanalysis[-3][1][0]) + """
    TREE2_HEAD     """ + '"' + str(built_constituents_reanalysis[-3][1]
                                   [1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree3_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents_reanalysis[-4][1]
                                   [1]) + '"' + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal_reanalysis"].add(
                        actr.chunkstring(string=string))
                    parser.goals["word_info"].add(
                        actr.chunkstring(string="""
                    isa         word
                    form       '""" + str(parser_retrievals[1]
                                          ["action_result_label"][0]) + """'
                    cat         '-NONE-'"""))

                elif parser_retrievals[1]:
                    ci = parser.goals["imaginal_reanalysis"].pop()
                    parser.decmem.add(ci, time=parser_sim.show_time())

                    string = """
    isa             action_chunk
    WORD_NEXT0_LEX   """ + '"' + str(ci.WORD_NEXT0_LEX) + '"' + """
    WORD_NEXT0_POS   """ + '"' + str(ci.WORD_NEXT0_POS) + '"' + """
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    TREE0_LABEL     """ + '"' + str(built_constituents_reanalysis[-1][0].label(
                    )) + '"' + """
    TREE0_HEADPOS     """ + str(built_constituents_reanalysis[-1][1][0]) + """
    TREE0_HEAD     """ + '"' + str(
                        built_constituents_reanalysis[-1][1][1]) + '"' + """
    TREE0_LEFTCHILD    """ + children["tree0"][0] + """
    TREE0_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE1_LABEL     """ + '"' + tree1_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents_reanalysis[-2][1][0]) + """
    TREE1_HEAD     """ + '"' + str(built_constituents_reanalysis[-2][1]
                                   [1]) + '"' + """
    TREE1_LEFTCHILD    """ + children["tree1"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree1"][1] + """
    TREE2_LABEL     """ + '"' + tree2_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents_reanalysis[-3][1][0]) + """
    TREE2_HEAD    """ + '"' + str(built_constituents_reanalysis[-3][1]
                                  [1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree3_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents_reanalysis[-4][1]
                                   [1]) + '"' + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal_reanalysis"].add(
                        actr.chunkstring(string=string))
                else:
                    break
                if parser_retrievals[1]["action"] == 'shift':
                    #sometimes the parser would stop at BAR and shift; in reality, this is not possible since BARs are artificial categories
                    if re.search("_BAR",
                                 built_constituents_reanalysis[-1][0].label()):
                        built_constituents_reanalysis[-1][0].set_label(
                            re.split(
                                "_BAR", built_constituents_reanalysis[-1]
                                [0].label())[0])
                    ci = parser.goals["imaginal_reanalysis"].pop()
                    parser.decmem.add(ci, time=parser_sim.show_time())
                    #built constituents have head info; if it is not present, use the info from imaginal_reanalysis (stores head info for terminal nodes)

                    string = """
    isa             action_chunk
    TREE1_LABEL     """ + '"' + tree0_label + '"' + """
    TREE1_HEADPOS     """ + str(built_constituents_reanalysis[-1][1][0]) + """
    TREE1_HEAD     """ + '"' + str(
                        built_constituents_reanalysis[-1][1][1]) + '"' + """
    TREE1_LEFTCHILD    """ + children["tree0"][0] + """
    TREE1_RIGHTCHILD    """ + children["tree0"][1] + """
    TREE2_LABEL     """ + '"' + tree1_label + '"' + """
    TREE2_HEADPOS     """ + str(built_constituents_reanalysis[-2][1][0]) + """
    TREE2_HEAD     """ + '"' + str(built_constituents_reanalysis[-2][1]
                                   [1]) + '"' + """
    TREE3_LABEL     """ + '"' + tree2_label + '"' + """
    TREE3_HEAD     """ + '"' + str(built_constituents_reanalysis[-3][1]
                                   [1]) + '"' + """
    ANTECEDENT_CARRIED      """ + antecedent_carried + """
    ACTION_PREV     """ + str(parser_retrievals[1]["action"])
                    parser.goals["imaginal_reanalysis"].add(
                        actr.chunkstring(string=string))
                    break

            cg = parser.goals["g"].pop()
            parser.goals["g"].add(
                actr.chunkstring(string="""
    isa             reading
    position    """ + str(cg.position) + """
    reanalysis      None
    retrieve_wh     """ + str(retrieve_wh) + """
    what_retrieve     """ + str(
                    what_antecedent_carried)  #used only for recall of category
                                 + """
    state           finished_recall"""))
            if built_constituents != built_constituents_reanalysis:
                if reanalysis and len(built_constituents) != len(
                        built_constituents_reanalysis):
                    #mark that the reanalysis should take place
                    parser.goals["g"].add(
                        actr.chunkstring(string="""
    isa             reading
    position    """ + str(cg.position) + """
    reanalysis      yes
    retrieve_wh     """ + str(retrieve_wh) + """
    what_retrieve     """ + str(what_antecedent_carried
                                )  #used only for recall of category
                                         + """
    state           finished_recall"""))
                    if word_parsed not in tobe_removed:
                        reanalysis_list[-1] = "yes"
                    if prints:
                        original_tree = Tree(
                            "X", next(zip(*built_constituents[3:])))
                        print("DRAWING TREE TO BE REANALYSED")
                        print("********************************")
                        original_tree.draw()
                built_constituents = built_constituents_reanalysis.copy()
                parser.goals["imaginal"].add(
                    parser.goals["imaginal_reanalysis"].copy().pop())

            final_tree = Tree("X", next(zip(*built_constituents[3:])))
            if word_parsed not in tobe_removed:
                activations_list.append(np.mean(activations) / 10)
                wh_gaps_list.append(str(retrieve_wh))
                agreeing_actions_list.append(np.mean(agreeing_actions))
                matching_fs_list.append(np.mean(matching_fs))
                total_fan_list.append(np.mean(total_fan))
                words_list.append(sentence[word_parsed])
                critical.pop(0)

            if prints:
                print("DRAWING TREE")
                print("********************************")
                # print(final_tree)
                # final_tree.pretty_print()
                final_tree.draw()

    return (words_list, activations_list[:-1], wh_gaps_list, reanalysis_list,
            agreeing_actions_list[:-1], matching_fs_list[:-1],
            total_fan_list[:-1])
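
#A hypothetical call of `read` (the sentence, POS tags and `critical` list
#are illustrative stand-ins; `parser`, `actions`, `word_freq` etc. come from
#the surrounding module, as in the function's default arguments):
words, acts, whs, reans, agree, match, fan = read(
    parser,
    sentence=["the", "horse", "raced", "past", "the", "barn", "fell"],
    pos=["DT", "NN", "VBD", "IN", "DT", "NN", "VBD"],
    critical=["no"] * 10,
    prints=False)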
Example #6
import pyactr as actr

actr.chunktype("parsing_goal", "stack_top stack_bottom parsed_word task")
actr.chunktype("sentence", "word1 word2 word3")
actr.chunktype("word", "form, cat")

parser = actr.ACTRModel()
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0.2)

dm.add(
    actr.chunkstring(string="""
    isa word
    form Mary
    cat ProperN
"""))
dm.add(
    actr.chunkstring(string="""
    isa word
    form Bill
    cat ProperN
"""))
dm.add(actr.chunkstring(string="""
    isa word
    form likes
    cat V
"""))

g.add(actr.chunkstring(string="""
    isa parsing_goal
    task parsing
    stack_top 'S'
"""))  #assumed starting goal; the source snippet is cut off mid-call here
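
#A plausible continuation (hypothetical): place the sentence to be parsed in
#the imaginal buffer, matching the `sentence` chunk type declared above.
imaginal.add(actr.chunkstring(string="""
    isa sentence
    word1 Mary
    word2 likes
    word3 Bill
"""))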
def run_extraction_stimulus(sentence):
    """
    This runs one example of stimulus from the parser ACT-R model.
    """
    parser.set_decmem(DM)

    parser.retrievals = {}
    parser.set_retrieval("retrieval")
    parser.visbuffers = {}
    parser.goals = {}
    parser.set_goal("g")
    parser.set_goal(name="imaginal", delay=0)

    stimuli = [{} for i in range(len(sentence))]
    pos_word = 30
    environment.current_focus = (pos_word + 5 * len(sentence.iloc[0]), 180)
    last_pos_word = 0
    for i, word in enumerate(sentence):
        pos_word += 5 * len(word)
        for j in range(len(stimuli)):
            if j == i:
                stimuli[j].update({
                    i: {
                        'text': word,
                        'position': (pos_word, 180),
                        'vis_delay': len(word)
                    }
                })
            else:
                stimuli[j].update({
                    i: {
                        'text': "___",
                        'position': (pos_word, 180),
                        'vis_delay': 3
                    }
                })

        parser.productionstring(name="move eyes" + str(i),
                                string="""
        =g>
        isa             parsing_goal
        task            move_eyes
        =visual_location>
        isa _visuallocation
        screen_y =ypos
        screen_x    """ + str(last_pos_word) + """
        ?manual>
        preparation       free
        processor       free
        ==>
        =g>
        isa     parsing_goal
        task   reading_word
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word) + """
        screen_y =ypos
        ~visual>""")
        last_pos_word = pos_word
        pos_word += 5 * len(word)

    parser.goals["g"].add(
        actr.chunkstring(string="""
        isa             parsing_goal
        task            reading_word
        stack1          'S'
        right_frontier_stack1  'S'
        right_frontier_stack2  None
    """))

    parser_sim = parser.simulation(
        realtime=False,
        gui=True,
        trace=False,
        environment_process=environment.environment_process,
        stimuli=stimuli,
        triggers='space',
        times=10)

    spacebar_press_times = []

    i = 0
    press_time = 0
    while True:
        try:
            parser_sim.step()
        except simpy.core.EmptySchedule:
            spacebar_press_times = [
                np.nan for _ in sentence
            ]  #if something goes wrong, the model probably got stuck somewhere; report nan per word
            break
        if parser_sim.show_time() > 20:
            spacebar_press_times = [
                np.nan for _ in sentence
            ]  #this takes care of looping: break after 20 s, which should definitely be enough to move on
            break
        if parser_sim.current_event.action == "KEY PRESSED: SPACE":
            spacebar_press_times.append(parser_sim.show_time() - press_time)
            press_time = parser_sim.show_time()
            i += 1
        if i > 9:  #ignore words after the matrix verb, i.e., any word index above 9
            break

    final_times = np.array(spacebar_press_times[1:])
    return final_times
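
#Hypothetical usage: the function indexes `sentence` with .iloc, so a pandas
#Series of words is assumed (the example sentence is a stand-in).
import pandas as pd

sent = pd.Series(["the", "reporter", "who", "the", "senator", "attacked",
                  "admitted", "the", "error"])
press_times = run_extraction_stimulus(sent)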
def reading(grouped, rank, declchunks):
    """
    Main function that runs reading on each sentence in `grouped`.
    """

    for name, group in grouped:  #name is name of the sentence in materials, group = sentence

        #print(name)

        stimulus = {}
        stimuli = []

        y_position = 0  #this will check if some text appears on the same screen or not

        freq = {}

        positions = {}

        lastwords = {}  #storing last words in each line

        #the following loop runs through the sentence word by word and creates
        #a stimulus out of it, recording the exact position; it also stores
        #each word in declarative memory with the correct activation

        for idx, i in enumerate(group.index):
            if group.IA_TOP[i] < y_position:
                stimuli.append(stimulus)
                stimulus = {}
            word = str(group.WORD[i])
            pos = str(group.PART_OF_SPEECH[i])
            stimulus[i] = {
                'text': word,
                'position': (group.IA_CENTER[i], group.IA_TOP[i]),
                "vis_delay": len(word)
            }
            positions[(int(group.IA_CENTER[i]),
                       int(group.IA_TOP[i]))] = [idx, 0]  #dict to record RTs
            lastwords[str(group.IA_TOP[i])] = str(
                group.LAST_WORD[i])  #dict for last words in line
            last_position = (group.IA_CENTER[i], group.IA_TOP[i]
                             )  #last word in sentence; when reached, stop sim

            #add word into memory
            if actr.makechunk("", typename="word", form=word,
                              cat=pos) not in declchunks:
                freq[word] = group.FREQUENCY[i]
                if freq[word] == 0:
                    freq[word] = 1
                word_freq = freq[
                    word] * USED_WORDS / 100  #BNC has 100 million words; estimated usage by entering adulthood is 112.5 million, so we multiply by 1.125
                time_interval = SEC_IN_TIME / word_freq
                chunk_times = np.arange(start=-time_interval,
                                        stop=-(time_interval * word_freq) - 1,
                                        step=-time_interval)
                declchunks[actr.makechunk("",
                                          typename="word",
                                          form=word,
                                          cat=pos)] = math.log(
                                              np.sum(
                                                  (0 - chunk_times)**(-DECAY)))
            y_position = group.IA_TOP[i]

        #the following 2 rules signal whether the reader should move to a new line or not depending on the x position

        for y in lastwords:
            tempstring = "\
        =g>\
        isa     read\
        state   parse\
        ?retrieval>\
        buffer  full\
        =visual_location>\
        isa _visuallocation\
        screen_y =ypos\
        screen_y " + y + "\
        screen_x ~ " + lastwords[y] + "\
        ==>\
        =g>\
        isa     read\
        state   start\
        ?visual_location>\
        attended False\
        +visual_location>\
        isa _visuallocation\
        screen_x lowest\
        screen_y =ypos\
        ~retrieval>"

            parser.productionstring(name="move eyes in the line " + y,
                                    string=tempstring)

            tempstring = "\
        =g>\
        isa     read\
        state   parse\
        ?retrieval>\
        buffer  full\
        =visual_location>\
        isa _visuallocation\
        screen_y =ypos\
        screen_y " + y + "\
        screen_x " + lastwords[y] + "\
        ==>\
        =g>\
        isa     read\
        state   start\
        ?visual_location>\
        attended False\
        +visual_location>\
        isa _visuallocation\
        screen_x lowest\
        screen_y onewayclosest\
        ~retrieval>"

            parser.productionstring(name="move eyes to a new line" + y,
                                    string=tempstring)

        #in the following part, the parser is made ready for reading - modules and buffers are initialized, focus is placed on the 1st word in sentence

        stimuli.append(stimulus)

        parser.decmems = {}
        parser.set_decmem({x: np.array([]) for x in declchunks})

        parser.decmem.activations.update(declchunks)

        parser.retrievals = {}
        parser.set_retrieval("retrieval")

        parser.visbuffers = {}

        environment.current_focus = [
            stimuli[0][min(stimuli[0])]['position'][0],
            stimuli[0][min(stimuli[0])]['position'][1]
        ]  #focus on the first word

        parser.visualBuffer("visual",
                            "visual_location",
                            parser.decmem,
                            finst=80)
        parser.goals = {}
        parser.set_goal("g")
        parser.set_goal("g2")
        parser.goals["g"].add(
            actr.chunkstring(string="""
                isa     read
                state   start"""))
        parser.goals["g2"].add(
            actr.chunkstring(string="""
                isa     parsing
                top     None"""))
        parser.goals["g2"].delay = 0.2

        #simulation is started
        sim = parser.simulation(
            realtime=False,
            trace=False,
            gui=True,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='A',
            times=100)

        #variables that are used in recording eye fixation times are initialized
        last_time = 0
        cf = tuple(environment.current_focus)
        generated_list = []

        #we'll run a loop in which we proceed event by event; we record eye
        #fixation times (the time until cf, the eye focus, changes); any break
        #signals the end of the simulation; a break can arise not only when
        #the sentence is finished but also when something goes wrong, e.g.,
        #when simulated time exceeds 100 seconds (say, because parameter
        #values were very high) or when the deck of events becomes empty
        while True:
            if sim.show_time() > 100:
                generated_list = [
                    len(group.WORD) - 2
                ] + [0] * (len(group.WORD) - 2)  #0 if getting stuck
                break
            try:
                #next event
                sim.step()
                #print(sim.current_event)
            except (EmptySchedule, OverflowError):
                generated_list = [len(group.WORD) - 2] + [10000] * (
                    len(group.WORD) - 2
                )  #10000 if not finishing or overflowing (too much time)
                break
            if not positions:
                break
            if cf[0] != environment.current_focus[0] or cf[
                    1] != environment.current_focus[1]:
                positions[cf][1] = 1000 * (sim.show_time() - last_time
                                           )  #time in milliseconds
                last_time = sim.show_time()
                cf = tuple(environment.current_focus)
            if cf == last_position:
                break

        #after simulation, we collect eye fixation times in a simple list that can be transferred using MPI
        if not generated_list:
            ordered_keys = sorted(list(positions),
                                  key=lambda x: positions[x][0]
                                  )  #keys ordered from first word to last word
            generated_list = [len(group.WORD) - 2] + [
                positions[x][1] for x in ordered_keys
            ][1:-1]  #first and last words ignored
        assert len(generated_list) == len(
            group.WORD
        ) - 1, "In %s, the length of generated RTs would be %s, expected number of words is %s. This is an illegal mismatch" % (
            name, len(generated_list) + 1, len(group.WORD))
        comm.Send(bytearray(name, 'utf-8'), dest=0, tag=0)
        sent_list = np.array(generated_list, dtype=np.float32)  #float32 to match MPI.FLOAT below
        comm.Send([sent_list, MPI.FLOAT], dest=0, tag=1)

    #when we are done with all simulations, we send info to the master
    comm.Send(bytearray('DONE', 'utf-8'), dest=0, tag=0)

    return declchunks
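
#A hypothetical master-side counterpart to the worker's Send calls above.
#Names, tags and buffer sizes are assumptions that mirror the worker side;
#`n_words` must be known to the master (e.g., from the materials file).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

def collect_one(n_words):
    name_buf = bytearray(128)  #buffer for the sentence name (tag 0)
    comm.Recv(name_buf, source=MPI.ANY_SOURCE, tag=0)
    name = name_buf.rstrip(b'\x00').decode('utf-8')
    if name == 'DONE':  #the worker signals that it has finished
        return None
    rts = np.empty(n_words - 1, dtype=np.float32)  #RT list sent with tag 1
    comm.Recv([rts, MPI.FLOAT], source=MPI.ANY_SOURCE, tag=1)
    return name, rts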
environment = actr.Environment(focus_position=(320, 180))

actr.chunktype("parsing_goal",
               "task stack1 stack2 stack3 stack4 parsed_word right_frontier found gapped")
actr.chunktype("parse_state",
               "node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment, subsymbolic=True, retrieval_threshold=-5, latency_factor=0.1, latency_exponent=0.13)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)

dm.add(actr.chunkstring(string="""
    isa  word
    form 'Mary'
    cat  'ProperN'
"""))

dm.add(actr.chunkstring(string="""
    isa  word
    form 'The'
    cat  'Det'
"""))

dm.add(actr.chunkstring(string="""
    isa  word
    form 'the'
    cat  'Det'
"""))
dm.add(actr.chunkstring(string="""
Example #10
"""
CFG for the a^n b^n language.
"""

import pyactr as actr

cfg = actr.ACTRModel()

actr.chunktype("countOrder", "first, second")
actr.chunktype("countFrom", ("start", "end", "count", "terminal"))

dm = cfg.decmem
dm.add(actr.chunkstring(string="""
    isa         countOrder
    first       1
    second      2
"""))
dm.add(actr.chunkstring(string="""
    isa         countOrder
    first       2
    second      3
"""))
dm.add(actr.chunkstring(string="""
    isa         countOrder
    first       3
    second      4
"""))
dm.add(actr.chunkstring(string="""
    isa         countOrder
    first       4
    second      5
"""))
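
#A minimal sketch (hypothetical, for context) of how a counting rule could be
#written over these countOrder chunks: the goal holds a countFrom chunk and
#the rule requests the successor of the current count from declarative memory.
cfg.goal.add(actr.chunkstring(string="""
    isa         countFrom
    start       1
    end         4
"""))

cfg.productionstring(name="start counting", string="""
    =g>
    isa         countFrom
    start       =x
    count       None
    ==>
    =g>
    isa         countFrom
    count       =x
    +retrieval>
    isa         countOrder
    first       =x
""")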
Example #11
"""
A basic model of grammar.
"""

import pyactr as actr

regular_grammar = actr.ACTRModel()

actr.chunktype("goal_chunk", "mother daughter1 daughter2 state")

dm = regular_grammar.decmem

regular_grammar.goal.add(actr.chunkstring(string="""
    isa         goal_chunk
    mother      'NP'
    state       rule
"""))

regular_grammar.productionstring(name="NP ==> N NP", string="""
    =g>
    isa         goal_chunk
    mother      'NP'
    daughter1   None
    daughter2   None
    state       rule
    ==>
    =g>
    isa         goal_chunk
    daughter1   'N'
    daughter2   'NP'
    state       show
""")
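
#A usage sketch: running the model for a short stretch of simulated time
#prints a trace of the rule firing (illustrative; further productions would
#be needed to spell the daughters out).
sim = regular_grammar.simulation(trace=True)
sim.run(0.5)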
Example #12
def read(parser,
         sentence=None,
         pos=None,
         activations=None,
         strength_of_association={},
         weight=1,
         threshold=0,
         decmem={},
         lexical=True,
         visual=True,
         syntactic=True,
         reanalysis=True,
         prints=True,
         extra_prints=True,
         condition=None,
         sent_nr=None):
    """
    Read a sentence.

    :param sentence: what sentence should be read (list).
    :param pos: what pos should be used (list, matching in length with sentence).
    :param activations: dataframe of activations
    :param lexical - should lexical information affect reading time?
    :param visual - should visual information affect reading time?
    :param syntactic - should syntactic information affect reading time?
    :param reanalysis - should reanalysis of parse affect reading time?
    """
    if extra_prints:
        start = time.process_time()
    parser.set_decmem(decmem)
    parser.decmem.activations = decmem.activations
    tobe_removed = {
        i
        for i in range(len(sentence))
        if (re.match("[:]+", sentence[i]) or sentence[i] == "'s")
        and i != len(sentence) - 1
    }  #we remove non-words ('s, diacritics)
    if prints:
        print(sentence)
        for x in tobe_removed:
            print(sentence[x])

    if not lexical:
        for x in parser.decmem:
            parser.decmem.activations[
                x] = 100  #set activations very high to nullify the effect of word retrieval (retrieval time close to 0)

    parser.retrievals = {}
    parser.set_retrieval("retrieval")
    parser.visbuffers = {}
    parser.goals = {}
    parser.set_goal("g")
    parser.set_goal(name="imaginal", delay=0)
    parser.set_goal(name="imaginal_reanalysis", delay=0)
    parser.set_goal("word_info")

    stimuli = [{}]
    pos_word = 10  #starting x-position; an arbitrary small positive offset
    environment.current_focus = (pos_word + 7 +
                                 7 * visual_effect(sentence[0], visual), 180)
    for x in range(41):
        #this overwrites any previously created move-eyes productions with dummies; we assume no sentence is longer than about 40 words
        parser.productionstring(name="move eyes" + str(x),
                                string="""
        =g>
        isa         reading
        state       dummy
        ==>
        =g>
        isa         reading
        state       dummy""")

    if extra_prints:
        end = time.process_time()
        print("First part",
              condition,
              sent_nr,
              "TIME:",
              end - start,
              flush=True)
        start = time.process_time()

    for i, word in enumerate(sentence):
        pos_word += 7 + 7 * visual_effect(word, visual)
        stimuli[0].update({
            i: {
                'text': word,
                'position': (pos_word, 180),
                'vis_delay': visual_effect(word, visual)
            }
        })

        if i < len(sentence) - 3:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        """ + '"' + str(sentence[i + 2]) + '"' + """
        WORD_NEXT0_POS        """ + str(pos[i + 2]) + """
        WORD_NEXT1_LEX        """ + '"' + str(sentence[i + 3]) + '"' + """
        WORD_NEXT1_POS        """ + str(pos[i + 3]) + """
        =g>
        isa             reading
        state   finding_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")
        elif i < len(sentence) - 2:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        """ + '"' + str(sentence[i + 2]) + '"' + """
        WORD_NEXT0_POS        """ + str(pos[i + 2]) + """
        WORD_NEXT1_LEX        None
        =g>
        isa             reading
        state   finding_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")
        elif i < len(sentence) - 1:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        None
        WORD_NEXT1_LEX        None
        =g>
        isa             reading
        state   finding_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")

    if extra_prints:
        end = time.process_time()
        print("Second part",
              condition,
              sent_nr,
              "TIME:",
              end - start,
              flush=True)
        start = time.process_time()

    parser.goals["g"].add(
        actr.chunkstring(string="""
    isa             reading
    state           reading_word
    position        0
    tag             """ + str(pos[0])))

    parser.goals["imaginal"].add(
        actr.chunkstring(string="""
    isa             action_chunk
    TREE1_LABEL         NOPOS
    TREE1_HEAD          noword
    TREE2_LABEL         xxx
    TREE2_HEAD          xxx
    TREE3_LABEL         xxx
    TREE3_HEAD          xxx
    ANTECEDENT_CARRIED  NO
    WORD_NEXT0_LEX   """ + '"' + str(sentence[1]) + '"' + """
    WORD_NEXT0_POS   """ + str(pos[1])))

    # start a dictionary that will collect all created structures, and a list of built constituents
    built_constituents = [(Tree("xxx", []), (None, "xxx")),
                          (Tree("xxx", []), (None, "xxx")),
                          (Tree("NOPOS", []), (None, "noword"))]
    final_tree = Tree("X", [])

    if prints:
        parser_sim = parser.simulation(
            realtime=False,
            gui=False,
            trace=True,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)
    else:
        parser_sim = parser.simulation(
            realtime=False,
            gui=True,
            trace=False,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)

    antecedent_carried = "NO"
    what_antecedent_carried = None

    eye_mvt_times = []  #reaction times, recorded and returned

    reanalyses = []  #reanalysis: 0 - no; 1 - yes

    recall_failures = []  #prob that the parser fails to recall rule

    word_parsed = min(activations['position'])
    last_time = 0

    if extra_prints:
        end = time.process_time()
        print(" Third part",
              condition,
              sent_nr,
              "TIME:",
              end - start,
              flush=True)
        start = time.process_time()

    while True:
        try:
            parser_sim.step()
            #print(parser_sim.current_event)
        except simpy.core.EmptySchedule:
            eye_mvt_times = [
                10 for _ in sentence
            ]  #if sth goes wrong, it's probably because it got stuck somewhere; in that case report time-out time per word (10 s) or nan
            recall_failures = [
                1 for _ in sentence
            ]  #if sth goes wrong, it's probably because it got stuck somewhere; in that case report failure
            reanalyses = [1 for _ in sentence]
            break
        if parser_sim.show_time() > 60:
            eye_mvt_times = [
                10 for _ in sentence
            ]  #this takes care of looping or excessive time spent - break if the model loops (10 s should definitely be enough to move on)
            recall_failures = [
                1 for _ in sentence
            ]  #if sth goes wrong, it's probably because it got stuck somewhere; in that case report failure
            reanalyses = [1 for _ in sentence]
            break
        if re.search("^SHIFT COMPLETE", str(parser_sim.current_event.action)):
            activation = activations[activations['position'].isin(
                [word_parsed])]['activation'].to_numpy()[0]
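            # extra retrieval time follows the ACT-R latency equation
            # T = F * exp(-f * A): latency_factor is F, latency_exponent is f,
            # and weight rescales the activation A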
            extra_rule_time = parser.model_parameters[
                "latency_factor"] * np.exp(
                    -parser.model_parameters["latency_exponent"] *
                    (activation * weight))

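            # failure probability is the complement of the ACT-R retrieval
            # probability 1 / (1 + exp(-(A - threshold) / s)), with noise s = 0.4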
            recall_failures.append(
                1 - 1 / (1 + np.exp(-(activation - threshold) / 0.4)))

            #reanalysis - adds prob. of regression
            reanalysis = activations[activations['position'].isin(
                [word_parsed])]['reanalysis'].values[0]
            if reanalysis == "yes":
                reanalyses.append(1)
            else:
                reanalyses.append(0)

            # tobe_removed stores positions of parts of words (e.g., 's - we do not calculate RTs on those)
            if len(eye_mvt_times) not in tobe_removed:
                eye_mvt_times.append(parser_sim.show_time() + extra_rule_time -
                                     last_time)
            else:
                tobe_removed.remove(len(eye_mvt_times))
            last_time = parser_sim.show_time()
        if re.search(r"^ENCODED VIS OBJECT",
                     str(parser_sim.current_event.action)):
            word_parsed += 1

        if re.search(
                "^RULE FIRED: move attention",
                str(parser_sim.current_event.action)
        ) and word_parsed >= max(
                activations['position']
        ):  #the last word is stopped after move attention - there is nothing to move attention to

            activation = activations[activations['position'].isin(
                [word_parsed])]['activation'].to_numpy()[0]

            extra_rule_time = parser.model_parameters[
                "latency_factor"] * np.exp(
                    -parser.model_parameters["latency_exponent"] *
                    (activation * weight))

            recall_failures.append(
                1 - 1 / (1 + np.exp(-(activation - threshold) / 0.4)))

            #reanalysis - adds prob. of regression
            reanalysis = activations[activations['position'].isin(
                [word_parsed])]['reanalysis'].values[0]
            if reanalysis == "yes":
                reanalyses.append(1)
            else:
                reanalyses.append(0)

            if len(eye_mvt_times) not in tobe_removed:
                eye_mvt_times.append(
                    parser_sim.show_time() + extra_rule_time - last_time
                )  #we could eventually add 150 ms to the last word (roughly, pressing the key; this amount of time used in G&G experiment simulation)
            break

        #this below - carrying out an action

        if re.search("^RULE FIRED: recall action", parser_sim.current_event.action) or\
                                re.search("^RULE FIRED: move to last action", parser_sim.current_event.action):
            parser_sim.steps(2)  #exactly enough steps to make imaginal full

            cg = parser.goals["g"].pop()
            wi = parser.goals["word_info"].copy().pop()
            parser.goals["g"].add(
                actr.chunkstring(string="""
    isa             reading
    position    """ + str(cg.position) + """
    reanalysis      no
    retrieve_wh     no
    state           finished_recall"""))

            if extra_prints:
                end = time.process_time()
                print("Loop",
                      condition,
                      sent_nr,
                      "TIME:",
                      end - start,
                      flush=True)
                start = time.process_time()
    #final_times = [ eye_mvt_times[0], eye_mvt_times[1], sum(eye_mvt_times[2:]) ] # this would create three measures following Staub - pre-critical word, critical word, spillover
    if prints:
        print("FINAL TIMES")
        print(eye_mvt_times[1:])

    if extra_prints:
        end = time.process_time()
        print("End", condition, sent_nr, "TIME:", end - start, flush=True)
        start = time.process_time()

    # return from the first element, because critical=0 is one word before the regions reported in Staub (11)

    if len(eye_mvt_times) == 0:
        eye_mvt_times = [
            -10 for _ in sentence
        ]  #if something went wrong and no times were recorded, report a clearly invalid sentinel value per word
        recall_failures = [
            1 for _ in sentence
        ]  #if sth goes wrong, it's probably because it got stuck somewhere; in that case report failures
        reanalyses = [1 for _ in sentence]

    return np.array(eye_mvt_times[1:]), np.array(reanalyses[1:]), np.array(
        recall_failures[1:])
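
# A minimal usage sketch (hypothetical names, not part of the original code):
# given a parser and an activations dataframe with 'position', 'activation'
# and 'reanalysis' columns, read() returns per-word eye-movement times plus
# reanalysis and recall-failure estimates, e.g.:
#
#     times, regressions, failures = read(parser, sentence=words, pos=tags,
#                                         activations=acts, decmem=dm)
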
Example #13
def run_fan_exp():
    """
    Run fan experiment.
    """
    sample = []

    for sentence in SENTENCES:
        sentence = sentence.split()
        run_time = 0
        for i in range(2):
            recall_by_location["utility"] = i #change utility so that the other way of recalling becomes active
            parser.goals["g"].add(actr.chunkstring(string="""
                isa             parsing_goal
                task            reading_word
                stack1          S
                right_frontier  S
                dref_peg        1
            """))
            parser.decmems = {}
            parser.decmems['decmem'] = DM.copy()
            parser.retrieval.finst = 5

            environment.current_focus = (160, 180)
            stimuli = [{x: {'text': word, 'position': (160+x*40, 180)}\
                       for x, word in enumerate(sentence)}]
            parser.visualBuffer("visual", "visual_location",
                                parser.decmem, finst=80)
            parser_sim = parser.simulation(
                realtime=False,
                gui=True,
                environment_process=environment.environment_process,
                stimuli=stimuli,
                triggers='space',
                times=20)

            parser.model_parameters["motor_prepared"] = True
            parser.model_parameters["emma_noise"] = False

            while True:
                try:
                    parser_sim.step()
                    #print(parser_sim.current_event)
                except simpy.core.EmptySchedule:
                    break
                if re.search("^KEY PRESSED: J",\
                             str(parser_sim.current_event.action)):
                    parser.retrieval.pop()
                    # record time, use average of two runs if
                    # there is a previously recorded time
                    # to get average of recalling by person+location
                    if run_time:
                        run_time += parser_sim.show_time()
                        run_time = run_time/2
                    else:
                        run_time = parser_sim.show_time()
                        #print("RT", run_time)
                        #input()
                    break

        sample.append(run_time)

    return sample
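
# A minimal usage sketch (assumes SENTENCES and the parser above are in scope):
# each entry of the returned sample is one simulated reading time, averaged
# over recall by person and recall by location:
#
#     sample = run_fan_exp()
#     print(list(zip(SENTENCES, sample)))
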
Example #14
"""

import pyactr as actr

import random

car = actr.makechunk(nameofchunk="car",\
                      typename="word", phonology="/ka:/", meaning="[[car]]", category="noun", number="sg", syncat="subject")

agreement = actr.ACTRModel()

dm = agreement.decmem
dm.add(car)

agreement.goal.add(
    actr.chunkstring(string="isa word task agree category 'verb'"))

agreement.productionstring(name="agree",
                           string="""
    =g>
    isa  word
    task trigger_agreement
    category 'verb'
    =retrieval>
    isa  word
    category 'noun'
    syncat 'subject'
    number =x
    ==>
    =g>
    isa  word""")
Example #15
import pyactr as actr

counting = actr.ACTRModel()

#Each chunk type should be defined first.
actr.chunktype("countOrder", ("first", "second"))
#Chunk type is defined as (name, attributes)

#Attributes are written as an iterable (above) or as a string, separated by comma:
actr.chunktype("countOrder", "first, second")

dm = counting.decmem
#this creates declarative memory

dm.add(actr.chunkstring(string="\
    isa countOrder\
    first 1\
    second 2"))
dm.add(actr.chunkstring(string="\
    isa countOrder\
    first 2\
    second 3"))
dm.add(actr.chunkstring(string="\
    isa countOrder\
    first 3\
    second 4"))
dm.add(actr.chunkstring(string="\
    isa countOrder\
    first 4\
    second 5"))

Example #16

actr.chunktype("reading",
               "state position word reanalysis retrieve_wh what_retrieve tag")

actr.chunktype(
    "action_chunk",
    "ACTION ACTION_RESULT_LABEL ACTION_PREV WORD_NEXT0_LEX WORD_NEXT0_POS TREE0_LABEL TREE1_LABEL TREE2_LABEL TREE3_LABEL TREE0_HEAD TREE0_HEADPOS TREE0_LEFTCHILD TREE0_RIGHTCHILD TREE1_HEAD TREE1_HEADPOS TREE1_LEFTCHILD TREE1_RIGHTCHILD TREE2_HEAD TREE2_HEADPOS TREE3_HEAD ANTECEDENT_CARRIED"
)

for name, group in words:
    word = group.iloc[0]['word']
    function = sentences_csv[sentences_csv.word.isin(
        [word])].function.to_numpy()[0]
    freq = sentences_csv[sentences_csv.word.isin([word])].freq.to_numpy()[0]
    temp_dm[actr.chunkstring(string="""
        isa  word
        form """ + '"' + str(word) + '"' + """
        cat  """ + str(function) + """
        """)] = np.array([])
    temp_activations[actr.chunkstring(string="""
        isa  word
        form """ + '"' + str(word) + '"' + """
        cat  """ + str(function) + """
        """)] = calculate_activation(parser, 0 - get_freq_array(freq))

parser.decmems = {}
parser.set_decmem(temp_dm)

for elem in parser.decmem:
    parser.decmem.activations[elem] = temp_activations[elem]
Example #17
def read(parser,
         sentence=None,
         pos=None,
         activations=None,
         condition=None,
         sent_nr='1',
         word_freq=word_freq,
         label_freq=label_freq,
         strength_of_association={},
         weight=1,
         decmem={},
         lexical=True,
         visual=True,
         syntactic=True,
         reanalysis=True,
         prints=True):
    """
    Read a sentence.

    :param sentence: what sentence should be read (list).
    :param pos: what pos should be used (list, matching in length with sentence).
    :param activations: dataframe of activations
    :param condition: name of condition (has to match with what is in ACTIVATIONS)
    :param sent_nr: sent_nr, usually a number (has to match with what is in ACTIVATIONS)
    :param lexical: should lexical information affect reading time?
    :param visual: should visual information affect reading time?
    :param syntactic: should syntactic information affect reading time?
    :param reanalysis: should reanalysis of parse affect reading time?
    """
    used_activations = activations[(activations['condition'].isin([condition]))
                                   & (activations['item'].isin([sent_nr]))]
    if prints:
        print(used_activations)
    parser.set_decmem(decmem)
    parser.decmem.activations = decmem.activations
    tobe_removed = {
        i
        for i in range(len(sentence))
        if (re.match("[:]+", sentence[i]) or sentence[i] == "'s")
        and i != len(sentence) - 1
    }
    if prints:
        print(sentence)
        for x in tobe_removed:
            print(sentence[x])

    if not lexical:
        for x in parser.decmem:
            parser.decmem.activations[
                x] = 100  #this nullifies the effect of word retrieval (latency close to 0)

    parser.retrievals = {}
    parser.set_retrieval("retrieval")
    parser.visbuffers = {}
    parser.goals = {}
    parser.set_goal("g")
    parser.set_goal(name="imaginal", delay=0)
    parser.set_goal(name="imaginal_reanalysis", delay=0)
    parser.set_goal("word_info")

    stimuli = [{} for i in range(len(sentence))]
    pos_word = 10
    environment.current_focus = (pos_word + 7 +
                                 7 * visual_effect(sentence[0], visual), 180)
    for x in range(41):
        #this overwrites any previously created move-eyes productions with dummies; we assume no sentence is longer than about 40 words
        parser.productionstring(name="move eyes" + str(x),
                                string="""
        =g>
        isa         reading
        state       dummy
        ==>
        =g>
        isa         reading
        state       dummy""")
    for i, word in enumerate(sentence):
        pos_word += 7 + 7 * visual_effect(word, visual)
        for j in range(len(stimuli)):
            if j == i:
                stimuli[j].update({
                    i: {
                        'text': word,
                        'position': (pos_word, 180),
                        'vis_delay': visual_effect(word, visual)
                    }
                })
            else:
                stimuli[j].update({
                    i: {
                        'text': "___",
                        'position': (pos_word, 180),
                        'vis_delay': 3
                    }
                })

        if i < len(sentence) - 3:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        """ + '"' + str(sentence[i + 2]) + '"' + """
        WORD_NEXT0_POS        """ + str(pos[i + 2]) + """
        WORD_NEXT1_LEX        """ + '"' + str(sentence[i + 3]) + '"' + """
        WORD_NEXT1_POS        """ + str(pos[i + 3]) + """
        =g>
        isa             reading
        state   reading_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")
        elif i < len(sentence) - 2:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        """ + '"' + str(sentence[i + 2]) + '"' + """
        WORD_NEXT0_POS        """ + str(pos[i + 2]) + """
        WORD_NEXT1_LEX        None
        =g>
        isa             reading
        state   reading_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")
        elif i < len(sentence) - 1:
            parser.productionstring(name="move eyes" + str(i),
                                    string="""
        =g>
        isa             reading
        state            move_eyes
        position        """ + str(i) + """
        ?manual>
        preparation       free
        ==>
        =imaginal>
        isa         action_chunk
        WORD_NEXT0_LEX        None
        WORD_NEXT1_LEX        None
        =g>
        isa             reading
        state   reading_word
        position        """ + str(i + 1) + """
        tag             """ + str(pos[i + 1]) + """
        ?visual_location>
        attended False
        +visual_location>
        isa _visuallocation
        screen_x    """ + str(pos_word + 7 +
                              7 * visual_effect(sentence[i + 1], visual)) + """
        screen_y 180
        ~visual>""")

    if prints:
        print(sentence)

    parser.goals["g"].add(
        actr.chunkstring(string="""
    isa             reading
    state           reading_word
    position        0
    tag             """ + str(pos[0])))

    parser.goals["imaginal"].add(
        actr.chunkstring(string="""
    isa             action_chunk
    TREE1_LABEL         NOPOS
    TREE1_HEAD          noword
    TREE2_LABEL         NOPOS
    TREE2_HEAD          noword
    TREE3_LABEL         NOPOS
    TREE3_HEAD          noword
    ANTECEDENT_CARRIED  NO
    WORD_NEXT0_LEX   """ + '"' + str(sentence[1]) + '"' + """
    WORD_NEXT0_POS   """ + str(pos[1]) + """
    WORD_NEXT1_LEX   """ + '"' + str(sentence[2]) + '"' + """
    WORD_NEXT1_POS   """ + str(pos[2])))

    # start a dictionary that will collect all created structures, and a list of built constituents
    constituents = {}
    built_constituents = [(Tree("NOPOS", []), (None, "noword")),
                          (Tree("NOPOS", []), (None, "noword")),
                          (Tree("NOPOS", []), (None, "noword"))]
    final_tree = Tree("X", [])

    if prints:
        parser_sim = parser.simulation(
            realtime=False,
            gui=False,
            trace=True,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)
    else:
        parser_sim = parser.simulation(
            realtime=False,
            gui=True,
            trace=False,
            environment_process=environment.environment_process,
            stimuli=stimuli,
            triggers='space',
            times=40)

    antecedent_carried = "NO"
    what_antecedent_carried = None

    spr_times = []  #reaction times, recorded and returned

    word_parsed = 0
    last_time = 0

    while True:
        try:
            parser_sim.step()
            #print(parser_sim.current_event)
        except simpy.core.EmptySchedule:
            spr_times = [
                10 for _ in sentence
            ]  #if something goes wrong, it probably got stuck somewhere; in that case report the time-out time per word (10 s)
            break
        if parser_sim.show_time() > 60:
            spr_times = [
                10 for _ in sentence
            ]  #this takes care of looping or excessive time spent - break if the model loops (10 s should definitely be enough to move on)
            break
        if parser_sim.current_event.action == "KEY PRESSED: SPACE":
            activation = used_activations[used_activations['position'].isin(
                [len(spr_times) + 1])]['activation'].to_numpy()[0]
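            # the same ACT-R latency shift as in the eye-tracking variant:
            # extra time = latency_factor * exp(-latency_exponent * A * weight)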
            extra_rule_time = parser.model_parameters[
                "latency_factor"] * np.exp(
                    -parser.model_parameters["latency_exponent"] *
                    (activation * weight))
            # two things play a role - number of matching features; fan of each matching feature; explore these two separately
            if len(spr_times) not in tobe_removed:
                spr_times.append(parser_sim.show_time() + extra_rule_time -
                                 last_time)
            else:
                tobe_removed.remove(len(spr_times))
            last_time = parser_sim.show_time()
            word_parsed += 1

        if word_parsed >= len(sentence) - 4:  #we ignore the last FOUR words
            break

        #this below - carrying out an action

        if re.search("^RULE FIRED: recall action", parser_sim.current_event.action) or\
                                re.search("^RULE FIRED: move to last action", parser_sim.current_event.action):
            parser_sim.steps(2)  #exactly enough steps to make imaginal full

            cg = parser.goals["g"].pop()
            wi = parser.goals["word_info"].copy().pop()
            retrieve_wh = used_activations[used_activations['position'].isin(
                [word_parsed + 1])]['retrieve_wh'].to_numpy()[0]
            # make a fake wh-antecedent and recall it right away (the parser always tries to recall a WP directly when reading a WP, guessing that it is dealing with a subject relative clause)
            if str(wi.cat) == "WP":
                parser.decmem.add(actr.chunkstring(string="""
                isa         action_chunk
                TREE0_LABEL     WP
                TREE0_HEAD      wh"""),
                                  time=parser_sim.show_time())
                retrieve_wh = "quick"  #we assume (in line with McElree et al. that if you just postulated a wh, you don't need to directly retrieve it, it is still in active memory)
            reanalysis = used_activations[used_activations['position'].isin(
                [word_parsed + 1])]['reanalysis'].to_numpy()[0]
            parser.goals["g"].add(
                actr.chunkstring(string="""
    isa             reading
    position    """ + str(cg.position) + """
    reanalysis      """ + str(reanalysis) + """
    retrieve_wh     """ + str(retrieve_wh) + """
    state           finished_recall"""))

    final_times = spr_times  #positions listed in tobe_removed were already skipped when the times were recorded
    if prints:
        print("FINAL TIMES")
        print(final_times)
        # with open('parses/parse_trees.txt', 'a+') as f:
        #     f.write(str(final_tree) + "\n")
        #     f.write("\n")
    return np.array(final_times[2:])
Example #18
import pyactr as actr
import simpy
import re
import sys

# import warnings
# warnings.filterwarnings("ignore")

from parser_rules_fan import parser
from parser_dm_fan import environment

parser.goals["g"].add(
    actr.chunkstring(string="""
    isa             parsing_goal
    task            reading_word
    stack1          S
    right_frontier  S
    dref_peg        1
"""))

if __name__ == "__main__":

    simulate = int(sys.argv[1])

    sentence = "a lawyer is in a cave"
    sentence = sentence.split()

    dm = parser.decmem

    parser.retrieval.finst = 5
Example #19

carLexeme = actr.makechunk(nameofchunk="car",
                           typename="word",
                           meaning="[[car]]",
                           category="noun",
                           number="sg",
                           synfunction="subject")

agreement = actr.ACTRModel()

dm = agreement.decmem
dm.add(carLexeme)

agreement.goal.add(
    actr.chunkstring(string="""
    isa goal_lexeme
    task agree
    category verb"""))

agreement.productionstring(name="retrieve",
                           string="""
    =g>
    isa goal_lexeme
    category verb
    task agree
    ?retrieval>
    buffer empty
    ==>
    =g>
    isa goal_lexeme
    task trigger_agreement
    category verb
    +retrieval>
    isa word
    category noun
    synfunction subject""")
Example #20
"""

import pyactr as actr

actr.chunktype("parsing_goal", "stack_top stack_bottom parsed_word task")
actr.chunktype("sentence", "word1 word2 word3")
actr.chunktype("word", "form, cat")

parser = actr.ACTRModel()
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0.2)

dm.add(actr.chunkstring(string="""
    isa word
    form 'Mary'
    cat 'ProperN'
"""))
dm.add(actr.chunkstring(string="""
    isa word
    form 'Bill'
    cat 'ProperN'
"""))
dm.add(actr.chunkstring(string="""
    isa word
    form 'likes'
    cat 'V'
"""))

g.add(actr.chunkstring(string="""
    isa parsing_goal
"""))
Example #21
environment = actr.Environment(focus_position=(320, 180))

actr.chunktype("parsing_goal",
               "task stack_top stack_bottom parsed_word right_frontier")
actr.chunktype("parse_state",
               "node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)

dm.add(actr.chunkstring(string="""
    isa  word
    form 'Mary'
    cat  'ProperN'
"""))
dm.add(actr.chunkstring(string="""
    isa  word
    form 'Bill'
    cat  'ProperN'
"""))
dm.add(actr.chunkstring(string="""
    isa  word
    form 'likes'
    cat  'V'
"""))
g.add(actr.chunkstring(string="""
    isa             parsing_goal
    task            read_word
"""))
Example #22
                        latency_exponent=0.5, emma_noise=False,\
                        rule_firing=0.1, buffer_spreading_activation={"g":2},\
                        strength_of_association=2)

dm = parser.decmem
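# the bare attribute access below is presumably kept only to make sure the
# default goal buffer "g" is initialized before further buffers are added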
parser.goal
parser.set_goal(name="imaginal", delay=0)
parser.set_goal(name="discourse_context", delay=0)

# stemmer to generate non-logical constants for word meaning representations
stemmer = SnowballStemmer("english")

for det in ['the', 'a']:
    dm.add(actr.chunkstring(string="""
    isa  word
    form """+str(det)+"""
    cat  Det
    """))

for noun in ['doctor', 'hippie', 'lawyer', 'debutante','captain', 'fireman',\
             'cave', 'bank', 'park', 'church', 'town', 'shop']:
    dm.add(actr.chunkstring(string="""
    isa  word
    form """+str(noun)+"""
    cat  N
    pred """+str(noun).upper()+"""
    """))

for prep in ['in']:
    predicate = stemmer.stem(prep)
    dm.add(actr.chunkstring(string="""
    isa  word
    form """+str(prep)+"""
    cat  P
    pred """+str(predicate).upper()+"""
    """))
Example #23
File: u2_demo.py  Project: jakdot/pyactr
stimulus = random.sample(string.ascii_uppercase, 1)[0]
text = {1: {'text': stimulus, 'position': (100,100)}}
environ = actr.Environment(focus_position=(100,100))

m = actr.ACTRModel(environment=environ, motor_prepared=True)

actr.chunktype("chunk", "value")
actr.chunktype("read", "state")
actr.chunktype("image", "img")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="attend_let", typename="chunk", value="attend_let")
actr.makechunk(nameofchunk="response", typename="chunk", value="response")
actr.makechunk(nameofchunk="done", typename="chunk", value="done")
m.goal.add(actr.chunkstring(name="reading", string="""
        isa     read
        state   start"""))
g2 = m.set_goal("g2")
g2.delay = 0.2

t2 = m.productionstring(name="encode_letter", string="""
        =g>
        isa     read
        state   start
        =visual>
        isa     _visual
        value  =letter
        ==>
        =g>
        isa     read
        state   response""")
Example #24
text = {1: {'text': stimulus, 'position': (100, 100)}}
environ = actr.Environment(focus_position=(100, 100))

m = actr.ACTRModel(environment=environ, motor_prepared=True)

actr.chunktype("chunk", "value")
actr.chunktype("read", "state")
actr.chunktype("image", "img")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="attend_let", typename="chunk", value="attend_let")
actr.makechunk(nameofchunk="response", typename="chunk", value="response")
actr.makechunk(nameofchunk="done", typename="chunk", value="done")
m.goal.add(
    actr.chunkstring(name="reading",
                     string="""
        isa     read
        state   start"""))
g2 = m.set_goal("g2")
g2.delay = 0.2

t2 = m.productionstring(name="encode_letter",
                        string="""
        =g>
        isa     read
        state   start
        =visual>
        isa     _visual
        value  =letter
        ==>
        =g>
        isa     read
        state   response""")
Example #25
environment = actr.Environment(focus_position=(320, 180))

actr.chunktype("parsing_goal",
               "task stack_top stack_bottom parsed_word right_frontier")
actr.chunktype("parse_state", "node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)

dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'Mary'
    cat  'ProperN'
"""))
dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'Bill'
    cat  'ProperN'
"""))
dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'likes'
    cat  'V'
"""))
Example #26

actr.chunktype("parse_state", "node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment,
                        subsymbolic=True,
                        retrieval_threshold=-5,
                        latency_factor=0.1,
                        latency_exponent=0.13)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)

dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'Mary'
    cat  'ProperN'
"""))

dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'The'
    cat  'Det'
"""))

dm.add(
    actr.chunkstring(string="""
    isa  word
    form 'the'
    cat  'Det'
"""))
Example #27
def reading(sentence, dm):
    parser.goals["g"].add(
        actr.chunkstring(string="""
    isa               parsing_goal
    task              reading_word
    stack1            S
    right_edge_stack1 S
    right_edge_stack2 None
    discourse_status  at_issue
    dref_peg          x1
    drs_peg           d1
    event_peg         e1
    embedding_level   0
    """))

    parser.goals["imaginal"].add(
        actr.chunkstring(string="""
    isa             parse_state
    """))

    parser.goals["unresolved_discourse"].add(
        actr.chunkstring(string="isa drs"))

    sentence = sentence.split()

    parser.set_decmem(data={x: dm[x] for x in dm})

    parser.retrieval.finst = 5

    stimuli = []
    for word in sentence:
        stimuli.append({1: {'text': word, 'position': (320, 180)}})
    parser_sim = parser.simulation(
        realtime=False,
        gui=False,
        environment_process=environment.environment_process,
        stimuli=stimuli,
        triggers='space',
        times=100)

    elapsed_time = 0

    while True:
        try:
            parser_sim.step()
        except simpy.core.EmptySchedule:
            break

        if re.search("^KEY PRESSED: SPACE", str(parser_sim.current_event.action)) and\
                (environment.stimulus[1]["text"] == "argued" or environment.stimulus[1]["text"] == "played") and not elapsed_time:
            elapsed_time = parser_sim.show_time()
        elif re.search("^KEY PRESSED: SPACE", str(parser_sim.current_event.action)) and\
                elapsed_time:
            elapsed_time = parser_sim.show_time() - elapsed_time
            break

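    # flatten declarative memory into (chunk, timestamp) pairs, ordered by the
    # time at which each chunk (re-)entered memory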
    sortedDM = sorted(([item[0], time] for item in parser.decmem.items()\
                       for time in item[1]),\
                       key=lambda item: item[1])

    return elapsed_time, sortedDM
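
# A minimal usage sketch (hypothetical sentence; assumes a populated
# declarative memory dm): reading() returns the time between the space press
# at the critical verb ("argued"/"played") and the following space press,
# plus a time-ordered dump of declarative memory:
#
#     elapsed, chunks = reading("the lawyer argued the case", dm)
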
Example #28

environment = actr.Environment(focus_position=(320, 180))

actr.chunktype("parsing_goal",
               "task stack_top stack_bottom parsed_word right_frontier")
actr.chunktype("parse_state", "node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment, motor_prepared=True)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)

dm.add(
    actr.chunkstring(string="""
    isa  word
    form Mary
    cat  ProperN
"""))
dm.add(
    actr.chunkstring(string="""
    isa  word
    form Bill
    cat  ProperN
"""))
dm.add(actr.chunkstring(string="""
    isa  word
    form likes
    cat  V
"""))
Example #29
temp_dm = {}

temp_activations = {}

words = sentences_csv.groupby('word', sort=False)

actr.chunktype("word", "form cat")

actr.chunktype("parsing_goal", "task")

actr.chunktype("reading",
               "state position word reanalysis retrieve_wh what_retrieve tag")

actr.chunktype(
    "action_chunk",
    "ACTION ACTION_RESULT_LABEL ACTION_PREV WORD_NEXT0_LEX WORD_NEXT0_POS TREE0_LABEL TREE1_LABEL TREE2_LABEL TREE3_LABEL TREE0_HEAD TREE0_HEADPOS TREE0_LEFTCHILD TREE0_RIGHTCHILD TREE1_HEAD TREE1_HEADPOS TREE1_LEFTCHILD TREE1_RIGHTCHILD TREE2_HEAD TREE2_HEADPOS TREE3_HEAD ANTECEDENT_CARRIED"
)

for name, group in words:
    word = group.iloc[0]['word']
    function = sentences_csv[sentences_csv.word.isin(
        [word])].function.to_numpy()[0]
    temp_dm[actr.chunkstring(string="""
        isa  word
        form """ + '"' + str(word) + '"' + """
        cat  """ + str(function) + """
        """)] = np.array([0])

parser.decmems = {}
parser.set_decmem(temp_dm)
Example #30
"""
A simple top-down parser.
"""

import pyactr as actr

actr.chunktype("parsing", "task stack_top stack_bottom parsed_word ")
actr.chunktype("sentence", "word1 word2 word3")

parser = actr.ACTRModel()

dm = parser.decmem
dm.add(actr.chunkstring(string="isa word form 'Mary' cat 'ProperN'"))
dm.add(actr.chunkstring(string="isa word form 'Bill' cat 'ProperN'"))
dm.add(actr.chunkstring(string="isa word form 'likes' cat 'V'"))

parser.goal.add(
    actr.chunkstring(string="isa parsing  task parse stack_top 'S'"))
parser.goal = "g2"
parser.goals["g2"].delay = 0.2
parser.goals["g2"].add(
    actr.chunkstring(
        string="isa sentence word1 'Mary' word2 'likes' word3 'Bill'"))

parser.productionstring(name="expand: S->NP VP",
                        string="""
        =g>
        isa         parsing
        task        parse
        stack_top   'S'
        ==>
        =g>
        isa         parsing
        task        parse
        stack_top   'NP'
        stack_bottom 'VP'""")
Example #31
counting = actr.ACTRModel()

#Each chunk type should be defined first.
actr.chunktype("countOrder", ("first", "second"))
#Chunk type is defined as (name, attributes)

#Attributes are written as an iterable (above) or as a string (comma-separated):
actr.chunktype("countOrder", "first, second")

actr.chunktype("countFrom", ("start", "end", "count"))

dm = counting.decmem #creates variable for declarative memory (easier to access)
dm.add(actr.chunkstring(string="""
    isa     countOrder
    first   1
    second  2
"""))
dm.add(actr.chunkstring(string="""
    isa     countOrder
    first   2
    second  3
"""))
dm.add(actr.chunkstring(string="""
    isa     countOrder
    first   3
    second  4
"""))
dm.add(actr.chunkstring(string="""
    isa     countOrder
    first   4
    second  5
"""))
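
# A minimal sketch (not in the original file) of a production that could drive
# counting with these chunk types: once the goal carries a current count, it
# requests the countOrder pair whose first slot matches it.
counting.productionstring(name="request next number", string="""
    =g>
    isa     countFrom
    count   =x
    ==>
    +retrieval>
    isa     countOrder
    first   =x""")
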
Example #32
"""
A left-corner parser.
"""

import pyactr as actr

environment = actr.Environment(focus_position=(320, 180))

actr.chunktype("read", "state word goal_cat")
actr.chunktype("parsing", "top bottom")
actr.chunktype("word", "form cat")

parser = actr.ACTRModel(environment)

parser.decmem.add(
    actr.chunkstring(string="isa word form 'Mary' cat 'ProperN'"))
parser.decmem.add(
    actr.chunkstring(string="isa word form 'Bill' cat 'ProperN'"))
parser.decmem.add(actr.chunkstring(string="isa word form 'likes' cat 'V'"))

parser.goal.add(
    actr.chunkstring(string="""
        isa     read
        state   start
        goal_cat 'S'"""))
parser.goal = "g2"
parser.goals["g2"].add(
    actr.chunkstring(string="""
        isa     parsing
        top     'S'"""))
parser.goals["g2"].delay = 0.2
Example #33
"""
CFG for the a^n b^n language.
"""

import pyactr as actr

cfg = actr.ACTRModel()

actr.chunktype("countOrder", "first, second")
actr.chunktype("countFrom", ("start", "end", "count", "terminal"))

dm = cfg.decmem
dm.add(
    actr.chunkstring(string="""
    isa         countOrder
    first       1
    second      2
"""))
dm.add(
    actr.chunkstring(string="""
    isa         countOrder
    first       2
    second      3
"""))
dm.add(
    actr.chunkstring(string="""
    isa         countOrder
    first       3
    second      4
"""))
dm.add(
    actr.chunkstring(string="""
    isa         countOrder
    first       4
    second      5
"""))
Example #34
    rule_firing=0.011,
    buffer_spreading_activation={
        "discourse_context": 2,
        "unresolved_discourse": 2
    },
    strength_of_association=4)

dm = parser.decmem
parser.goal
parser.set_goal(name="imaginal", delay=0)
parser.set_goal(name="discourse_context", delay=0)
parser.set_goal(name="unresolved_discourse", delay=0)

actr.chunkstring(name="EQUALS",
                 string="""
    isa  pred
    constant_name _equals_
    arity         2
""")

actr.chunkstring(name="MALE",
                 string="""
    isa  pred
    constant_name _male_
    arity         1
""")

actr.chunkstring(name="FEMALE",
                 string="""
    isa  pred
    constant_name _female_
    arity         1
""")
Example #35
"""
A basic model of grammar.
"""

import pyactr as actr

regular_grammar = actr.ACTRModel()

actr.chunktype("goal_chunk", "mother daughter1 daughter2 state")

dm = regular_grammar.decmem

regular_grammar.goal.add(
    actr.chunkstring(string="""
    isa         goal_chunk
    mother      NP
    state       rule
"""))

regular_grammar.productionstring(name="NP ==> N NP",
                                 string="""
    =g>
    isa         goal_chunk
    mother      NP
    daughter1   None
    daughter2   None
    state       rule
    ==>
    =g>
    isa         goal_chunk
    daughter1   N
    daughter2   NP
    state       show
""")