def example():
    """Demonstrate an object-creator device with a custom object function."""

    global ids

    actr.reset()

    # Register the command that will build a visual-object chunk on demand.
    actr.add_command("custom-object-chunk", custom_object_chunk,
                     "Command to dynamically create visual object")

    # Install an object-creator device that routes object creation
    # through the custom command registered above.
    actr.install_device(["vision", "object-creator", "custom-object-chunk"])

    # Two visicon features: the first gets its object from the custom
    # function, the second from the default mechanism.
    ids = actr.add_visicon_features(["screen-x", 10, "screen-y", 20],
                                    ["screen-x", 100, "screen-y", 20])

    # Give the vision module a chance to process the features,
    # then show the resulting visicon.
    actr.run_n_events(3)
    actr.print_visicon()

    # Run long enough for the model to complete the task.
    actr.run(1)

    actr.remove_command("custom-object-chunk")
Example #2
0
def trials(n=200,reset=True,output=True):
    """Run n trials of the compilation task and return the analysis.

    :param n: number of trials to present
    :param reset: when True, reset the model before starting
    :param output: passed through to analyze_results
    """
    global responses,task_over,exp_length,window

    if reset:
        actr.reset()

    window = actr.open_exp_window("Compilation task",visible=False)
    times = []
    responses = []
    task_over = False
    exp_length = n
    present_next_trial()
    actr.install_device(window)
    actr.add_command('compilation-issues-response',respond_to_key_press,"Compilation issues key press response monitor")
    actr.monitor_command('output-key','compilation-issues-response')

    # The original code used actr.run_until_condition with a game-over
    # test, but a remote call per event is almost two orders of magnitude
    # slower.  Instead run sufficiently long to complete the task and
    # assume the model stops when there's nothing left to do.
    actr.run(20000)

    actr.remove_command_monitor('output-key','compilation-issues-response')
    actr.remove_command ('compilation-issues-response')

    return analyze_results(output)
Example #3
0
def single_trial(prime_stimulus, **param_set):
    """Simulate a single trial, resetting the model at the start.

    The trial repeats until the model produces a response, collected as
    either DO/PO for a simplified version of a full sentence.

    :param prime_stimulus: dict type, the prime stimulus, indicating the condition
    :return: the recorded response
    """

    global response
    response = False

    while not response:
        actr.reset()
        actr.install_device(("speech", "microphone"))
        # Re-apply any caller-supplied parameters after the reset.
        if param_set:
            set_parameters(**param_set)

        prime_syntax = prime_stimulus[-3]
        prime_syntax_corr = prime_stimulus[-1]

        actr.add_command("model1-key-press", respond_to_speech,
                         "model1 task output-key monitor")
        actr.monitor_command("output-speech", "model1-key-press")

        task1(prime_stimulus)
        task2()

        actr.remove_command_monitor("output-speech", "model1-key-press")
        actr.remove_command("model1-key-press")

    return response
Example #4
0
def demo_table():
    """Feed a hard-coded spoken sentence to the model word by word."""
    actr.reset()

    # Manually set sentence (alternatives in the original used the
    # Aldebaran proxy or the Google API for speech recognition).
    text = "give fork"

    tokens = tokenize(text)
    onset = 0
    actr.set_parameter_value(":sound-decay-time", 0.2)
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])

    # Present each word token as a sound, 200 ms apart.
    for token in tokens:
        if TOK.descr[token.kind] == "WORD":
            print(str(token.txt))
            actr.new_word_sound(str(token.txt), onset)
            onset = onset + 0.2

    actr.run(30)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Example #5
0
def setupACTR():
    """Load the ddpg agent model, start ACT-R on a background thread,
    and block until the model's first production fires.

    :return: 1 once the "tic" callback has cleared actr.wait.
    """
    actr.load_act_r_model(
        "/Users/paulsomers/ddpg-craft/scripts/ddpg_agent.lisp")
    actr.wait = True

    chk = actr.define_chunks(['wait', 'false'])

    # Run ACT-R (up to 1000 simulated seconds) on its own thread so this
    # function can poll for the first production below.
    actrThread = threading.Thread(target=actr.run, args=[1000])
    actrThread.start()
    actr.schedule_simple_event_now("set-buffer-chunk", ['imaginal', chk[0]])

    # Once loaded, add the functions that ACT-R productions call back into.
    actr.add_command("tic", do_tic)
    actr.add_command('set_response', set_response)

    # Imaginal chunk spec for an initial production.
    # BUG FIX: the original wrote ['isa' 'setup'] -- implicit string
    # concatenation producing ['isasetup']; a comma was clearly intended.
    chunk = ['isa', 'setup']

    # Wait until the first production fires: once ACT-R is running, the
    # "tic" callback sets actr.wait to False.
    while actr.wait:
        time.sleep(0.001)
        print("waiting")

    return 1
Example #6
0
def agi ():
    """Show a moving "x" in a visible window and run the model on it."""

    actr.reset()

    # Open a window and place the text that will be moved.
    window = actr.open_exp_window("Moving X", visible=True)
    text = actr.add_text_to_exp_window(window, "x", x=10, y=10)
    y = 10

    actr.install_device(window)

    # The periodic-event command isn't available at this point, so use a
    # relative maintenance event that reschedules itself to move the text.
    actr.add_command("agi-move-it",move_text)
    actr.schedule_event_relative (1, "agi-move-it",params=[text,y],maintenance=True)

    # Run in real time since the window is visible.
    actr.run(3,True)

    actr.remove_command("agi-move-it")
Example #7
0
def experiment(human=False):
    """Run one letter-recognition trial and return the response.

    :param human: when True, wait for a human key press; otherwise
        install the window for the model and run it for up to 10 s.
    :return: the recorded response (False when none was made).
    """

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    text1 = items[0]
    window = actr.open_exp_window("Letter recognition")

    actr.add_text_to_exp_window(window, text1, x=125, y=150)

    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.monitor_command("output-key", "demo2-key-press")

    global response
    response = False

    # Idiomatic truth tests (originals compared == True / == False).
    if human:
        while not response:
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "demo2-key-press")
    actr.remove_command("demo2-key-press")

    return response
Example #8
0
def person():
    """Collect one human trial of the choice experiment."""
    global response

    window = actr.open_exp_window("Choice Experiment", visible=True)

    actr.add_command("choice-response", respond_to_key_press,
                     "Choice task key response")
    actr.monitor_command("output-key", "choice-response")

    actr.add_text_to_exp_window(window, 'choose', x=50, y=100)

    response = ''

    # Wait for the participant's key press.
    while response == '':
        actr.process_events()

    actr.clear_exp_window(window)

    # Show "heads" 90% of the time, "tails" otherwise.
    answer = 'heads' if actr.random(1.0) < .9 else 'tails'

    actr.add_text_to_exp_window(window, answer, x=50, y=100)

    # Leave the feedback up for one second of real time.
    shown_at = actr.get_time(False)
    while (actr.get_time(False) - shown_at) < 1000:
        actr.process_events()

    actr.remove_command_monitor("output-key", "choice-response")
    actr.remove_command("choice-response")

    return response
Example #9
0
def task(which=False):
    """Run one next/previous-letter trial and return [task, result].

    :param which: 'next' or 'previous' forces the task type; any other
        value picks one at random.
    :return: [task, result] where result is True when the model first
        echoed the displayed letter and then, after the prompt appeared,
        typed the correct neighboring letter for the chosen task.
    """
    global response, response_time

    actr.reset()
    alphabet = [
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
        "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
    ]
    letter = actr.permute_list(alphabet)[0]
    # Prepend "Z" so the index arithmetic below can treat Z as the letter
    # before A (presumably for wrap-around on 'previous' -- note Z then
    # appears at both ends and index() finds the copy at position 0).
    alphabet = ["Z"] + alphabet

    if which == 'next' or which == 'previous':
        task = which
    elif actr.random(2) == 0:
        task = 'next'
    else:
        task = 'previous'

    # Random delay (1500-2499 ms) before the task prompt is displayed.
    time = 1500 + actr.random(1000)

    window = actr.open_exp_window("Simple task")

    actr.install_device(window)

    actr.add_command("pm-issues-response", respond_to_key_press,
                     "Perceptual-motor issues task response")
    actr.monitor_command("output-key", "pm-issues-response")

    actr.add_text_to_exp_window(window, letter, x=130, y=150)

    actr.add_command("pm-issue-display", display_prompt,
                     "Perceptual-motor issues task prompt display")

    # Schedule the prompt to appear after the random delay.
    actr.schedule_event_relative(time,
                                 "pm-issue-display",
                                 params=[window, task],
                                 time_in_ms=True)

    response = []
    response_time = False

    actr.run(10, True)

    actr.remove_command("pm-issue-display")
    actr.remove_command_monitor("output-key", "pm-issues-response")
    actr.remove_command("pm-issues-response")

    # Correct when: exactly two key presses, the second occurring after
    # the prompt time; the first press echoes the displayed letter; and
    # the second press is the letter after it ('next') or before it
    # ('previous') in the (Z-prefixed) alphabet.
    if (len(response) == 2 and response_time > time and response[0] == letter
            and ((task == 'next' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) - 1)) or
                 (task == 'previous' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) + 1)))):
        result = True
    else:
        result = False

    return [task, result]
Example #10
0
def add_speech_monitor():
    """Install the Siegler output-speech monitor exactly once.

    :return: True when this call installed the monitor, False when it
        was already installed.
    """
    global monitor_installed

    # Idiomatic truth test (was: monitor_installed == False).
    if not monitor_installed:
        actr.add_command("siegler-response",record_model_speech,"Siegler task model response")
        actr.monitor_command("output-speech","siegler-response")
        monitor_installed = True
        return True
    return False
Example #11
0
def demo_mirror():
    """Run the mirror demo for 60 s with an inner-speech monitor installed."""
    init()
    actr.reset()

    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])

    actr.run(60)

    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Example #12
0
def trials(n,cont=False,v=False):
    """Run n past-tense trials, reporting results in blocks of 100.

    :param n: number of trials to run
    :param cont: when True (and a word list already exists), continue
        the previous run instead of resetting the model
    :param v: value passed to the :v (trace output) parameter
    """
    global report,word_list,reward_check

    # Verify on each trial that the model actually received a reward.
    actr.add_command("reward-check",verify_reward,
                     "Past tense code check for a reward each trial.")
    actr.monitor_command("trigger-reward","reward-check")

    if not(cont) or not(word_list):
        actr.reset()
        word_list = make_word_freq_list(verbs)
        # Collect every word form in the frequency list and make sure
        # each one exists as a chunk before running.
        new = []
        for x in word_list:
            for y in x[1:]:
                if y not in new:
                    new.append(y)
        for x in new:
            if not(actr.chunk_p(x)):
                actr.define_chunks([x])

        print_header()
        report = []
 
    actr.set_parameter_value(":v",v)

    # Resume the 100-trial reporting block from where the report left off.
    start = 100 * math.floor(len(report) / 100)
    count = len(report) % 100

    for i in range(n):
        # Add two past tenses to memory, then have the model produce one.
        add_past_tense_to_memory()
        add_past_tense_to_memory()
        reward_check = False
        target = make_one_goal()
        duration = actr.run(100)[0]
        add_to_report(target,actr.buffer_read('imaginal'))
        actr.clear_buffer('imaginal')
        count += 1
        
        if count == 100:
            rep_f_i(start, start + 100, 100)
            count = 0
            start += 100
        if not(reward_check):
            actr.print_warning("Model did not receive a reward when given %s."% target[0])

        # Pad each trial out to a fixed 200 s of model time.
        actr.run_full_time(200 - duration)

        # Hitting the full 100 s means the model never finished generating.
        if duration == 100:
            actr.print_warning("Model spent 100 seconds generating a past tense for %s."%
                               target[0])

    rep_f_i(start,start+count,100)

    actr.remove_command_monitor("trigger-reward","reward-check")
    actr.remove_command("reward-check")
Example #13
0
def play(player1, player2):
    """Play one capture game between two named models.

    :param player1: name of the model that starts at position 0
    :param player2: name of the model that starts at position 5
    :return: the final game_over value (unchanged if either name was
        not a valid model name)
    """
    global p1, p2, p1_position, p2_position, game_over, current_player, next_move

    # make sure that both names are valid model names

    if (player1.lower() in (x.lower() for x in actr.mp_models())) and (
            player2.lower() in (x.lower() for x in actr.mp_models())):
        actr.reset()

        actr.add_command('make-move', make_move,
                         "Model action function in simple capture game.")

        # create the goal chunks

        actr.set_current_model(player1)

        actr.define_chunks([
            'goal', 'isa', 'play', 'my-pos', 'p1', 'p1', 0, 'p2', 5, 'state',
            'move'
        ])

        actr.goal_focus('goal')

        actr.set_current_model(player2)

        # NOTE(review): this chunk gives player2 my-pos 'p1' and lists
        # the 'p2' slot twice ('p2' 0 ... 'p2' 5) -- looks like it was
        # meant to be 'my-pos' 'p2', 'p1' 0, 'p2' 5.  Confirm against
        # the model before changing.
        actr.define_chunks([
            'goal', 'isa', 'play', 'my-pos', 'p1', 'p2', 0, 'p2', 5, 'state',
            'move'
        ])

        actr.goal_focus('goal')

        # initialize the game information

        p1 = player1
        p2 = player2
        p1_position = 0
        p2_position = 5
        game_over = False
        current_player = player1
        next_move = None

        while not (game_over):
            # until a move is made which breaks the run
            # or 1000 simulated seconds pass which is a forfeit
            actr.run(1000)
            process_move(next_move)

        # Give them a chance to process results

        actr.run_full_time(3)

        actr.remove_command('make-move')

    return game_over
 def experiment_initialization(self, vis=False):
     """Register the key-press monitor and post-event hook, and open
     the task window.

     :param vis: whether the experiment window is visible
     """
     # actr.reset()
     # actr.reload()
     actr.add_command("unit2-key-press", self.respond_to_key_press,
                      "Assignment 2 task output-key monitor")
     actr.add_command("my-event-hook", self.post_event_hook,
                      "called after an event")
     actr.monitor_command("output-key", "unit2-key-press")
     actr.call_command("add-post-event-hook", "my-event-hook")
     # NOTE(review): "Leter difference task" looks like a typo for
     # "Letter", but the title is a runtime string -- confirm nothing
     # matches on it before fixing.
     self.window = actr.open_exp_window("Leter difference task",
                                        visible=vis)
Example #15
0
def add_key_monitor():
    """Install the 1-hit blackjack key-press monitor exactly once.

    :return: True when this call installed the monitor, False when it
        was already installed.
    """
    global key_monitor_installed

    # Idiomatic truth test (was: key_monitor_installed == False).
    if not key_monitor_installed:
        actr.add_command("1hit-bj-key-press", respond_to_keypress,
                         "1-hit blackjack task key output monitor")
        actr.monitor_command("output-key", "1hit-bj-key-press")
        key_monitor_installed = True
        return True
    return False
Example #16
0
def trial(onset_time):
    """Present one Sperling display with a row-cue tone and score it.

    :param onset_time: time at which the cue tone starts
    :return: the score computed from the cued row's letters
    """
    actr.reset()

    pool = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    answers = []
    row = actr.random(3)
    window = actr.open_exp_window("Sperling Experiment", visible=True)

    # Lay out a 3x4 grid of letters; the cued row holds the answers.
    for r in range(3):
        for c in range(4):
            ch = pool[c + (r * 4)]
            if r == row:
                answers.append(ch)
            actr.add_text_to_exp_window(window,
                                        ch,
                                        x=(75 + (c * 50)),
                                        y=(100 + (r * 50)))

    actr.install_device(window)

    # Tone frequency cues which row to report: 2000 Hz for the top row,
    # 1000 Hz for the middle, 500 Hz for the bottom.
    freq = {0: 2000, 1: 1000}.get(row, 500)

    actr.new_tone_sound(freq, .5, onset_time)
    # Blank the display after 900-1099 ms.
    actr.schedule_event_relative(900 + actr.random(200),
                                 "clear-exp-window",
                                 params=[window],
                                 time_in_ms=True)

    global responses
    responses = []

    actr.add_command("sperling-response", respond_to_key_press,
                     "Sperling task key press response monitor")
    actr.monitor_command("output-key", "sperling-response")

    actr.run(30, True)

    actr.remove_command_monitor("output-key", "sperling-response")
    actr.remove_command("sperling-response")

    if show_responses:
        print("answers: %s" % answers)
        print("responses: %s" % responses)

    return compute_score(answers)
Example #17
0
def recall():
    """Reset and run the grouped model, returning the recorded responses."""
    actr.add_command(
        "grouped-response", record_response,
        "Response recording function for the tutorial grouped model.")

    global response
    response = []

    actr.reset()
    actr.run(20)

    actr.remove_command("grouped-response")
    return response
Example #18
0
    def simulate(self, trace=False, utility_offset=True):
        """Runs SP simulations using real stimuli.

        For each condition in self.CONDITIONS, runs self.n trials; each
        trial resets the model, installs a window (for the virtual mic),
        and runs one randomly chosen trial of that condition.

        :param trace: when False, silence the model trace (:V nil)
        :param utility_offset: unused here -- presumably consumed by the
            "parser-offset" hook; confirm against self.utility_offset
        """
        # Function hook to modify the utility calculation
        # (will add a mismatch penalty). Need to be defined
        # before the model is loaded
        
        actr.add_command("parser-offset", self.utility_offset,
                         "Calculates a mismatch penalty for AI condition")

        actr.load_act_r_model(self.model)

        
        for condition in self.CONDITIONS:
            self.current_condition = condition
            # Only the trials belonging to the current condition.
            subset = [t for t in self.trials if t.condition == condition]
            for j in range(self.n):
                actr.reset()

                # Make the model silent in case
                
                if not trace:
                    actr.set_parameter_value(":V", False)

                # The model does not really need a visual interface,
                # but the default AGI provides a virtual mic to record
                # voice output.
        
                win = actr.open_exp_window("SP", width = 80,
                                           height = 60,
                                           visible=False)
                actr.install_device(win)

                # Function hooks to record the model responses. 
                
                actr.add_command("record-response", self.record_response,
                                 "Accepts a response for the SP task")
                actr.monitor_command("output-speech",
                                     "record-response")

                
                # Run a single trial in the given condition
                
                trial = random.choice(subset)
                self.run_trial(trial)

                # Clean up the function hooks
                
                actr.remove_command_monitor("output-speech",
                                            "record-response")
                actr.remove_command("record-response")
                
        # Removes the offset
        actr.remove_command("parser-offset")
Example #19
0
def task(size, trials, human=False):
    """Run the paired-associate experiment with a key-press monitor.

    :param size: passed through to do_experiment
    :param trials: passed through to do_experiment
    :param human: passed through to do_experiment
    :return: whatever do_experiment returns
    """
    actr.add_command("paired-response", respond_to_key_press,
                     "Paired associate task key press response monitor")
    actr.monitor_command("output-key", "paired-response")

    try:
        result = do_experiment(size, trials, human)
    finally:
        # Remove the monitor even if the experiment raises, so repeated
        # calls don't accumulate stale commands.
        actr.remove_command_monitor("output-key", "paired-response")
        actr.remove_command("paired-response")

    return result
Example #20
0
    def simulate(self):
        """Runs a single simulation.

        Loads the model from this file's directory, applies
        self.model_params, then schedules a "next" situation every
        self.event_step seconds until self.max_time of model time has
        elapsed.  Increments self.counter when done.
        """

        # Add commands and hooks
        actr.add_command("v_offset", self.chunk_v_term,
                         "Extra term in activation")

        actr.add_command("spreading", self.spreading_activation,
                         "Overrides normal spreading activation algorithm")

        actr.add_command("monitor_retrievals", self.monitor_retrievals,
                         "Monitors what is being retrieved")

        actr.add_command("next", self.present_new_situation,
                         "Presents a new situation")

        actr.add_command("keep_table", self.add_chunk)

        # Makes sure we are loading the current model from
        # the current directory
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        actr.load_act_r_model(os.path.join(curr_dir, self.model))

        # Silence both the trace and the command trace.
        actr.set_parameter_value(":V", False)
        actr.set_parameter_value(":cmdt", False)

        # Apply the set of provided parameters
        for param, value in self.model_params.items():
            actr.set_parameter_value(param, value)

        # Run a life simulation

        event_time = 0.0

        while actr.mp_time() < self.max_time:
            actr.schedule_event(event_time, "next")
            event_time += self.event_step
            actr.run(self.event_step)  # No need to run beyond the event step

        # Clean-up

        actr.remove_command("next")
        actr.remove_command("v_offset")
        actr.remove_command("spreading")
        actr.remove_command("keep_table")
        actr.remove_command("monitor_retrievals")

        # Update counter

        self.counter += 1
Example #21
0
def experiment(human=False):
    """Run one letter-difference trial.

    One target letter appears among two copies of a foil letter; a
    correct answer is a key press matching the target.

    :param human: when True, wait for a human key press; otherwise
        install the window for the model and run it for up to 10 s.
    :return: True when the response matched the target letter.
    """

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    target = items[0]
    foil = items[1]
    window = actr.open_exp_window("Letter difference")
    text1 = foil
    text2 = foil
    text3 = foil
    index = actr.random(3)

    # Put the target at a random one of the three display positions.
    if index == 0:
        text1 = target
    elif index == 1:
        text2 = target
    else:
        text3 = target

    actr.add_text_to_exp_window(window, text1, x=125, y=75)
    actr.add_text_to_exp_window(window, text2, x=75, y=175)
    actr.add_text_to_exp_window(window, text3, x=175, y=175)

    actr.add_command("unit2-key-press", respond_to_key_press,
                     "Assignment 2 task output-key monitor")
    actr.monitor_command("output-key", "unit2-key-press")

    global response
    response = ''

    # Idiomatic truth test (was: human == True).
    if human:
        while response == '':
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "unit2-key-press")
    actr.remove_command("unit2-key-press")

    return response.lower() == target.lower()
def test_it ():
    """Run every model for 10 s while relaying their speech output."""

    actr.reset()

    actr.add_command("relay-speech-output", relay_speech_output, "Handle player speak actions")
    actr.monitor_command("output-speech","relay-speech-output")

    # Give every model a microphone so its speech output is captured.
    for model in actr.mp_models():
        actr.set_current_model(model)
        actr.install_device(["speech","microphone"])

    actr.run(10)

    actr.remove_command_monitor("output-speech", "relay-speech-output")
    actr.remove_command("relay-speech-output")
Example #23
0
def sentence(person, location, target, term):
    """Present a person/location probe and collect the model's response.

    :param person: probe word shown on the left
    :param location: probe word shown on the right
    :param target: True when the pair was studied (correct key 'k'),
        False for a foil (correct key 'd')
    :param term: 'person' to force retrieval by person, anything else
        forces retrieval by location
    :return: (latency-in-seconds, correct?) -- (30, False) if no
        response was made within 30 s
    """

    actr.reset()

    window = actr.open_exp_window("Sentence Experiment",
                                  visible=False,
                                  width=600,
                                  height=300)

    actr.install_device(window)

    actr.add_command("fan-response", respond_to_key_press,
                     "Fan experiment model response")
    actr.monitor_command("output-key", "fan-response")

    # Force the retrieval strategy by disabling the competing production.
    if term == 'person':
        actr.pdisable("retrieve-from-location")
    else:
        actr.pdisable("retrieve-from-person")

    actr.add_text_to_exp_window(window, person, x=50, y=150, width=75)
    actr.add_text_to_exp_window(window, location, x=250, y=150, width=75)

    global response, response_time

    response = ''
    response_time = 0

    actr.run(30)

    actr.remove_command_monitor("output-key", "fan-response")
    actr.remove_command("fan-response")

    if response == '':
        return (30, False)

    # 'k' means "studied", 'd' means "not studied"; which one is correct
    # depends on whether this probe was a target.  (The original compared
    # against 'k'.lower()/'d'.lower(), which is redundant, and carried an
    # unused local x = 25 -- both removed.)
    correct_key = 'k' if target else 'd'
    return (response_time / 1000, response.lower() == correct_key)
Example #24
0
def reset_actr():
    """Load the egocentric-salience model and seed declarative memory
    with the pickled chunk definitions.
    """

    model_name = 'egocentric-salience.lisp'
    model_path = '/Users/paulsomers/COGLE/deep_salience/'

    chunk_file_name = 'chunks.pkl'
    chunk_path = os.path.join(model_path, 'data')

    actr.add_command('similarity_function', similarity)
    actr.load_act_r_model(os.path.join(model_path, model_name))
    actr.record_history("blending-trace")

    # Load all the chunks.  BUG FIX: the original opened the file inline
    # and never closed it; use a context manager so the handle is closed.
    with open(os.path.join(chunk_path, chunk_file_name), 'rb') as fh:
        allchunks = pickle.load(fh)
    for chunk in allchunks:
        actr.add_dm(chunk)
Example #25
0
def task(trials):
    """
    Present the trials and monitor the model's key-press responses.
    :param trials: the trial list
    :return: whatever do_experiment returns
    """

    # monitor the output-key
    actr.add_command("paired-response", respond_to_key_press,
                     "Paired associate task key press response monitor")
    actr.monitor_command("output-key", "paired-response")

    try:
        result = do_experiment(trials)
    finally:
        # Remove the monitor even if the experiment raises, so repeated
        # calls don't accumulate stale commands.
        actr.remove_command_monitor("output-key", "paired-response")
        actr.remove_command("paired-response")
    return result
Example #26
0
def collect_responses(count):
    """Present the first trial and gather count responses."""
    global results

    results = []

    actr.add_command("zbrodoff-response", respond_to_key_press,
                     "Zbrodoff task key press response monitor")
    actr.monitor_command("output-key", "zbrodoff-response")

    present_trial(trials[0])

    # The model gets 10 s per response; a person runs until enough
    # responses have been collected.
    if run_model:
        actr.run(10 * count)
    else:
        while len(results) < count:
            actr.process_events()

    actr.remove_command_monitor("output-key", "zbrodoff-response")
    actr.remove_command("zbrodoff-response")
Example #27
0
def arbitrary ():
    """Move an arbitrary visicon feature via a self-rescheduling event."""

    actr.reset()

    feature = actr.add_visicon_features(['screen-x',15,'screen-y',20,'value',"'x'"])[0]
    x = 15

    # The periodic-event command isn't available at this point, so use a
    # relative maintenance event that reschedules itself to move the
    # feature.
    actr.add_command("arbitrary-move-it",move_feat)
    actr.schedule_event_relative (1, "arbitrary-move-it",params=[feature,x],maintenance=True)

    # Run in real time.
    actr.run(3,True)

    actr.remove_command("arbitrary-move-it")
def example ():
    """Compare two visicon features to show the effect of a width function.

    The model finds both items and moves the mouse to each so the timing
    difference can be observed.

    One feature uses the normal visual-location and object chunk-types;
    the other uses a custom visual-location type that replaces screen-x,
    screen-y, and distance with slots named x, y, and z:

        (chunk-type custom-location x y z height width size)

    The feature with the standard visual-location slots includes a
    custom width function that makes it act like a circle with twice
    its given height.
    """

    actr.reset()

    actr.add_command("custom-width", custom_width, "Return twice the height of a visual feature.")


    actr.add_visicon_features(['screen-x', 50, 'screen-y', 500, 'height', 20, 'width', 20, ':width-fn', "'custom-width'"],
                              ['isa', 'custom-location', 'x', 150, 'y', 500, 'height', 20, 'width', 20, ':x-slot', 'x', ':y-slot', 'y', ':z-slot', 'z'])

    # Let the vision module process the display, then print the visicon.
    actr.run_n_events(3)
    actr.print_visicon()

    # Move the cursor to 100,0 as a starting point, then to the left
    # location, back to the start, and finally the right location.
    actr.run(10)

    actr.remove_command('custom-width')
Example #29
0
def present_trial(given_letter = None, train_epochs = 1,
            new_window = True, open_window = True, delay_time = 10, display = False):
    """Present one letter-drawing trial and score the model's clicks.

    :param given_letter: letter to present; None picks one at random
    :param train_epochs: training passes applied after an incorrect trial
    :param new_window: unused here -- kept for caller compatibility
    :param open_window: passed to actr.run (real time vs ACT-R time)
    :param delay_time: model time run after training, for decay
    :param display: when True, pre-train the chosen letter 10 times
    :return: (clickList, correct)
    """

    global window, window_reading, clickList, chunk_names, chunk_defs

    # Pick a random letter unless one was supplied.
    # BUG FIX: identity comparison with None (was: given_letter == None).
    if given_letter is None:
        letterList = actr.permute_list(list(string.ascii_uppercase))
        chosen_letter = letterList[0]
    else:
        chosen_letter = given_letter

    if display:
        train_n(10, chosen_letter)

    clickList = []

    actr.clear_exp_window(window_reading)
    actr.clear_exp_window(window)

    actr.add_text_to_exp_window(window_reading, chosen_letter, 125, 150)

    actr.add_command('process-click',process_click)
    actr.monitor_command('click-mouse','process-click')

    actr.run(1000, open_window)        # for actr time or real time

    correct = correct_drawing(chosen_letter, clickList)

    # Reinforce: one extra pass when correct, train_epochs passes when
    # not; then let delay_time of model time pass either way (the
    # original duplicated the run_full_time call in both branches).
    if correct:
        train_once(chosen_letter)
    else:
        train_n(train_epochs, chosen_letter)
    actr.run_full_time(delay_time)

    actr.remove_command_monitor('click-mouse', 'process-click')
    actr.remove_command('process-click')

    return (clickList, correct)
Example #30
0
def test2(syntax, syntax_corr):
    """Run the two-task priming sequence once and return the response.

    :param syntax: syntax value for the prime sentence (e.g. DO/PO)
    :param syntax_corr: whether the prime's syntax is correct
    :return: the recorded response
    """

    actr.reset()

    # Prime sentence chunk spec for the given syntax condition.
    prime = ['isa', 'sentence',
             'string', '...',
             'noun1', 'n1',
             'noun2', 'n2',
             'verb', 'v',
             'syntax', syntax,
             'syntax-corr', syntax_corr]

    actr.add_command("model1-key-press", respond_to_speech,
                     "model1 task output-key monitor")
    actr.monitor_command("output-speech", "model1-key-press")

    global response
    response = False

    task1(prime)
    task2()

    actr.remove_command_monitor("output-speech", "model1-key-press")
    actr.remove_command("model1-key-press")
    return response