def example():
    """Demo: install a custom visual-object creator and display two visicon features."""

    global ids

    actr.reset()

    # Expose the Python callback to ACT-R so the device can invoke it.
    actr.add_command("custom-object-chunk", custom_object_chunk,
                     "Command to dynamically create visual object")

    # Here we install our custom function using an object-creator device
    actr.install_device(["vision", "object-creator", "custom-object-chunk"])

    # Now we add two features to the display, the first of which will
    # have the object created by the custom function and the other by
    # the default mechanism.

    # NOTE(review): stored in a global — presumably read back by
    # custom_object_chunk to identify the features; confirm.
    ids = actr.add_visicon_features(["screen-x", 10, "screen-y", 20],
                                    ["screen-x", 100, "screen-y", 20])

    # run to give vision module a chance to
    # process those features and then print the
    # visicon.

    actr.run_n_events(3)
    actr.print_visicon()

    # run to have model complete task
    actr.run(1)

    actr.remove_command("custom-object-chunk")
Esempio n. 2
0
def trials(n=200,reset=True,output=True):
    """Run n production-compilation trials and return the analyzed results.

    :param n: number of trials to present
    :param reset: when True, reset ACT-R before starting
    :param output: forwarded to analyze_results to control printing
    :return: whatever analyze_results produces for this run
    """
    global responses,task_over,exp_length,window

    if reset:
        actr.reset()

    window = actr.open_exp_window("Compilation task",visible=False)
    # (Removed an unused local `times` list that was never read or written.)
    responses = []
    task_over = False
    exp_length = n
    present_next_trial()
    actr.install_device(window)
#    actr.add_command('compilation-issues-game-over',game_over,"Test for the production compilation issues game being over")
    actr.add_command('compilation-issues-response',respond_to_key_press,"Compilation issues key press response monitor")
    actr.monitor_command('output-key','compilation-issues-response')

#   this is how the original ran: actr.run_until_condition('compilation-issues-game-over')
#   however performing a remote call for each event to determine the stopping point
#   takes almost 2 orders of magnitude longer to run!  So instead just run 
#   sufficiently long to complete the task and assume the model stops when there's
#   nothing left to do.

    actr.run(20000)

    actr.remove_command_monitor('output-key','compilation-issues-response')
    actr.remove_command ('compilation-issues-response')
#    actr.remove_command ('compilation-issues-game-over')

    return analyze_results(output)
Esempio n. 3
0
def demo_table():
    """Tokenize a spoken command and feed its words to the model as audio."""
    actr.reset()
    #init()

    # Manually supplied sentence (no speech synthesizer involved).
    text = "give fork"

    # Alternative input sources, kept for reference:
    #text = AL_speech_recognition()    # Aldebran proxy speech recognition
    #text = GAPI_speech_recognition()  # Google API (not on Python 2.7)

    tokens = tokenize(text)
    onset = 0
    actr.set_parameter_value(":sound-decay-time", 0.2)
    #actr.set_parameter_value(":save-audicon-history", True)
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])

    # Present each WORD token as a sound, 200 ms apart.
    for tok in tokens:
        if TOK.descr[tok.kind] == "WORD":
            spoken = str(tok.txt)
            print(spoken)
            actr.new_word_sound(spoken, onset)
            onset += 0.2

    actr.run(30)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Esempio n. 4
0
def do_experiment(trials, test=False):
    """
    Run the gambling experiment and return the simulated model behavior.

    :param trials: iterable of (prompt, feedback, block_type) tuples
    :param test: when True, replace the scripted feedback with the output of
                 test_unit5 so we can check that the model learns from feedback
    :return: list of (feedback, block_type, response, time) tuples
    """
    #TODO: need to comment if separate to core and body script
    # actr.reset()

    result = []

    window = actr.open_exp_window("Gambling Experiment", visible=False)
    actr.install_device(window)

    for trial in trials:
        # time = 0
        prompt, feedback, block_type = trial

        # guess
        response, time = do_guess(prompt, window)

        # this  test is to see if model can learn feedback
        if test: feedback = test_unit5(response)

        # encode feedback
        do_feedback(feedback, window)
        result.append((feedback, block_type, response, time))

    return result
def experiment(subject_num=999, condition=1, data_dir="."):
    """Run the full RLWM experiment: learning phase then the random test phase.

    :param subject_num: subject id used to locate the per-subject csv files
    :param condition: experimental condition passed to the task objects
    :param data_dir: directory the result data is written into (created if missing)
    """
    # makedirs with exist_ok replaces the racy isdir/mkdir pair (and the
    # redundant '{}'.format wrapping) and also creates missing parents.
    os.makedirs(data_dir, exist_ok=True)

    # --- Learning phase ---------------------------------------------------
    t1 = RLWM("data/{}/block_df.csv".format(subject_num), subject_num,
              condition)
    t1.start = time.monotonic()
    t1.experiment_initialization(vis=False)
    # Initial transition
    t1.schedule_initial_stimuli(t1.set_sizes[0], True)
    logger.info("%s:state %s, experiment():initial transition" %
                ("{0:.2f}".format(t1.curr_time()), t1.current_state))
    actr.install_device(t1.window)
    # actr.run(10, False) # debugging
    actr.run(1200, False)  #<-- run() blocks
    t1.write_data(data_dir)
    t1.experiment_cleanup()
    actr.remove_device(t1.window)

    # --- Test phase (random order) ----------------------------------------
    t2 = RLWMTestRandom("data/{}/test_phase_random.csv".format(subject_num),
                        subject_num, condition)
    t2.start = time.monotonic()
    t2.experiment_initialization(vis=False)
    # put up test in 7 minutes
    t2.schedule_initial_stimuli(7 * 60)
    # t2.schedule_initial_stimuli(0) # for debugging
    logger.info("%s:state %s, experiment():test initial transition" %
                ("{0:.2f}".format(t2.curr_time()), t2.current_state))
    actr.install_device(t2.window)
    # need to run for additional 7 minutes for OSPAN/NBack break
    actr.run(7 * 60 + 1200, False)  #<-- run() blocks
    # actr.run(20+7*60,False)            # debugging
    t2.write_data(data_dir)
    t2.experiment_cleanup()
    actr.remove_device(t2.window)
Esempio n. 6
0
def agi ():
    """AGI demo: display an 'x' in a visible window and move it via a self-rescheduling event."""

    actr.reset()
  
    # open a window and add the text
  
    window = actr.open_exp_window("Moving X", visible=True)
    text = actr.add_text_to_exp_window(window, "x", x=10, y=10)
    y = 10
    
    actr.install_device(window)
    
    # schedule an event to move it but don't have the 
    # periodic-event command available at this point so
    # just use a relative event which schedules itself
    
    actr.add_command("agi-move-it",move_text)

    # maintenance=True keeps the event from affecting the model's own processing.
    actr.schedule_event_relative (1, "agi-move-it",params=[text,y],maintenance=True)

    # run the model in real time since it's a visible window
    
    actr.run(3,True)

    actr.remove_command("agi-move-it")
Esempio n. 7
0
def single_trial(prime_stimulus, **param_set):
    """
    Simulate a single priming trial, retrying until the model responds.

    The model is reset at the beginning of each attempt. Its response is
    collected (via the output-speech monitor) as either DO/PO for a
    simplified version of the full sentence.

    :param prime_stimulus: sequence describing the prime stimulus/condition
    :param param_set: optional ACT-R parameters re-applied after every reset
    :return: the recorded response (set globally by respond_to_speech)
    """

    global response
    response = False

    # Keep re-running the trial until the speech monitor records a response.
    while not response:
        actr.reset()
        actr.install_device(("speech", "microphone"))
        if param_set: set_parameters(**param_set)        #reset param
        # actr.record_history('BUFFER-TRACE','production-graph-utility')

        # actr.record_history('buffer-trace', 'goal')
        # actr.set_parameter_value(':v', 't')
        # (Removed dead locals `syntax`/`syntax_corr`: they were read from
        # prime_stimulus but never used.)

        actr.add_command("model1-key-press", respond_to_speech,
                         "model1 task output-key monitor")
        actr.monitor_command("output-speech", "model1-key-press")

        task1(prime_stimulus)
        task2()

        actr.remove_command_monitor("output-speech", "model1-key-press")
        actr.remove_command("model1-key-press")

    return response
Esempio n. 8
0
def experiment(human=False):
    """Run one letter-recognition trial; return the key pressed (or False).

    :param human: True to collect a key press from a person, False to run the model
    :return: the key recorded by respond_to_key_press, or False if none
    """

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    text1 = items[0]
    window = actr.open_exp_window("Letter recognition")

    actr.add_text_to_exp_window(window, text1, x=125, y=150)

    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.monitor_command("output-key", "demo2-key-press")

    global response
    response = False

    # Idiomatic truth tests replace `human == True` / `response == False`;
    # `is False` still checks the exact sentinel the monitor overwrites.
    if human:
        while response is False:
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "demo2-key-press")
    actr.remove_command("demo2-key-press")

    return response
def do_experiment(sticks, human=False):
    """Present one building-sticks trial; a human or the model supplies the moves.

    NOTE(review): `window` and `visible` are module globals assumed to be set
    up elsewhere (presumably before/by build_display) — confirm.
    """
    build_display(*sticks)

    if human:
        wait_for_human()
    else:
        actr.install_device(window)
        actr.start_hand_at_mouse()
        actr.run(100, visible)
Esempio n. 10
0
def task(which=False):
    """Run one next/previous-letter trial and return [direction, correct?].

    :param which: 'next' or 'previous' to force the prompt type; any other
                  value picks one of the two at random
    :return: [direction, True/False] — True means a correct, timely answer
    """
    global response, response_time

    actr.reset()
    alphabet = [
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
        "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
    ]
    letter = actr.permute_list(alphabet)[0]
    # Prepend "Z" so the index arithmetic below treats Z as preceding A.
    alphabet = ["Z"] + alphabet

    # Renamed from `task` to avoid shadowing this function's own name.
    if which == 'next' or which == 'previous':
        direction = which
    elif actr.random(2) == 0:
        direction = 'next'
    else:
        direction = 'previous'

    # Prompt appears at a random time between 1.5s and 2.5s (in ms).
    time = 1500 + actr.random(1000)

    window = actr.open_exp_window("Simple task")

    actr.install_device(window)

    actr.add_command("pm-issues-response", respond_to_key_press,
                     "Perceptual-motor issues task response")
    actr.monitor_command("output-key", "pm-issues-response")

    actr.add_text_to_exp_window(window, letter, x=130, y=150)

    actr.add_command("pm-issue-display", display_prompt,
                     "Perceptual-motor issues task prompt display")

    actr.schedule_event_relative(time,
                                 "pm-issue-display",
                                 params=[window, direction],
                                 time_in_ms=True)

    response = []
    response_time = False

    actr.run(10, True)

    actr.remove_command("pm-issue-display")
    actr.remove_command_monitor("output-key", "pm-issues-response")
    actr.remove_command("pm-issues-response")

    # Correct means: exactly two key presses, the second one after the prompt,
    # the first echoing the displayed letter, and the second being the
    # alphabetically adjacent letter in the requested direction.
    if (len(response) == 2 and response_time > time and response[0] == letter
            and ((direction == 'next' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) - 1)) or
                 (direction == 'previous' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) + 1)))):
        result = True
    else:
        result = False

    return [direction, result]
Esempio n. 11
0
def demo_mirror():
    """Run the mirroring demo: record the model's inner speech for 60 seconds."""
    init()
    actr.reset()
    # Route the model's speech output through our recording callback.
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])
    actr.run(60)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Esempio n. 12
0
def trial(onset_time):
    """Run one Sperling partial-report trial; return the score for the cued row.

    :param onset_time: when the row-cue tone is played, relative to trial start
    """

    actr.reset()

    letters = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    answers = []
    row = actr.random(3)
    window = actr.open_exp_window("Sperling Experiment", visible=True)

    # Lay out a 3x4 grid of letters; remember the ones in the cued row.
    for r in range(3):
        for c in range(4):
            ch = letters[c + (r * 4)]
            if r == row:
                answers.append(ch)
            actr.add_text_to_exp_window(window,
                                        ch,
                                        x=(75 + (c * 50)),
                                        y=(100 + (r * 50)))

    actr.install_device(window)

    # The tone frequency cues which row to report: high/mid/low.
    freq = {0: 2000, 1: 1000}.get(row, 500)

    actr.new_tone_sound(freq, .5, onset_time)
    actr.schedule_event_relative(900 + actr.random(200),
                                 "clear-exp-window",
                                 params=[window],
                                 time_in_ms=True)

    global responses
    responses = []

    actr.add_command("sperling-response", respond_to_key_press,
                     "Sperling task key press response monitor")
    actr.monitor_command("output-key", "sperling-response")

    actr.run(30, True)

    actr.remove_command_monitor("output-key", "sperling-response")
    actr.remove_command("sperling-response")

    if show_responses:
        print("answers: %s" % answers)
        print("responses: %s" % responses)

    return compute_score(answers)
Esempio n. 13
0
def present_trial(trial, new_window=True):
    """Display one alpha-arithmetic problem and stamp its start time.

    :param trial: object with .text, .visible, and a writable .start attribute
    :param new_window: open a fresh experiment window (True) or clear the
                       current one (False)
    """
    if new_window:
        w = actr.open_exp_window("Alpha-arithmetic Experiment",
                                 visible=trial.visible)
        # run_model is a module global; only a model run needs the device.
        if run_model:
            actr.install_device(w)
    else:
        actr.clear_exp_window()

    actr.add_text_to_exp_window(None, trial.text, x=100, y=150)

    # NOTE(review): presumably get_time(run_model) returns model time when
    # the model runs and real time otherwise — confirm.
    trial.start = actr.get_time(run_model)
Esempio n. 14
0
    def simulate(self, trace=False, utility_offset=True):
        """Runs SP simulations using real stimuli.

        For every condition in self.CONDITIONS, runs self.n trials; each trial
        resets the model, opens a (virtual) window for the AGI microphone, and
        records the spoken response via the output-speech monitor.

        :param trace: when False, silence the model trace (:V nil)
        :param utility_offset: unused here — the "parser-offset" hook is
            installed unconditionally (NOTE(review): confirm whether this
            flag was meant to gate the add_command below).
        """
        # Function hook to modify the utility calculation
        # (will add a mismatch penalty). Need to be defined
        # before the model is loaded
        
        actr.add_command("parser-offset", self.utility_offset,
                         "Calculates a mismatch penalty for AI condition")

        actr.load_act_r_model(self.model)

        
        for condition in self.CONDITIONS:
            self.current_condition = condition
            subset = [t for t in self.trials if t.condition == condition]
            for j in range(self.n):
                actr.reset()

                # Make the model silent in case
                
                if not trace:
                    actr.set_parameter_value(":V", False)

                # The model does not really need a visual interface,
                # but the default AGI provides a virtual mic to record
                # voice output.
        
                win = actr.open_exp_window("SP", width = 80,
                                           height = 60,
                                           visible=False)
                actr.install_device(win)

                # Function hooks to record the model responses. 
                
                actr.add_command("record-response", self.record_response,
                                 "Accepts a response for the SP task")
                actr.monitor_command("output-speech",
                                     "record-response")

                
                # Run a single trial in the given condition
                
                trial = random.choice(subset)
                self.run_trial(trial)

                # Clean up the function hooks
                
                actr.remove_command_monitor("output-speech",
                                            "record-response")
                actr.remove_command("record-response")
                
        # Removes the offset
        actr.remove_command("parser-offset")
Esempio n. 15
0
def run_experiment(model_name="response-monkey.lisp",
                   time=200,
                   verbose=True,
                   visible=True,
                   trace=True,
                   params=[]):
    """Runs an experiment.

    :param model_name: lisp model file loaded from this script's directory
    :param time: how long to run the model
    :param verbose: when True, print the task statistics afterwards
    :param visible: whether the experiment window is shown
    :param trace: when False, silence the model trace (:V nil)
    :param params: (name, value) pairs applied via set_parameter_value
    :return: the StroopTask object, for further analysis of its data
    """
    actr.reset()
    # current directory
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    actr.load_act_r_model(os.path.join(curr_dir, model_name))

    # Set the model parameters
    for name, val in params:
        actr.set_parameter_value(name, val)

    win = actr.open_exp_window("* STROOP TASK *",
                               width=800,
                               height=600,
                               visible=visible)

    actr.install_device(win)

    task = StroopTask(setup=False)
    #task.window = win

    actr.add_command("stroop-next", task.next, "Updates the internal task")
    actr.add_command("stroop-update-window", task.update_window,
                     "Updates the window")
    actr.add_command("stroop-accept-response", task.accept_response,
                     "Accepts a response for the Stroop task")

    actr.monitor_command("output-key", "stroop-accept-response")

    task.setup(win)
    if not trace:
        actr.set_parameter_value(":V", False)
    actr.run(time)
    if verbose:
        print("-" * 80)
        task.print_stats(task.run_stats())

    # Cleans up the interface
    # (Removes all the links between ACT-R and this object).

    actr.remove_command_monitor("output-key", "stroop-accept-response")
    actr.remove_command("stroop-next")
    actr.remove_command("stroop-update-window")
    actr.remove_command("stroop-accept-response")

    # Returns the task as a Python object for further analysis of data
    return task
Esempio n. 16
0
def _wait_five_seconds(model, start):
    """Advance 5 seconds: run the model, or pump events for a human subject."""
    if model:
        actr.run_full_time(5)
    else:
        while (actr.get_time(False) - start) < 5000:
            actr.process_events()


def do_experiment(size, trials, human):
    """Run the paired-associate task and return per-trial (accuracy, latency).

    :param size: number of pairs presented (taken from the end of `pairs`)
    :param trials: number of passes over the selected pairs
    :param human: True for a human subject (visible window), False for the model
    :return: list of (proportion correct, mean correct latency in seconds)
    """

    actr.reset()

    result = []
    model = not human
    window = actr.open_exp_window("Paired-Associate Experiment", visible=human)

    if model:
        actr.install_device(window)

    for i in range(trials):
        score = 0
        time = 0

        for prompt, associate in actr.permute_list(pairs[20 - size:]):

            # Prompt phase: show the cue and wait 5s for an answer.
            actr.clear_exp_window(window)
            actr.add_text_to_exp_window(window, prompt, x=150, y=150)

            global response
            response = ''
            start = actr.get_time(model)

            _wait_five_seconds(model, start)

            # response / response_time are set by the key-press monitor.
            if response == associate:
                score += 1
                time += response_time - start

            # Study phase: show the correct associate for 5s.
            actr.clear_exp_window(window)
            actr.add_text_to_exp_window(window, associate, x=150, y=150)
            start = actr.get_time(model)

            _wait_five_seconds(model, start)

        if score > 0:
            average_time = time / score / 1000.0
        else:
            average_time = 0

        result.append((score / size, average_time))

    return result
Esempio n. 17
0
def trial(arg1,arg2):
    """Present two digit sounds and return the model's response.

    :param arg1: first digit, spoken to the model at time 0
    :param arg2: second digit, presented 0.75s later
    :return: the global response recorded by the speech monitor (False if none)
    """

    actr.reset()
    actr.install_device(["speech","microphone"])
    # add_speech_monitor reports whether it installed the monitor here
    # (and therefore whether we are responsible for removing it).
    need_to_remove = add_speech_monitor()
    actr.new_digit_sound(arg1)
    actr.new_digit_sound(arg2,.75)
    global response
    response = False
    actr.run(30)
    if need_to_remove:
        remove_speech_monitor()

    return response
Esempio n. 18
0
def init_all(open_window = False):
    """Reset ACT-R and (re)build the two task windows and the chunk tables.

    :param open_window: forwarded to open_exp_window as its visibility flag
    """
    global window, window_reading, chunk_defs, chunk_names, correct_dict

    actr.reset()

    # NOTE(review): windowX / windowY are module globals — presumably the
    # shared window position; confirm.
    window = actr.open_exp_window("Letter Writing", open_window,
                                    windowX, windowY, 200, 300)
    window_reading = actr.open_exp_window("Letter Goal", open_window,
                                    windowX, windowY, 800, 300)
    actr.install_device(window_reading)
    actr.install_device(window)
    # Same generator builds both: full definitions vs. names only.
    chunk_defs = define_chunks(defs = True)
    chunk_names = define_chunks(defs = False)
    correct_dict = correctDict.make_correct_dict()
Esempio n. 19
0
def experiment(human=False):
    """Run one odd-letter-out trial; True iff the target letter was reported.

    :param human: True to collect a key press from a person, False to run the model
    :return: True when the pressed key matches the target letter (case-insensitive)
    """

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    target = items[0]
    foil = items[1]
    window = actr.open_exp_window("Letter difference")
    text1 = foil
    text2 = foil
    text3 = foil
    index = actr.random(3)

    # Place the single target among the three positions at random.
    if index == 0:
        text1 = target
    elif index == 1:
        text2 = target
    else:
        text3 = target

    actr.add_text_to_exp_window(window, text1, x=125, y=75)
    actr.add_text_to_exp_window(window, text2, x=75, y=175)
    actr.add_text_to_exp_window(window, text3, x=175, y=175)

    actr.add_command("unit2-key-press", respond_to_key_press,
                     "Assignment 2 task output-key monitor")
    actr.monitor_command("output-key", "unit2-key-press")

    global response
    response = ''

    # Idiomatic truth test replaces `human == True`.
    if human:
        # Pump events until the key-press monitor records a response.
        while response == '':
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "unit2-key-press")
    actr.remove_command("unit2-key-press")

    # Direct boolean result replaces the if/else True/False ladder.
    return response.lower() == target.lower()
def model_loop():
    """Install the stimulus/feedback/response commands and run the task for 45s."""

    global win
    actr.add_command('present_stim', present_stim, 'presents stimulus')
    actr.add_command('present_feedback', present_feedback, 'presents feedback')
    actr.add_command('get_response', get_response, 'gets response')

    #open window for interaction
    win = actr.open_exp_window("test", visible=False)
    actr.install_device(win)
    # Kick off the first stimulus immediately.
    actr.schedule_event_relative(0, 'present_stim')

    #waits for a key press?
    actr.monitor_command("output-key", 'get_response')
    actr.run(45)
Esempio n. 21
0
def test_unit3():
    """This test unit examines the trace for one prompt/feedback cycle."""
    # Fail fast if the model/environment did not load correctly.
    assert check_load()
    trial = ('?', 'Punishment', 'MostlyReward')
    prompt, feedback, block_type = trial

    window = actr.open_exp_window("Gambling Experiment", visible=False)
    actr.install_device(window)
    # Prompt phase: 5 simulated seconds.
    actr.clear_exp_window(window)
    actr.add_text_to_exp_window(window, prompt, x=150, y=150)
    actr.run_full_time(5)

    # Feedback phase: another 5 simulated seconds.
    actr.clear_exp_window(window)
    actr.add_text_to_exp_window(window, feedback, x=150, y=150)
    actr.run_full_time(5)
Esempio n. 22
0
def test_it ():
    """Multi-model speech test: relay every model's speech output for 10 seconds."""

    actr.reset()
  
    actr.add_command("relay-speech-output", relay_speech_output, "Handle player speak actions")
    actr.monitor_command("output-speech","relay-speech-output")

    # Every model in the meta-process gets its own speech device.
    for m in actr.mp_models():
        actr.set_current_model(m)
        actr.install_device(["speech","microphone"])

    actr.run(10)
    
    actr.remove_command_monitor("output-speech", "relay-speech-output")    
    actr.remove_command("relay-speech-output")
Esempio n. 23
0
def experiment(human=False):
    """Collect one response (human key press or model run) and return it.

    NOTE(review): assumes the 'sp-key-press' command/monitor and the module
    global `window` were set up by the caller — confirm.
    """
    global response
    response = False
    if human == True:
        while response == False:
            actr.process_events()
    else:
        actr.install_device(window)
        actr.start_hand_at_mouse()
        actr.run(10, True)
    actr.remove_command_monitor("output-key", "sp-key-press")
    actr.remove_command("sp-key-press")
    print(actr.get_time(model_time=True))
    # Let any remaining scheduled events finish before returning.
    actr.run_n_events(2, real_time=False)
    return response
Esempio n. 24
0
def sentence(person, location, target, term):
    """Run one fan-effect trial and return (latency in s, answered correctly).

    :param person: person word shown on the left
    :param location: location word shown on the right
    :param target: True when the sentence was studied ('k' is the correct key),
                   False for a foil ('d' is correct)
    :param term: 'person' to force retrieval by person, anything else by location
    :return: (response time in seconds, correct?) — (30, False) on timeout
    """

    actr.reset()

    # (Removed an unused local `x = 25` that was never read.)
    window = actr.open_exp_window("Sentence Experiment",
                                  visible=False,
                                  width=600,
                                  height=300)

    actr.install_device(window)

    actr.add_command("fan-response", respond_to_key_press,
                     "Fan experiment model response")
    actr.monitor_command("output-key", "fan-response")

    # Force retrieval by the requested term by disabling the competitor.
    if term == 'person':
        actr.pdisable("retrieve-from-location")
    else:
        actr.pdisable("retrieve-from-person")

    actr.add_text_to_exp_window(window, person, x=50, y=150, width=75)
    actr.add_text_to_exp_window(window, location, x=250, y=150, width=75)

    global response, response_time

    response = ''
    response_time = 0

    actr.run(30)

    actr.remove_command_monitor("output-key", "fan-response")
    actr.remove_command("fan-response")

    if response == '':
        # No key press within 30s counts as a timeout.
        return (30, False)

    # 'k' means "studied", 'd' means "not studied"; the four-way if/else
    # (and the redundant 'k'.lower()) collapse to one comparison.
    correct_key = 'k' if target else 'd'
    return (response_time / 1000, response.lower() == correct_key)
Esempio n. 25
0
def setup_experiment(human=True):
    '''
    Load the correct ACT-R model, and create a window to display the words.

    :param human: when True the window is visible (a person is watching)
    :return: the experiment window handle

    NOTE(review): `subject` is a module global; if it is neither "controls"
    nor "depressed" no model is loaded but a window is still returned —
    confirm that is intended.
    '''
    if subject == "controls":
        actr.load_act_r_model(
            "/Users/chisa/Desktop/masters/first_year_research_project/my_model/csm_free_recall_model.lisp"
        )
    elif subject == "depressed":
        actr.load_act_r_model(
            "/Users/chisa/Desktop/masters/first_year_research_project/my_model/csm_free_recall_model_depressed.lisp"
        )

    window = actr.open_exp_window("Free Recall Experiment",
                                  width=1024,
                                  height=768,
                                  visible=human)  # 15inch resolution window
    actr.install_device(window)
    return window
Esempio n. 26
0
def test1():
    """Run DO-prime trials only and print the DO/PO response proportions."""
    # only DO trials - 10
    trials = []
    num_trials = 1
    response_list = []
    prime_template = ['isa', 'sentence',
                      'string', '...',
                      'noun1', 'n1',
                      'noun2', 'n2',
                      'verb', 'v',
                      'syntax', 'DO',
                      'syntax-corr', 'yes']
    # num_trials is already an int; the redundant int() cast is removed.
    for _ in range(num_trials):
        prime_sentence = copy(prime_template)
        # Explicitly (re)set the condition slots for this trial.
        prime_sentence[-3] = 'DO'
        prime_sentence[-1] = 'yes'
        trials.append(prime_sentence)

    actr.reset()

    # install device
    actr.install_device(("speech", "microphone"))

    for i in range(num_trials):
        response = single_trial(trials[i])
        syn = trials[i][-3]
        syn_corr = trials[i][-1]
        print("prime:",syn, syn_corr, "resp:", response)
        # if response=='failure':
            # print("---------------")
            #actr.sdp('DO-form', 'PO-form')
            #actr.whynot('step6-1')
            # actr.whynot_dm('DO-form', 'PO-form')

        response_list.append(response)
    print("response count()",
        "DO:", response_list.count("DO"),
        "PO:", response_list.count("PO"), "\ntotal: ", num_trials)
    print("prop_DO", response_list.count("DO")*1.0/(response_list.count("DO")+response_list.count("PO")))
Esempio n. 27
0
def run_test(visible=False):
    """Overlay a 3x3 grid of named 'brain' visicon features on a background image.

    :param visible: when True show a real window and run in real time
    """

    actr.reset()

    win = actr.open_exp_window("background test",
                               visible=visible,
                               width=390,
                               height=390,
                               x=100,
                               y=100)

    actr.install_device(win)

    actr.start_hand_at_mouse()

    actr.add_image_to_exp_window(win,
                                 "background",
                                 "ref-brain.gif",
                                 x=0,
                                 y=0,
                                 width=390,
                                 height=390)

    # Nine individually-named features on a 130-pixel grid over the image.
    for x in range(3):
        for y in range(3):
            actr.add_visicon_features([
                "screen-x", 164 + (x * 130), "screen-y", 164 + (y * 130),
                "height", 128, "width", 128, "name",
                ["'brain'", "'brain-%d'" % (x + (y * 3))]
            ])

    # run for the vision module to process the scene
    actr.run_n_events(2)

    actr.print_visicon()

    # run for up to 10 seconds using real-time if the window is visible
    actr.run(10, visible)
Esempio n. 28
0
def run_test (visible=False):
    """Demo the three ways of adding images: plain, created item, action with params.

    :param visible: when True show a real window and run in real time
    """

    actr.reset()
  
    win = actr.open_exp_window("image test",visible=visible, width=310, height=420)
    
    actr.install_device(win)
    
    actr.start_hand_at_mouse()
    
    # Plain image with no click action.
    actr.add_image_to_exp_window(win, "logo", "smalllogo.gif", x=10, y=10, width=288, height=142)
    
    # Image created as an item first, with a click-action command attached.
    actr.add_items_to_exp_window(win, actr.create_image_for_exp_window(win, "brain", "ref-brain.gif", x=10, y=160, width=128, height=128, action="click-brain-py"))
    
    # Image whose click action carries an extra string parameter.
    actr.add_image_to_exp_window(win, "brain-2", "ref-brain.gif", x=10, y=290, width=128, height=128, action=["click-brain-2-py","this string"])
    
    # run for the vision module to process the scene
    actr.run_n_events(2) 
    
    actr.print_visicon()
    
    # run for up to 5 seconds using real-time if the window is visible
    actr.run(5, visible)
Esempio n. 29
0
def example():
    """Demo: install two windows plus the mouse cursor and print the visicon."""
    actr.reset()

    # The first window is located at x=0 and y=0 which
    # is fine for virtual windows, but if it were a real
    # window that window would be behind the menu bar
    # at the top of the display under OS X which makes
    # it difficult to interact with.  The second window
    # is located at x=200, y=200.  The default mouse
    # position is x=0, y=0.

    w1 = actr.open_exp_window("W1",
                              visible=False,
                              width=100,
                              height=100,
                              x=0,
                              y=0)
    w2 = actr.open_exp_window("W2",
                              visible=False,
                              width=100,
                              height=100,
                              x=200,
                              y=200)

    # add text to the same local position in each window.

    actr.add_text_to_exp_window(w1, "a", x=10, y=10, color='red')
    actr.add_text_to_exp_window(w2, "a", x=10, y=10, color='blue')

    # Install both windows and the mouse cursor

    actr.install_device(w1)
    actr.install_device(w2)
    actr.install_device(["vision", "cursor", "mouse"])

    # Just run the model to have vision module process
    # things and print the visicon.
    # The model doesn't do anything.

    actr.run(1)
    actr.print_visicon()
Esempio n. 30
0
def play(player1, player2):
    """Run a two-model spoken turn-taking game between player1 and player2.

    Both names must match models in the current meta-process; otherwise the
    function does nothing and returns the (unchanged) global game_over flag.

    :param player1: model name of the red player (starts at position 0, moves first)
    :param player2: model name of the blue player (starts at position 5)
    :return: the global game_over flag set by the game commands
    """
    global window, p1, p2, p1_position, p2_position, current_player, game_over, safety_stop, p1_text, p2_text

    # Case-insensitive check that both players are loaded models.
    if (player1.lower() in (x.lower() for x in actr.mp_models())) and (
            player2.lower() in (x.lower() for x in actr.mp_models())):

        actr.reset()

        actr.set_current_model(player1)

        # create a goal chunk with the player's color and name
        actr.define_chunks([
            'goal', 'isa', 'play', 'my-color', 'red', 'my-name',
            "'%s'" % player1
        ])
        actr.goal_focus('goal')
        actr.set_parameter_value(':show-focus', 'red')

        actr.set_current_model(player2)

        # create a goal chunk with the player's color and name
        actr.define_chunks([
            'goal', 'isa', 'play', 'my-color', 'blue', 'my-name',
            "'%s'" % player2
        ])
        actr.goal_focus('goal')
        actr.set_parameter_value(':show-focus', 'blue')

        window = actr.open_exp_window("game",
                                      visible=True,
                                      width=200,
                                      height=100)
        # Separate window with a STOP button so a human can abort the run.
        safety_window = actr.open_exp_window('Safety',
                                             visible=True,
                                             height=100,
                                             width=100,
                                             x=100,
                                             y=100)

        actr.add_command('stop-a-run', stop_a_run,
                         'Set the flag to terminate the game.')

        actr.add_button_to_exp_window(safety_window,
                                      text="STOP",
                                      x=0,
                                      y=0,
                                      action='stop-a-run',
                                      height=80,
                                      width=80,
                                      color='red')

        p1 = player1
        p2 = player2
        game_over = False
        safety_stop = False
        current_player = player1
        p1_position = 0
        p2_position = 5

        # Every model sees the shared game window.
        for m in actr.mp_models():
            actr.set_current_model(m)
            actr.install_device(window)

        p1_text = actr.add_text_to_exp_window(window,
                                              str(p1_position),
                                              x=20,
                                              y=10,
                                              color='red',
                                              height=30,
                                              width=30,
                                              font_size=20)
        p2_text = actr.add_text_to_exp_window(window,
                                              str(p2_position),
                                              x=140,
                                              y=10,
                                              color='blue',
                                              height=30,
                                              width=30,
                                              font_size=20)

        actr.add_command("process-moves", process_moves,
                         "Handle player speak actions")
        actr.monitor_command("output-speech", "process-moves")

        # speak to all models telling them the name
        # of the first player.

        for m in actr.mp_models():
            actr.set_current_model(m)
            actr.new_word_sound(p1, 0, 'start')

        actr.add_command('is-game-over', is_game_over,
                         'Test whether game should stop running.')
        actr.add_command('set_game_over', set_game_over,
                         'Set the flag to stop the game.')

        # Runs until the is-game-over command returns true.
        actr.run_until_condition('is-game-over', True)

        actr.remove_command_monitor("output-speech", "process-moves")
        actr.remove_command("process-moves")
        actr.remove_command('stop-a-run')
        actr.remove_command('is-game-over')
        actr.remove_command('set_game_over')

    return game_over