def trials(n=200, reset=True, output=True):
    # Run n trials of the production-compilation task and return the analyzed
    # results.  `responses`, `task_over` and `exp_length` are shared with the
    # key-press handler / trial-presentation code defined elsewhere in the file.
    global responses, task_over, exp_length, window
    if reset:
        actr.reset()
    window = actr.open_exp_window("Compilation task", visible=False)
    times = []  # NOTE(review): unused locally — presumably filled elsewhere; confirm
    responses = []
    task_over = False
    exp_length = n
    present_next_trial()
    actr.install_device(window)
    # actr.add_command('compilation-issues-game-over',game_over,"Test for the production compilation issues game being over")
    actr.add_command('compilation-issues-response', respond_to_key_press,
                     "Compilation issues key press response monitor")
    actr.monitor_command('output-key', 'compilation-issues-response')

    # this is how the original ran: actr.run_until_condition('compilation-issues-game-over')
    # however performing a remote call for each event to determine the stopping point
    # takes almost 2 orders of magnitude longer to run! So instead just run
    # sufficiently long to complete the task and assume the model stops when there's
    # nothing left to do.
    actr.run(20000)

    actr.remove_command_monitor('output-key', 'compilation-issues-response')
    actr.remove_command('compilation-issues-response')
    # actr.remove_command ('compilation-issues-game-over')

    return analyze_results(output)
def experiment(human=False):
    """Present one random consonant and collect a single key press.

    With human=True the function pumps GUI events until a person responds;
    otherwise the model is given the window and run for up to 10 s of
    simulated time.  Returns the response recorded by the key-press monitor.
    """
    global response
    actr.reset()
    consonants = ["B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N",
                  "P", "Q", "R", "S", "T", "V", "W", "X", "Y", "Z"]
    shown_letter = actr.permute_list(consonants)[0]
    window = actr.open_exp_window("Letter recognition")
    actr.add_text_to_exp_window(window, shown_letter, x=125, y=150)
    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.monitor_command("output-key", "demo2-key-press")
    response = False
    if human == True:
        # Human participant: wait for the monitor to record a key press.
        while response == False:
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)
    actr.remove_command_monitor("output-key", "demo2-key-press")
    actr.remove_command("demo2-key-press")
    return response
def demo_table(): actr.reset() #init() #...when manually setting the sentence (without syntetizer) text = "give fork" #...when using Aldebran proxy for speech recognition #text = AL_speech_recognition() #...when using Google Api (no Python 2.7) #text = GAPI_speech_recognition() string = tokenize(text) onset = 0 actr.set_parameter_value(":sound-decay-time", 0.2) #actr.set_parameter_value(":save-audicon-history", True) actr.add_command("inner-speech-response", record_model_speech, "Inner speech model response") actr.monitor_command("output-speech", "inner-speech-response") actr.install_device(["speech", "microphone"]) for word in string: if TOK.descr[word.kind] == "WORD": print(str(word.txt)) actr.new_word_sound(str(word.txt), onset) onset = onset + 0.2 actr.run(30) actr.remove_command_monitor("output-speech", "inner-speech-response") actr.remove_command("inner-speech-response")
def single_trial(prime_stimulus, **param_set):
    """
    Simulate a single trial.  At the beginning of each attempt the model is
    reset; the trial is re-run until the model produces a response.  The
    model's response is collected as either DO/PO for a simplified version
    of the full sentence.

    :param prime_stimulus: the prime stimulus, indicating the condition
        (indexed with [-3]/[-1] below, so it behaves like a sequence —
        NOTE(review): the original docstring said "dict type"; confirm)
    :param param_set: optional ACT-R parameter overrides applied after reset
    :return: the model's response (set by the output-speech monitor)
    """
    global response
    response = False
    while not response:
        actr.reset()
        actr.install_device(("speech", "microphone"))
        if param_set:
            set_parameters(**param_set)  # reset param
        # actr.record_history('BUFFER-TRACE','production-graph-utility')
        # actr.record_history('buffer-trace', 'goal')
        # actr.set_parameter_value(':v', 't')
        syntax = prime_stimulus[-3]       # NOTE(review): unused locally
        syntax_corr = prime_stimulus[-1]  # NOTE(review): unused locally
        actr.add_command("model1-key-press", respond_to_speech,
                         "model1 task output-key monitor")
        actr.monitor_command("output-speech", "model1-key-press")
        task1(prime_stimulus)
        task2()
        actr.remove_command_monitor("output-speech", "model1-key-press")
        actr.remove_command("model1-key-press")
    return response
def person():
    """Run one human choice trial.

    Shows a 'choose' prompt, waits for a key press, then reveals the
    outcome ('heads' with probability .9, otherwise 'tails') for one
    second of real time.  Returns the participant's key.
    """
    global response
    window = actr.open_exp_window("Choice Experiment", visible=True)
    actr.add_command("choice-response", respond_to_key_press,
                     "Choice task key response")
    actr.monitor_command("output-key", "choice-response")
    actr.add_text_to_exp_window(window, 'choose', x=50, y=100)
    response = ''
    # Block until the monitor records a key press.
    while response == '':
        actr.process_events()
    actr.clear_exp_window(window)
    answer = 'heads' if actr.random(1.0) < .9 else 'tails'
    actr.add_text_to_exp_window(window, answer, x=50, y=100)
    shown_at = actr.get_time(False)
    # Keep the outcome visible for 1000 ms of real time.
    while (actr.get_time(False) - shown_at) < 1000:
        actr.process_events()
    actr.remove_command_monitor("output-key", "choice-response")
    actr.remove_command("choice-response")
    return response
def task(which=False):
    # One trial of the perceptual-motor issues task: show a random letter,
    # then after a random delay display a prompt (via the scheduled
    # "pm-issue-display" event) asking for the next or previous letter.
    # Returns [task, result] where result is True only when the model pressed
    # the shown letter followed by the correct neighbor, after the prompt.
    global response, response_time
    actr.reset()
    alphabet = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
                "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
                "Y", "Z"]
    letter = actr.permute_list(alphabet)[0]
    # Prepend Z so "previous" of A works: index() returns the FIRST
    # occurrence, so Z is at 0 and A at 1, shifting every letter up by one.
    alphabet = ["Z"] + alphabet
    if which == 'next' or which == 'previous':
        task = which
    elif actr.random(2) == 0:
        task = 'next'
    else:
        task = 'previous'
    time = 1500 + actr.random(1000)  # prompt onset in ms
    window = actr.open_exp_window("Simple task")
    actr.install_device(window)
    actr.add_command("pm-issues-response", respond_to_key_press,
                     "Perceptual-motor issues task response")
    actr.monitor_command("output-key", "pm-issues-response")
    actr.add_text_to_exp_window(window, letter, x=130, y=150)
    actr.add_command("pm-issue-display", display_prompt,
                     "Perceptual-motor issues task prompt display")
    actr.schedule_event_relative(time, "pm-issue-display",
                                 params=[window, task], time_in_ms=True)
    response = []
    response_time = False
    actr.run(10, True)
    actr.remove_command("pm-issue-display")
    actr.remove_command_monitor("output-key", "pm-issues-response")
    actr.remove_command("pm-issues-response")
    # Correct means: exactly two key presses, the second after the prompt
    # appeared, the first echoing the displayed letter, and the second its
    # alphabetic neighbor in the requested direction.
    if (len(response) == 2 and response_time > time
            and response[0] == letter
            and ((task == 'next'
                  and alphabet.index(response[0]) == (alphabet.index(response[1]) - 1))
                 or (task == 'previous'
                     and alphabet.index(response[0]) == (alphabet.index(response[1]) + 1)))):
        result = True
    else:
        result = False
    return [task, result]
def demo_mirror():
    # Mirror demo: initialize the task, give the model a speech/microphone
    # device, monitor its speech output for 60 s of simulated time, then
    # detach the monitor.
    init()
    actr.reset()
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])
    actr.run(60)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
def add_speech_monitor():
    """Install the Siegler speech monitor exactly once.

    Returns True when this call performed the installation, False when the
    monitor was already in place.
    """
    global monitor_installed
    if monitor_installed != False:
        # Already installed — nothing to do.
        return False
    actr.add_command("siegler-response", record_model_speech,
                     "Siegler task model response")
    actr.monitor_command("output-speech", "siegler-response")
    monitor_installed = True
    return True
def trials(n, cont=False, v=False):
    # Run n past-tense learning trials.  Each trial hears two random past
    # tenses, then asks the model to produce one (up to 100 s), padding each
    # trial to 200 s of model time.  Results accumulate in the global
    # `report` (printed in blocks of 100 via rep_f_i); `reward_check` is set
    # by the trigger-reward monitor installed here.
    # cont=True continues a previous run without resetting; v sets :v.
    global report, word_list, reward_check
    actr.add_command("reward-check", verify_reward,
                     "Past tense code check for a reward each trial.")
    actr.monitor_command("trigger-reward", "reward-check")
    if not(cont) or not(word_list):
        # Fresh start: reset the model, rebuild the frequency-weighted verb
        # list, and make sure every word form exists as a chunk.
        actr.reset()
        word_list = make_word_freq_list(verbs)
        new = []
        for x in word_list:
            for y in x[1:]:
                if y not in new:
                    new.append(y)
        for x in new:
            if not(actr.chunk_p(x)):
                actr.define_chunks([x])
        print_header()
        report = []
    actr.set_parameter_value(":v", v)
    # Resume block bookkeeping from wherever the report left off.
    start = 100 * math.floor(len(report) / 100)
    count = len(report) % 100
    for i in range(n):
        add_past_tense_to_memory()
        add_past_tense_to_memory()
        reward_check = False
        target = make_one_goal()
        duration = actr.run(100)[0]
        add_to_report(target, actr.buffer_read('imaginal'))
        actr.clear_buffer('imaginal')
        count += 1
        if count == 100:
            rep_f_i(start, start + 100, 100)
            count = 0
            start += 100
        if not(reward_check):
            actr.print_warning(
                "Model did not receive a reward when given %s." % target[0])
        # Pad the trial out to a fixed 200 s of model time.
        actr.run_full_time(200 - duration)
        if duration == 100:
            actr.print_warning(
                "Model spent 100 seconds generating a past tense for %s." % target[0])
    # Report whatever partial block remains.
    rep_f_i(start, start + count, 100)
    actr.remove_command_monitor("trigger-reward", "reward-check")
    actr.remove_command("reward-check")
def add_key_monitor():
    """Install the 1-hit blackjack key-press monitor exactly once.

    Returns True when this call performed the installation, False when the
    monitor was already in place.
    """
    global key_monitor_installed
    if key_monitor_installed != False:
        # Already installed — nothing to do.
        return False
    actr.add_command("1hit-bj-key-press", respond_to_keypress,
                     "1-hit blackjack task key output monitor")
    actr.monitor_command("output-key", "1hit-bj-key-press")
    key_monitor_installed = True
    return True
def experiment_initialization(self, vis=False):
    # One-time ACT-R interface setup for the assignment 2 task: register the
    # key-press handler and a post-event hook, then open the task window
    # (visible only when vis=True).
    # actr.reset()
    # actr.reload()
    actr.add_command("unit2-key-press", self.respond_to_key_press,
                     "Assignment 2 task output-key monitor")
    actr.add_command("my-event-hook", self.post_event_hook,
                     "called after an event")
    actr.monitor_command("output-key", "unit2-key-press")
    actr.call_command("add-post-event-hook", "my-event-hook")
    # NOTE(review): "Leter" looks like a typo for "Letter", but the window
    # title is a runtime string that may be referenced elsewhere — confirm
    # before renaming it.
    self.window = actr.open_exp_window("Leter difference task", visible=vis)
def trial(onset_time):
    # One Sperling partial-report trial: show a 3x4 grid of random letters,
    # cue one row with a tone at onset_time, clear the display after
    # 900-1100 ms, run the model, and score its key presses against the cued
    # row (via compute_score on the global `responses`).
    actr.reset()
    letters = actr.permute_list(["B", "C", "D", "F", "G", "H", "J", "K", "L",
                                 "M", "N", "P", "Q", "R", "S", "T", "V", "W",
                                 "X", "Y", "Z"])
    answers = []
    row = actr.random(3)  # cued row: 0 = top, 1 = middle, 2 = bottom
    window = actr.open_exp_window("Sperling Experiment", visible=True)
    # Lay the letters out on a 50-pixel grid; remember the cued row's letters
    # as the correct answers.
    for i in range(3):
        for j in range(4):
            txt = letters[j + (i * 4)]
            if i == row:
                answers.append(txt)
            actr.add_text_to_exp_window(window, txt, x=(75 + (j * 50)),
                                        y=(100 + (i * 50)))
    actr.install_device(window)
    # The tone's frequency encodes which row to report
    # (2000 Hz = top, 1000 Hz = middle, 500 Hz = bottom).
    if row == 0:
        freq = 2000
    elif row == 1:
        freq = 1000
    else:
        freq = 500
    actr.new_tone_sound(freq, .5, onset_time)
    actr.schedule_event_relative(900 + actr.random(200), "clear-exp-window",
                                 params=[window], time_in_ms=True)
    global responses
    responses = []
    actr.add_command("sperling-response", respond_to_key_press,
                     "Sperling task key press response monitor")
    actr.monitor_command("output-key", "sperling-response")
    actr.run(30, True)
    actr.remove_command_monitor("output-key", "sperling-response")
    actr.remove_command("sperling-response")
    if show_responses:
        print("answers: %s" % answers)
        print("responses: %s" % responses)
    return (compute_score(answers))
def task(size, trials, human=False):
    """Run the paired-associate experiment with the key-press monitor
    installed around it.

    Installs the "paired-response" output-key monitor, delegates to
    do_experiment(size, trials, human), removes the monitor, and returns
    whatever do_experiment produced.
    """
    monitor = "paired-response"
    actr.add_command(monitor, respond_to_key_press,
                     "Paired associate task key press response monitor")
    actr.monitor_command("output-key", monitor)
    outcome = do_experiment(size, trials, human)
    actr.remove_command_monitor("output-key", monitor)
    actr.remove_command(monitor)
    return outcome
def simulate(self, trace=False, utility_offset=True): """Runs SP simulations using real stimuli""" # Function hook to modify the utility calculation # (will add a mismatch penalty). Need to be defined # before the model is loaded actr.add_command("parser-offset", self.utility_offset, "Calculates a mismatch penalty for AI condition") actr.load_act_r_model(self.model) for condition in self.CONDITIONS: self.current_condition = condition subset = [t for t in self.trials if t.condition == condition] for j in range(self.n): actr.reset() # Make the model silent in case if not trace: actr.set_parameter_value(":V", False) # The model does not really need a visual interface, # but the default AGI provides a virtual mic to record # voice output. win = actr.open_exp_window("SP", width = 80, height = 60, visible=False) actr.install_device(win) # Function hooks to record the model responses. actr.add_command("record-response", self.record_response, "Accepts a response for the SP task") actr.monitor_command("output-speech", "record-response") # Run a single trial in the given condition trial = random.choice(subset) self.run_trial(trial) # Clean up the function hooks actr.remove_command_monitor("output-speech", "record-response") actr.remove_command("record-response") # Removes the offset actr.remove_command("parser-offset")
def run_experiment(model_name="response-monkey.lisp",
                   time=200,
                   verbose=True,
                   visible=True,
                   trace=True,
                   params=[]):
    """Runs an experiment.

    Loads the named model from this file's directory, applies (name, value)
    parameter pairs from `params`, drives a StroopTask through the
    stroop-next / stroop-update-window / stroop-accept-response commands for
    `time` seconds of model time, and returns the task object for analysis.

    NOTE(review): params=[] is a mutable default; it is only iterated (never
    mutated) here, so it is harmless, but a None default would be safer.
    """
    actr.reset()
    # current directory
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    actr.load_act_r_model(os.path.join(curr_dir, model_name))
    # Set then model parameters
    for name, val in params:
        actr.set_parameter_value(name, val)
    win = actr.open_exp_window("* STROOP TASK *", width=800, height=600,
                               visible=visible)
    actr.install_device(win)
    task = StroopTask(setup=False)
    #task.window = win
    actr.add_command("stroop-next", task.next, "Updates the internal task")
    actr.add_command("stroop-update-window", task.update_window,
                     "Updates the window")
    actr.add_command("stroop-accept-response", task.accept_response,
                     "Accepts a response for the Stroop task")
    actr.monitor_command("output-key", "stroop-accept-response")
    task.setup(win)
    if not trace:
        actr.set_parameter_value(":V", False)
    actr.run(time)
    if verbose:
        print("-" * 80)
        task.print_stats(task.run_stats())
    # Cleans up the interface
    # (Removes all the links between ACT-R and this object).
    actr.remove_command_monitor("output-key", "stroop-accept-response")
    actr.remove_command("stroop-next")
    actr.remove_command("stroop-update-window")
    actr.remove_command("stroop-accept-response")
    # Returns the task as a Python object for further analysis of data
    return task
def experiment(human=False):
    """One letter-difference trial.

    Displays three letters — two copies of a foil and one target in a
    randomly chosen position — collects a single key press from a human or
    the model, and returns True when the response matches the target
    (case-insensitively).
    """
    global response
    actr.reset()
    shuffled = actr.permute_list(["B", "C", "D", "F", "G", "H", "J", "K",
                                  "L", "M", "N", "P", "Q", "R", "S", "T",
                                  "V", "W", "X", "Y", "Z"])
    target = shuffled[0]
    foil = shuffled[1]
    window = actr.open_exp_window("Letter difference")
    # Start all three positions at the foil, then overwrite one random
    # position with the target.
    display = [foil, foil, foil]
    display[actr.random(3)] = target
    actr.add_text_to_exp_window(window, display[0], x=125, y=75)
    actr.add_text_to_exp_window(window, display[1], x=75, y=175)
    actr.add_text_to_exp_window(window, display[2], x=175, y=175)
    actr.add_command("unit2-key-press", respond_to_key_press,
                     "Assignment 2 task output-key monitor")
    actr.monitor_command("output-key", "unit2-key-press")
    response = ''
    if human == True:
        while response == '':
            actr.process_events()
    else:
        actr.install_device(window)
        actr.run(10, True)
    actr.remove_command_monitor("output-key", "unit2-key-press")
    actr.remove_command("unit2-key-press")
    return response.lower() == target.lower()
def test_it():
    # Multi-model speech test: give every model in the meta-process a
    # speech/microphone device, relay all speech output through one shared
    # monitor, run everything together for 10 s, then detach the monitor.
    actr.reset()
    actr.add_command("relay-speech-output", relay_speech_output,
                     "Handle player speak actions")
    actr.monitor_command("output-speech", "relay-speech-output")
    for m in actr.mp_models():
        actr.set_current_model(m)
        actr.install_device(["speech", "microphone"])
    actr.run(10)
    actr.remove_command_monitor("output-speech", "relay-speech-output")
    actr.remove_command("relay-speech-output")
def model_loop():
    """Run the stimulus/feedback loop for 45 s of model time.

    Registers the stimulus-presentation, feedback and response commands,
    opens the (invisible) experiment window, schedules the first stimulus
    at time 0 and runs the model.  The monitor and commands are removed
    afterwards — previously they were leaked, which both diverges from the
    cleanup convention used by every other task in this file and causes
    "command already exists" conflicts on repeated calls.
    """
    global win
    actr.add_command('present_stim', present_stim, 'presents stimulus')
    actr.add_command('present_feedback', present_feedback, 'presents feedback')
    actr.add_command('get_response', get_response, 'gets response')
    #open window for interaction
    win = actr.open_exp_window("test", visible=False)
    actr.install_device(win)
    actr.schedule_event_relative(0, 'present_stim')
    #waits for a key press?
    actr.monitor_command("output-key", 'get_response')
    actr.run(45)
    # Clean up the interface (fix: these were never removed before).
    actr.remove_command_monitor("output-key", 'get_response')
    actr.remove_command('present_stim')
    actr.remove_command('present_feedback')
    actr.remove_command('get_response')
def sentence(person, location, target, term):
    """Present one person-location probe of the fan experiment.

    Disables the retrieval strategy NOT under test (per `term`), displays
    the person and location, and runs the model for up to 30 s.  Returns
    (latency-in-seconds, correct?): 'k' is the correct key for targets,
    'd' for foils; no response scores (30, False).
    """
    global response, response_time
    actr.reset()
    window = actr.open_exp_window("Sentence Experiment", visible=False,
                                  width=600, height=300)
    x = 25  # kept from the original; unused
    actr.install_device(window)
    actr.add_command("fan-response", respond_to_key_press,
                     "Fan experiment model response")
    actr.monitor_command("output-key", "fan-response")
    # Force the strategy under test by disabling the competing production.
    if term == 'person':
        actr.pdisable("retrieve-from-location")
    else:
        actr.pdisable("retrieve-from-person")
    actr.add_text_to_exp_window(window, person, x=50, y=150, width=75)
    actr.add_text_to_exp_window(window, location, x=250, y=150, width=75)
    response = ''
    response_time = 0
    actr.run(30)
    actr.remove_command_monitor("output-key", "fan-response")
    actr.remove_command("fan-response")
    if response == '':
        return (30, False)
    expected = 'k' if target else 'd'
    return (response_time / 1000, response.lower() == expected.lower())
def task(trials): """ This function present task and monitor response from model :param size: number of trials to present :param trials: the trial list :return: """ # monitor the output-key actr.add_command("paired-response", respond_to_key_press, "Paired associate task key press response monitor") actr.monitor_command("output-key", "paired-response") result = do_experiment(trials) actr.remove_command_monitor("output-key", "paired-response") actr.remove_command("paired-response") return result
def collect_responses(count):
    # Collect `count` responses for the Zbrodoff task.  The key-press
    # handler appends each response to the global `results` and (per the
    # rest of the task code) chains presentation of the remaining items of
    # the module-level `trials` list.
    global results
    results = []
    actr.add_command("zbrodoff-response", respond_to_key_press,
                     "Zbrodoff task key press response monitor")
    actr.monitor_command("output-key", "zbrodoff-response")
    present_trial(trials[0])
    if run_model:
        # Model run: allow up to 10 s of simulated time per expected response.
        actr.run(10 * count)
    else:
        # Human run: pump GUI events until enough responses have arrived.
        while len(results) < count:
            actr.process_events()
    actr.remove_command_monitor("output-key", "zbrodoff-response")
    actr.remove_command("zbrodoff-response")
def present_trial(given_letter = None, train_epochs = 1, new_window = True,
                  open_window = True, delay_time = 10, display = False):
    # Present one letter-drawing trial: show a letter in the reading window,
    # collect the model's mouse clicks for up to 1000 units of time, check
    # the drawing, and train the model once (if correct) or `train_epochs`
    # times (if not), then let `delay_time` of model time pass.
    # Returns (clickList, correct).
    # NOTE(review): `new_window` is accepted but never used here — confirm
    # whether it is consumed by a caller-side convention.
    global window, window_reading, clickList, chunk_names, chunk_defs
    if (given_letter == None):
        # No letter supplied: pick one uniformly at random.
        letterList = actr.permute_list(list(string.ascii_uppercase))
        chosen_letter = letterList[0]
    else:
        chosen_letter = given_letter
    if display:
        # Optional pre-training on the chosen letter.
        train_n(10, chosen_letter)
    clickList = []
    actr.clear_exp_window(window_reading)
    actr.clear_exp_window(window)
    actr.add_text_to_exp_window(window_reading, chosen_letter, 125, 150)
    actr.add_command('process-click', process_click)
    actr.monitor_command('click-mouse', 'process-click')
    actr.run(1000, open_window)  # for actr time or real time
    correct = correct_drawing(chosen_letter, clickList)
    if (correct):
        train_once(chosen_letter)
        actr.run_full_time(delay_time)
    else:
        train_n(train_epochs, chosen_letter)
        actr.run_full_time(delay_time)
    actr.remove_command_monitor('click-mouse', 'process-click')
    actr.remove_command('process-click')
    return (clickList, correct)
def test2(syntax, syntax_corr):
    """Run one priming test trial for the given syntax condition.

    Builds a prime-sentence template carrying the syntax (DO/PO) and its
    correctness flag, runs task1/task2 with the speech monitor attached,
    and returns the model's recorded response.
    """
    global response
    actr.reset()
    prime_template = ['isa', 'sentence',
                      'string', '...',
                      'noun1', 'n1',
                      'noun2', 'n2',
                      'verb', 'v',
                      'syntax', syntax,
                      'syntax-corr', syntax_corr]
    actr.add_command("model1-key-press", respond_to_speech,
                     "model1 task output-key monitor")
    actr.monitor_command("output-speech", "model1-key-press")
    # (An earlier spreading-activation variant selectively disabled the
    # step5-* productions depending on the condition; that code is retired.)
    response = False
    task1(prime_template)
    task2()
    actr.remove_command_monitor("output-speech", "model1-key-press")
    actr.remove_command("model1-key-press")
    return response
def experiment(human=False):
    """Run the lights-and-gauges monitoring experiment.

    With human=True (and a visible-virtual display available) the function
    waits for a participant response; otherwise it installs the window for
    the model, schedules light-fault events from the `df` event table and
    a stream of gauge-needle movements, runs the model, and prints the
    collected responses and event log.

    Fixes relative to the original:
    * the ``global`` statement listed ``gauge2_startx`` twice — the second
      occurrence was clearly meant to be ``gauge2_starty`` (which is
      assigned below and is needed by the gauge handlers);
    * cleanup removed the output-key monitor but leaked the
      ``demo2-key-press`` and ``delayed_print_visicon`` commands — both are
      now removed like the other commands.
    """
    # Shared with the event/drawing handlers (gauge_*_move, light*_off, ...).
    global response, light1Color, light2Color, window, text1, text2, text3, text4, \
        light1_obj, light2_obj, gauge1_obj, temp1_y, gauge1_startx, gauge1_starty, \
        gauge2_startx, gauge2_starty
    # --- default display parameters ---
    # for lights
    text1 = "A"
    text2 = "B"
    light1Color = "green"
    light2Color = "black"
    light1_obj = None
    light2_obj = None
    # for gauges
    text3 = "C"
    text4 = "D"
    gauge1_startx = 60
    gauge1_starty = 200
    gauge2_startx = 110
    gauge2_starty = 200
    gauge1_rate = 2      # pixels per frame the gauge-1 needle moves (down)
    gauge2_rate = 2      # pixels per frame the gauge-2 needle moves (up)
    fps = 5              # gauge update rate, frames per second
    actr.reset()
    window = actr.open_exp_window("hello world", width=400, height=400,
                                  x=400, y=400)
    # adding commands
    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.add_command("gauge_1_move", gauge_1_move)
    actr.add_command("gauge_2_move", gauge_2_move)
    actr.add_command("light1_off", light1_off)
    actr.add_command("light2_off", light2_off)
    actr.add_command("delayed_print_visicon", delayed_print_visicon)
    actr.add_command("move_y", move_y)
    # monitoring for key board
    actr.monitor_command("output-key", "demo2-key-press")
    # putting the initial screen onto the window
    intialize_screen(window, text1, text2, text3, text4, gauge1_startx,
                     gauge1_starty, gauge2_startx, gauge2_starty)
    response = False
    # human = participant. Good for testing
    if human == True:
        if actr.visible_virtuals_available():
            time.sleep(0)  # lets the program run; may need changing later
            while response == False:
                actr.process_events()
    else:
        actr.install_device(window)
        # --- schedule scripted light faults from the df event table ---
        light1 = 1
        light2 = 1
        time1 = 0
        count = 0
        for event in range(0, df.shape[0]):
            time1 = df["time"][event]
            count += 1
            if df["event"][event] == (
                    " 'Green (First) Light Script-triggered Fault'"
            ) and light1 == 1:
                #actr.schedule_event(time1-.1,"delayed_print_visicon",params=[],maintenance=True)
                actr.schedule_event(time1, "light1_off",
                                    params=[window, text1, "exp"],
                                    maintenance=True)
                event_times.append(time1)
                event_type.append("light1_off")
                #actr.schedule_event(time1+.1,"delayed_print_visicon",params=[],maintenance=True)
            elif df["event"][event] == (
                    " 'Red (Second) Light Script-triggered Fault'"
            ) and light2 == 1:
                actr.schedule_event(time1, "light2_off",
                                    params=[window, text2, "exp"],
                                    maintenance=True)
                event_times.append(time1)
                event_type.append("light2_off")
        # --- schedule the gauge-needle animation frame by frame ---
        temp1_y = gauge1_starty
        temp2_y = gauge2_starty
        time1 = 0
        first_time_exceeded_gauge1 = 0
        first_time_exceeded_gauge2 = 0
        gauges_on = 1
        # if gauges_on == 1:
        for event in range(0, round(df["time"][len(df["time"]) - 1] * fps)):
            time1 = time1 + (1 / fps)
            temp1_y = temp1_y + gauge1_rate
            temp2_y = temp2_y + (gauge2_rate * -1)
            # Gauge 1 turns red outside the 150-250 safe band; the first
            # frame out of band also refreshes the visicon.
            if temp1_y < 150 or temp1_y > 250:
                gauge1_color = "red"
                if first_time_exceeded_gauge1 == 0:
                    first_time_exceeded_gauge1 += 1
                    actr.schedule_event(time1, "delayed_print_visicon",
                                        params=[], maintenance=True)
                    actr.schedule_event(time1 + .1, "delayed_print_visicon",
                                        params=[], maintenance=True)
            else:
                gauge1_color = "black"
                first_time_exceeded_gauge1 = 0
            if temp2_y < 150 or temp2_y > 250:
                gauge2_color = "red"
                if first_time_exceeded_gauge2 == 0:
                    first_time_exceeded_gauge2 += 1
            else:
                gauge2_color = "black"
                first_time_exceeded_gauge2 = 0
            # Needles bounce at the 125/275 hard limits; log the first
            # excursion of each gauge.
            if temp1_y < 125 or temp1_y > 275:
                gauge1_rate = gauge1_rate * -1
                if first_time_exceeded_gauge1 == 1:
                    event_times.append(time1)
                    event_type.append("gauge1_off")
            if temp2_y < 125 or temp2_y > 275:
                gauge2_rate = gauge2_rate * -1
                if first_time_exceeded_gauge2 == 1:
                    event_times.append(time1)
                    event_type.append("gauge2_off")
            #actr.schedule_event(time1-.1,"delayed_print_visicon",params=[],maintenance=True)
            actr.schedule_event(time1, "gauge_1_move",
                                params=[window, text3, gauge1_startx,
                                        temp1_y, gauge1_color],
                                maintenance=True)
            actr.schedule_event(time1, "gauge_2_move",
                                params=[window, text4, gauge2_startx,
                                        temp2_y, gauge2_color],
                                maintenance=True)
            #actr.schedule_event(time1+.1,"delayed_print_visicon",params=[],maintenance=True)
        #actr.run(df["time"][len(df["time"])-1])
        actr.run(15, True)  # pause actr
    # removing commands and things
    actr.remove_command_monitor("output-key", "demo2-key-press")
    actr.remove_command("demo2-key-press")        # fix: previously leaked
    actr.remove_command("gauge_1_move")
    actr.remove_command("gauge_2_move")
    actr.remove_command("light1_off")
    actr.remove_command("light2_off")
    actr.remove_command("delayed_print_visicon")  # fix: previously leaked
    actr.remove_command("move_y")
    # printing some things
    print("respones", responses)
    print("response_times", times)
    print("event_times", event_times)
    print("event_type", event_type)
height=50, width=100, font_size=22) # actr.start_hand_at_mouse() actr.add_command("sp-button-pressed-up-keep-down", button_pressed, "sp press button(up\keep\down) task") actr.add_command("sp-number-sims", number_sims, "Similarity hook function for building sticks task.") actr.add_command("sp-button-stop-pressed", button_stop_pressed, "sp task output-key monitor") actr.add_command( "sp-compute-difference", compute_difference, "Imaginal action function to compute the difference between sticks." ) actr.monitor_command("output-key", "sp-key-press") experiment(human=False) actr.reset() actr.remove_items_from_exp_window(window, x1_target_speed) actr.remove_items_from_exp_window(window, x2_actual_speed) actr.remove_items_from_exp_window(window, x3_delta_speed) actr.remove_items_from_exp_window(window, x4_delta_distance) actr.remove_items_from_exp_window(window, x5) targetgroup.append(speed_target[0]) actualgroup.append(speed_actual[0]) actr.add_button_to_exp_window(window, text="7", x=500, y=60, action=["sp-button-pressed", 0.2, "up"],
def load_reward():
    # Build a reward-amount chunk from the global `reward` and place it in
    # the visual buffer.  Registered below as the "load_reward" command so
    # the Lisp side can invoke it.
    global reward
    reward_chunk = actr.define_chunks(
        ['isa', 'reward-amount', 'amount', reward])
    actr.set_buffer_chunk("visual", reward_chunk[0])


# --- module-level IGT driver script (runs on import) ---
"""THE NEXT TWO LINES ARE CRITICAL FOR INTERFACING WITH THE ACT-R/LISP CODE; THEY ARE REQUIRED FOR PYTHON TO DETECT KEY PRESSES"""
win = actr.open_exp_window("Test", visible=False)
actr.install_device(win)
"""THE FOLLOWING LINES SET UP THE INTERFACE TO ACT-R/LISP CODE"""
actr.add_command("get-pick", respond_to_key_press,
                 "IGT key press response monitor")
actr.monitor_command("output-key", "get-pick")
actr.add_command("load_reward", load_reward,
                 "Loads reward amount chunk to visual buffer")
actr.add_command("load_decks", load_decks,
                 "Loads decks chunk to visual buffer")
# Schedule 100 deck presentations at 1-second intervals, then run the model
# long enough (200 s) to make all 100 picks.
event_time = 1
event_step = 1
total_picks_for_while_loop = 0
while total_picks_for_while_loop < 100:
    actr.schedule_event(event_time, "load_decks")
    event_time += event_step
    total_picks_for_while_loop = total_picks_for_while_loop + 1
actr.run(200)
def play(player1, player2):
    # Run one full game between two ACT-R models (identified by name).
    # Both models must exist in the current meta-process; each gets visicon
    # features describing the two players, key presses are handled by
    # make_move, and the run stops when is_game_over reports true or the
    # on-screen STOP safety button is pressed.  Returns the global `winner`
    # set by the game logic.
    global p1, p2, p1_position, p2_position, game_over, current_player, \
        safety_stop, winner, p1p1, p1p2, p2p1, p2p2
    if (player1.lower() in (x.lower() for x in actr.mp_models())) and (
            player2.lower() in (x.lower() for x in actr.mp_models())):
        actr.add_command('stop-a-run', stop_a_run,
                         'Set the flag to terminate the game.')
        win = actr.open_exp_window('Safety', visible=True, height=100,
                                   width=100, x=100, y=300)
        # NOTE(review): the window is referenced by its title string here
        # rather than the `win` handle — presumably supported by the AGI;
        # confirm.
        actr.add_button_to_exp_window('Safety', text='STOP', x=0, y=0,
                                      action='stop-a-run', height=90,
                                      width=90, color='red')
        actr.add_command('move', make_move, 'Handle player key presses')
        actr.monitor_command('output-key', 'move')
        actr.reset()
        # initialize the game information
        p1 = player1
        p2 = player2
        p1_position = 0
        p2_position = 5
        game_over = False
        current_player = player1
        safety_stop = False
        winner = None
        # Give player 1's model the opponent chunk and both player features
        # (player 1 starts with the turn).
        actr.set_current_model(player1)
        actr.define_chunks(player2)
        feats = actr.add_visicon_features(
            ['isa', ['player-loc', 'player'], 'screen-x', 0, 'screen-y', 0,
             'name', player1, 'position', [False, p1_position],
             'turn', [False, True]],
            ['isa', ['player-loc', 'player'], 'screen-x', 100, 'screen-y', 0,
             'name', player2, 'position', [False, p2_position]])
        p1p1 = feats[0]
        p1p2 = feats[1]
        # Same setup from player 2's point of view.
        actr.set_current_model(player2)
        actr.define_chunks(player1)
        feats = actr.add_visicon_features(
            ['isa', ['player-loc', 'player'], 'screen-x', 0, 'screen-y', 0,
             'name', player1, 'position', [False, p1_position],
             'turn', [False, True]],
            ['isa', ['player-loc', 'player'], 'screen-x', 100, 'screen-y', 0,
             'name', player2, 'position', [False, p2_position]])
        p2p1 = feats[0]
        p2p2 = feats[1]
        actr.add_command('is-game-over', is_game_over,
                         'Test whether game should stop running.')
        actr.add_command('set_game_over', set_game_over,
                         'Set the flag to stop the game.')
        actr.run_until_condition('is-game-over')
        actr.clear_exp_window(win)
        actr.remove_command('stop-a-run')
        actr.remove_command_monitor('output-key', 'move')
        actr.remove_command('move')
        actr.remove_command('is-game-over')
        actr.remove_command('set_game_over')
    # NOTE(review): if the name check above fails, `winner` is whatever the
    # global held before this call (or unbound on the first call) — confirm
    # whether an explicit error/None would be preferable.
    return winner
def play(player1, player2):
    # Run one speech-driven game between two ACT-R models.  Each model gets
    # a goal chunk carrying its color (red/blue) and name, both models share
    # one game window showing the two positions, moves are spoken and
    # handled by process_moves, and the run stops when is_game_over reports
    # true or the STOP safety button is pressed.  Returns the global
    # `game_over` value set by the game logic.
    global window, p1, p2, p1_position, p2_position, current_player, \
        game_over, safety_stop, p1_text, p2_text
    if (player1.lower() in (x.lower() for x in actr.mp_models())) and (
            player2.lower() in (x.lower() for x in actr.mp_models())):
        actr.reset()
        actr.set_current_model(player1)
        # create a goal chunk with the player's color and name
        actr.define_chunks(['goal', 'isa', 'play', 'my-color', 'red',
                            'my-name', "'%s'" % player1])
        actr.goal_focus('goal')
        actr.set_parameter_value(':show-focus', 'red')
        actr.set_current_model(player2)
        # create a goal chunk with the player's color and name
        actr.define_chunks(['goal', 'isa', 'play', 'my-color', 'blue',
                            'my-name', "'%s'" % player2])
        actr.goal_focus('goal')
        actr.set_parameter_value(':show-focus', 'blue')
        window = actr.open_exp_window("game", visible=True, width=200,
                                      height=100)
        safety_window = actr.open_exp_window('Safety', visible=True,
                                             height=100, width=100,
                                             x=100, y=100)
        actr.add_command('stop-a-run', stop_a_run,
                         'Set the flag to terminate the game.')
        actr.add_button_to_exp_window(safety_window, text="STOP", x=0, y=0,
                                      action='stop-a-run', height=80,
                                      width=80, color='red')
        # initialize the game state
        p1 = player1
        p2 = player2
        game_over = False
        safety_stop = False
        current_player = player1
        p1_position = 0
        p2_position = 5
        # Every model sees the shared game window.
        for m in actr.mp_models():
            actr.set_current_model(m)
            actr.install_device(window)
        p1_text = actr.add_text_to_exp_window(window, str(p1_position),
                                              x=20, y=10, color='red',
                                              height=30, width=30,
                                              font_size=20)
        p2_text = actr.add_text_to_exp_window(window, str(p2_position),
                                              x=140, y=10, color='blue',
                                              height=30, width=30,
                                              font_size=20)
        actr.add_command("process-moves", process_moves,
                         "Handle player speak actions")
        actr.monitor_command("output-speech", "process-moves")
        # speak to all models telling them the name
        # of the first player.
        for m in actr.mp_models():
            actr.set_current_model(m)
            actr.new_word_sound(p1, 0, 'start')
        actr.add_command('is-game-over', is_game_over,
                         'Test whether game should stop running.')
        actr.add_command('set_game_over', set_game_over,
                         'Set the flag to stop the game.')
        actr.run_until_condition('is-game-over', True)
        actr.remove_command_monitor("output-speech", "process-moves")
        actr.remove_command("process-moves")
        actr.remove_command('stop-a-run')
        actr.remove_command('is-game-over')
        actr.remove_command('set_game_over')
    return game_over