def present_new_situation(self, buffer="imaginal"):
    """Creates a new situation for the model and presents it to the WHERE buffer"""
    if actr.mp_time() == self.PTET:
        newdef = self.generate_random_memory(traumatic=True)
    else:
        newdef = self.generate_random_memory(traumatic=False)
    newchunk = actr.define_chunks(newdef[0])
    actr.set_buffer_chunk(buffer, newchunk[0])
def next(self):
    """Moves on in the task progression"""
    if self.phase == "fixation":
        self.phase = "stimulus"
        self.current_trial.onset = actr.mp_time()
    elif self.phase == "stimulus":
        self.current_trial.offset = actr.mp_time()
        self.index += 1
        self.log.append(self.current_trial)
        if self.index >= len(self.stimuli):
            self.phase = "done"
        else:
            self.current_trial = StroopTrial(self.stimuli[self.index])
            self.phase = "fixation"
            actr.schedule_event_relative(1, "stroop-next")
    actr.schedule_event_now("stroop-update-window")
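# A minimal sketch, not part of the original source: the scheduled events in
# next() refer to ACT-R commands by name, so "stroop-next" and
# "stroop-update-window" must be registered with actr.add_command before the
# task runs. The `task` object and its `update_window` method are assumptions
# used only for illustration.
def register_stroop_commands(task):
    actr.add_command("stroop-next", task.next,
                     "Advance the Stroop task to the next phase")
    actr.add_command("stroop-update-window", task.update_window,
                     "Redraw the experiment window for the current phase")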
def run_trial(self, trial):
    """Runs a single trial: the sentence is presented first, then the picture"""
    sen = trial.sentence
    pic = trial.picture
    chunk_s = actr.define_chunks(sen.chunk_definition)[0]
    actr.set_buffer_chunk('visual', chunk_s)
    actr.run(time=10)
    chunk_p = actr.define_chunks(pic.chunk_definition)[0]
    actr.schedule_set_buffer_chunk('visual', chunk_p, actr.mp_time() + 0.05)
    actr.run(time=10)
def simulate(self):
    """Runs a single simulation"""
    # Add commands and hooks
    actr.add_command("v_offset", self.chunk_v_term,
                     "Extra term in activation")
    actr.add_command("spreading", self.spreading_activation,
                     "Overrides normal spreading activation algorithm")
    actr.add_command("monitor_retrievals", self.monitor_retrievals,
                     "Monitors what is being retrieved")
    actr.add_command("next", self.present_new_situation,
                     "Presents a new situation")
    actr.add_command("keep_table", self.add_chunk)

    # Makes sure we are loading the current model from
    # the current directory
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    actr.load_act_r_model(os.path.join(curr_dir, self.model))
    actr.set_parameter_value(":V", False)
    actr.set_parameter_value(":cmdt", False)

    # Apply the set of provided parameters
    for param, value in self.model_params.items():
        actr.set_parameter_value(param, value)

    # Run a life simulation
    event_time = 0.0
    while actr.mp_time() < self.max_time:
        actr.schedule_event(event_time, "next")
        event_time += self.event_step
        actr.run(self.event_step)  # No need to run beyond the event step

    # Clean-up
    actr.remove_command("next")
    actr.remove_command("v_offset")
    actr.remove_command("spreading")
    actr.remove_command("keep_table")
    actr.remove_command("monitor_retrievals")

    # Update counter
    self.counter += 1
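# A hypothetical driver, not part of the original source: it assumes a
# Simulation-like object `sim` exposing simulate() and the TRACE list filled in
# by monitor_retrievals. actr.reset() is used so that each run starts with the
# simulation clock back at zero (simulate() loops until self.max_time).
def run_batch(sim, n=100):
    """Runs n life simulations and returns the accumulated retrieval trace"""
    for _ in range(n):
        actr.reset()
        sim.simulate()
    return sim.TRACE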
def monitor_retrievals(self, chunk):
    """Keeps track of what is being retrieved and why"""
    v = 0.0  # Emotional load of retrieved memory
    s = 0.0  # Similarity of memory to current situation
    t = 0.0  # Is the memory traumatic or not?
    if chunk is not None and \
       actr.chunk_slot_value(chunk, "kind") == "MEMORY":
        param_values = []
        for param in sorted(self.model_params.keys()):
            param_values.append(self.model_params[param])
        source = actr.buffer_chunk("imaginal")[0]
        v = self.V_TABLE[chunk]
        s = self.chunk_similarity(chunk, source)
        if actr.chunk_slot_value(chunk, "traumatic") == "YES":
            t = 1.0
        self.TRACE.append([self.counter, self.currentV, actr.mp_time(),
                           v, t, s] + param_values)
def relay_speech_output(model, string):
    for m in actr.mp_models():
        if not (m.lower() == model.lower()):  # all other models will hear this
            actr.set_current_model(m)
            # Create the originating model's name as a simple chunk if
            # it isn't one already to avoid a warning of it being
            # created by default when the sound is generated.
            if not actr.chunk_p(model):
                actr.define_chunks(model)
            # Create the sound as a word with location indicating
            # the speaker.
            actr.new_word_sound(string, actr.mp_time(), model)
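# A sketch of how relay_speech_output is typically wired up in multi-model
# ACT-R scripts (an assumption here, since the registration is not shown in
# this excerpt): the function is added as a command and set to monitor the
# "output-speech" action so it fires whenever any model speaks.
actr.add_command("relay-speech-output", relay_speech_output,
                 "Relay a model's speech output to the other models")
actr.monitor_command("output-speech", "relay-speech-output")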
def process_moves(model, string):
    global p1_position, p2_position, current_player, p1_text, p2_text

    # Let all other models hear this
    for m in actr.mp_models():
        if not (m.lower() == model.lower()):  # all other models will hear this
            actr.set_current_model(m)
            # Create the originating model's name as a simple chunk if
            # it isn't one already to avoid a warning of it being
            # created by default when the sound is generated.
            if not actr.chunk_p(model):
                actr.define_chunks(model)
            # Create the sound as a word with location indicating
            # the speaker.
            actr.new_word_sound(string, actr.mp_time(), model)

    if string.lower() == "one":
        move = 1
    elif string.lower() == "two":
        move = 2
    else:
        print("Wrong move assumed to be 1: %s" % string)
        move = 1

    if current_player == p1:
        p1_position += move
    else:
        p2_position -= move

    actr.clear_exp_window(window)

    if p2_position <= p1_position:
        # if there's a winner
        actr.schedule_event_relative(3, 'set_game_over', params=[current_player])
        actr.add_text_to_exp_window(window, current_player, x=60, y=20,
                                    color='green', height=30, width=80,
                                    font_size=20)
    else:
        # not a winner so update the display with the new position
        p1_text = actr.add_text_to_exp_window(window, str(p1_position),
                                              x=20, y=10, color='red',
                                              height=30, width=30, font_size=20)
        p2_text = actr.add_text_to_exp_window(window, str(p2_position),
                                              x=140, y=10, color='blue',
                                              height=30, width=30, font_size=20)
        if current_player == p1:
            current_player = p2
        else:
            current_player = p1
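# The call to actr.schedule_event_relative(3, 'set_game_over', ...) above
# refers to a command by name, so a handler must be registered under that name.
# A minimal sketch, assuming a set_game_over(winner) function defined elsewhere
# in the original script; process_moves itself is assumed to be monitoring the
# "output-speech" action in the same way as relay_speech_output.
actr.add_command("set_game_over", set_game_over,
                 "Record the winning model and end the game")
actr.add_command("process-moves", process_moves,
                 "Handle a spoken move and update the game state")
actr.monitor_command("output-speech", "process-moves")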