def load_decks():
    global decka_counter
    global deckb_counter
    global deckc_counter
    global deckd_counter
    # A deck stays selectable ("yes") while fewer than 40 cards have been drawn from it.
    if decka_counter < 40:
        decka = "yes"
    else:
        decka = "no"
    if deckb_counter < 40:
        deckb = "yes"
    else:
        deckb = "no"
    if deckc_counter < 40:
        deckc = "yes"
    else:
        deckc = "no"
    if deckd_counter < 40:
        deckd = "yes"
    else:
        deckd = "no"
    deck_chunk = actr.define_chunks(['isa', 'decks',
                                     'deckA', decka,
                                     'deckB', deckb,
                                     'deckC', deckc,
                                     'deckD', deckd])
    actr.set_buffer_chunk("visual", deck_chunk[0])
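# Hypothetical wiring for load_decks (a sketch, not from the original source):
# the draw counters live elsewhere in the task code and are initialized here
# only so the example is self-contained; the command name is an assumption.
decka_counter, deckb_counter, deckc_counter, deckd_counter = 0, 0, 0, 0
actr.add_command('load-decks', load_decks,
                 'Show the model which decks can still be drawn from.')
actr.schedule_event_relative(0, 'load-decks')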
def train_once(letter):
    global chunk_names
    # Present each stored chunk for this letter in the imaginal buffer and
    # clear it, so the chunk is merged into declarative memory.
    for chunk in chunk_names[letter]:
        actr.set_buffer_chunk('imaginal', chunk[0])
        actr.clear_buffer('imaginal')
    return
def add_past_tense_to_memory():
    # Put one randomly sampled (verb, stem, suffix) triple into the imaginal
    # buffer and clear it so it is added to declarative memory.
    word = random_word()
    actr.set_buffer_chunk('imaginal',
                          actr.define_chunks(['verb', word[0],
                                              'stem', word[1],
                                              'suffix', word[2]])[0])
    actr.clear_buffer('imaginal')
def make_one_goal():
    # Present a bare verb in the imaginal buffer and set the goal so the
    # model attempts to produce its past tense.
    word = random_word()
    actr.set_buffer_chunk('imaginal', actr.define_chunks(['verb', word[0]])[0])
    actr.goal_focus('starting-goal')
    return word
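# Illustrative training/test cycle built from the two past-tense functions
# above (a sketch under assumptions, not the original experiment loop): the
# number of training presentations and the run time are placeholders.
def one_past_tense_cycle():
    # Add a couple of correct past-tense examples to declarative memory,
    # then give the model a new verb to inflect and let it run.
    for _ in range(2):
        add_past_tense_to_memory()
    target = make_one_goal()
    actr.run(100)
    return target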
def present_new_situation(self, buffer="imaginal"):
    """Creates a new situation for the model and presents to the WHERE buffer"""
    if actr.mp_time() == self.PTET:
        newdef = self.generate_random_memory(traumatic=True)
    else:
        newdef = self.generate_random_memory(traumatic=False)
    newchunk = actr.define_chunks(newdef[0])
    actr.set_buffer_chunk(buffer, newchunk[0])
def present_stim():
    global chunks
    global stims
    global i
    # Show the current picture stimulus to the model through the visual buffer.
    chunks = actr.define_chunks(['isa', 'stimulus', 'picture', stims[i]])
    actr.set_buffer_chunk('visual', chunks[0])
    print('Presented: ', stims[i])
    print('correct response: ', cor_resps[i])
def run_trial(self, trial):
    """Runs one trial: present the sentence, then the picture 50 ms later."""
    sen = trial.sentence
    pic = trial.picture
    chunk_s = actr.define_chunks(sen.chunk_definition)[0]
    actr.set_buffer_chunk('visual', chunk_s)
    actr.run(time=10)
    chunk_p = actr.define_chunks(pic.chunk_definition)[0]
    actr.schedule_set_buffer_chunk('visual', chunk_p, actr.mp_time() + 0.05)
    actr.run(time=10)
def task1(prime_stimulus):
    """
    This function simulates the prime sentence verification task.
    The model parses the prime sentence and attempts to comprehend it.
    :param prime_stimulus: list; for simplification, assumes only syntax and
        syntax-corr change based on condition
    :return:
    """
    prime_sentence = actr.define_chunks(prime_stimulus)[0]
    actr.set_buffer_chunk('visual', prime_sentence)  # prime sentence

    # set initial goal
    # actr.record_history('buffer-trace', 'vocal')
    actr.goal_focus('wait-for-screen')

    actr.run(10)
def task2(target_stimulus=None):
    """
    This function simulates the picture description task.
    The model observes the picture stimulus and attempts to describe it
    using one of the potential syntactic structures.
    :param target_stimulus: None; for simplification, assume the picture
        stimulus uses the same verb as the prime sentence
    :return:
    """
    target_stimulus = ['isa', 'picture',
                       'agent', 'n3',
                       'patient', 'n4',
                       'action', 'v']
    target_picture = actr.define_chunks(target_stimulus)[0]

    # set second goal
    actr.goal_focus('wait-for-next-screen')

    actr.set_buffer_chunk('visual', target_picture)  # target picture

    actr.run(10)
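# A minimal sketch of one structural-priming trial built from task1 and task2
# above (assumed usage, not from the original source): the slot names and
# values in the prime chunk are placeholders for whatever the model defines.
def run_priming_trial():
    prime_stimulus = ['isa', 'sentence',
                      'agent', 'n1', 'patient', 'n2', 'action', 'v',
                      'syntax', 'DO', 'syntax-corr', 'yes']
    task1(prime_stimulus)   # prime sentence verification
    task2()                 # picture description with the primed verb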
def present_feedback():
    global i
    global current_response
    global accuracy

    feedback = 'no'
    # Check whether the response matches the correct key for the current
    # stimulus (cor_resps holds the list of correct responses).
    if current_response[i] == cor_resps[i]:
        feedback = 'yes'
        accuracy[i] = 1

    chunks = actr.define_chunks(['isa', 'feedback', 'feedback', feedback])
    actr.set_buffer_chunk('visual', chunks[0])
    print("Feedback given: ", feedback)

    # Advance the index and schedule the next stimulus presentation in 1 s.
    i = i + 1
    actr.schedule_event_relative(1, 'present_stim')
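# Sketch of how the stimulus/feedback cycle above is typically wired up
# (an assumption, not from the original source): present_stim must be
# registered as an ACT-R command because present_feedback schedules it by
# name; present_feedback is registered the same way for symmetry.
actr.add_command('present_stim', present_stim,
                 'Present the next picture stimulus.')
actr.add_command('present_feedback', present_feedback,
                 'Score the current response and show feedback.')
actr.schedule_event_relative(0, 'present_stim')  # kick off the first trial
actr.run(10)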
def show_result(choice):
    # Record the model's choice in the response chunk, place it in the
    # imaginal buffer, and schedule the next choice event in 2 s.
    actr.mod_chunk('response', 'answer', choice)
    actr.set_buffer_chunk('imaginal', 'response')
    actr.schedule_event_relative(2, 'utility-learning-issues-choose', output='medium')
def load_reward():
    global reward
    # Present the current reward amount to the model through the visual buffer.
    reward_chunk = actr.define_chunks(['isa', 'reward-amount', 'amount', reward])
    actr.set_buffer_chunk("visual", reward_chunk[0])