Code Example #1
File: pcomp_issues.py  Project: segsev/M.tech-2nd-sem
def trials(n=200,reset=True,output=True):
    global responses,task_over,exp_length,window

    if reset:
        actr.reset()

    window = actr.open_exp_window("Compilation task",visible=False)
    times = []
    responses = []
    task_over = False
    exp_length = n
    present_next_trial()
    actr.install_device(window)
#    actr.add_command('compilation-issues-game-over',game_over,"Test for the production compilation issues game being over")
    actr.add_command('compilation-issues-response',respond_to_key_press,"Compilation issues key press response monitor")
    actr.monitor_command('output-key','compilation-issues-response')

#   this is how the original ran: actr.run_until_condition('compilation-issues-game-over')
#   however performing a remote call for each event to determine the stopping point
#   takes almost 2 orders of magnitude longer to run!  So instead just run 
#   sufficiently long to complete the task and assume the model stops when there's
#   nothing left to do.

    actr.run(20000)

    actr.remove_command_monitor('output-key','compilation-issues-response')
    actr.remove_command('compilation-issues-response')
#    actr.remove_command('compilation-issues-game-over')

    return analyze_results(output)
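The output-key monitor's callback isn't shown here. A minimal sketch of what respond_to_key_press could look like, assuming the standard (model, key) signature ACT-R passes to output-key monitors; present_next_trial and the globals are the ones used in trials() above, and the stopping logic is illustrative:

def respond_to_key_press(model, key):
    # record the key; when the experiment length is reached mark the task done,
    # otherwise put up the next trial
    global responses, task_over
    responses.append(key)
    if len(responses) >= exp_length:
        task_over = True
    else:
        present_next_trial()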
Code Example #2
def example():

    global ids

    actr.reset()

    actr.add_command("custom-object-chunk", custom_object_chunk,
                     "Command to dynamically create visual object")

    # Here we install our custom function using an object-creator device
    actr.install_device(["vision", "object-creator", "custom-object-chunk"])

    # Now we add two features to the display, the first of which will
    # have the object created by the custom function and the other by
    # the default mechanism.

    ids = actr.add_visicon_features(["screen-x", 10, "screen-y", 20],
                                    ["screen-x", 100, "screen-y", 20])

    # run to give vision module a chance to
    # process those features and then print the
    # visicon.

    actr.run_n_events(3)
    actr.print_visicon()

    # run to have model complete task
    actr.run(1)

    actr.remove_command("custom-object-chunk")
Code Example #3
def demo_table():
    actr.reset()
    #init()

    #...when manually setting the sentence (without synthesizer)
    text = "give fork"

    #...when using the Aldebaran proxy for speech recognition
    #text = AL_speech_recognition()

    #...when using the Google API (not Python 2.7)
    #text = GAPI_speech_recognition()

    string = tokenize(text)
    onset = 0
    actr.set_parameter_value(":sound-decay-time", 0.2)
    #actr.set_parameter_value(":save-audicon-history", True)
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])
    for word in string:
        if TOK.descr[word.kind] == "WORD":
            print(str(word.txt))
            actr.new_word_sound(str(word.txt), onset)
            onset = onset + 0.2
    actr.run(30)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Code Example #4
def GAPI_speech_recognition():
    actr.reset()
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print("Say something!")
        audio = r.listen(source)
    # recognize speech using Google Speech Recognition; the call goes inside
    # the try block so the except clauses below can actually catch its errors
    text = ""
    try:
        # for testing purposes, we're just using the default API key;
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        text = r.recognize_google(audio, language="it-IT")
        #text = word_tokenize(text)
        print("Google Speech Recognition thinks you said " + str(text))
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print(
            "Could not request results from Google Speech Recognition service; {0}"
            .format(e))
    #actr.new_word_sound(text)
    #for word in text:
    #   actr.new_word_sound(word)
    #actr.run(30)
    return text
Code Example #5
File: tracking.py  Project: segsev/M.tech-2nd-sem
def agi ():
  
    actr.reset()
  
    # open a window and add the text
  
    window = actr.open_exp_window("Moving X", visible=True)
    text = actr.add_text_to_exp_window(window, "x", x=10, y=10)
    y = 10
    
    actr.install_device(window)
    
    # schedule an event to move it but don't have the 
    # periodic-event command available at this point so
    # just use a relative event which schedules itself
    
    actr.add_command("agi-move-it",move_text)

    actr.schedule_event_relative (1, "agi-move-it",params=[text,y],maintenance=True)

    # run the model in real time since it's a visible window
    
    actr.run(3,True)

    actr.remove_command("agi-move-it")
Code Example #6
File: run_model.py  Project: UWCCDL/SyntaxPriming
def single_trial(prime_stimulus, **param_set):
    """
    This function simulates an single trial. At the begining of each trial, the model is reset.
    The model's response is collected as either DO/PO for a simplified version of full sentence
    :param prime_stimulus: dict type, the prime stimulus, indicating the condition
    :return:
    """

    global response
    response = False

    while not response:
        actr.reset()
        actr.install_device(("speech", "microphone"))
        if param_set: set_parameters(**param_set)        #reset param
        # actr.record_history('BUFFER-TRACE','production-graph-utility')

        # actr.record_history('buffer-trace', 'goal')
        # actr.set_parameter_value(':v', 't')
        syntax = prime_stimulus[-3]
        syntax_corr = prime_stimulus[-1]

        actr.add_command("model1-key-press", respond_to_speech,
                         "model1 task output-key monitor")
        actr.monitor_command("output-speech", "model1-key-press")



        task1(prime_stimulus)
        task2()

        actr.remove_command_monitor("output-speech", "model1-key-press")
        actr.remove_command("model1-key-press")

    return response
Code Example #7
File: demo2.py  Project: segsev/M.tech-2nd-sem
def experiment(human=False):

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    text1 = items[0]
    window = actr.open_exp_window("Letter recognition")

    actr.add_text_to_exp_window(window, text1, x=125, y=150)

    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.monitor_command("output-key", "demo2-key-press")

    global response
    response = False

    if human:
        while not response:
            actr.process_events()

    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "demo2-key-press")
    actr.remove_command("demo2-key-press")

    return response
Code Example #8
def task(which=False):
    global response, response_time

    actr.reset()
    alphabet = [
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
        "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
    ]
    letter = actr.permute_list(alphabet)[0]
    alphabet = ["Z"] + alphabet

    if which == 'next' or which == 'previous':
        task = which
    elif actr.random(2) == 0:
        task = 'next'
    else:
        task = 'previous'

    time = 1500 + actr.random(1000)

    window = actr.open_exp_window("Simple task")

    actr.install_device(window)

    actr.add_command("pm-issues-response", respond_to_key_press,
                     "Perceptual-motor issues task response")
    actr.monitor_command("output-key", "pm-issues-response")

    actr.add_text_to_exp_window(window, letter, x=130, y=150)

    actr.add_command("pm-issue-display", display_prompt,
                     "Perceptual-motor issues task prompt display")

    actr.schedule_event_relative(time,
                                 "pm-issue-display",
                                 params=[window, task],
                                 time_in_ms=True)

    response = []
    response_time = False

    actr.run(10, True)

    actr.remove_command("pm-issue-display")
    actr.remove_command_monitor("output-key", "pm-issues-response")
    actr.remove_command("pm-issues-response")

    if (len(response) == 2 and response_time > time and response[0] == letter
            and ((task == 'next' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) - 1)) or
                 (task == 'previous' and alphabet.index(response[0]) ==
                  (alphabet.index(response[1]) + 1)))):
        result = True
    else:
        result = False

    return [task, result]
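display_prompt is scheduled above with params=[window, task]. A minimal sketch, assuming it just replaces the letter with the 'next'/'previous' prompt:

def display_prompt(window, prompt):
    # clear the letter and show the task prompt in its place
    actr.clear_exp_window(window)
    actr.add_text_to_exp_window(window, prompt, x=125, y=150)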
Code Example #9
def demo_mirror():
    init()
    actr.reset()
    actr.add_command("inner-speech-response", record_model_speech,
                     "Inner speech model response")
    actr.monitor_command("output-speech", "inner-speech-response")
    actr.install_device(["speech", "microphone"])
    actr.run(60)
    actr.remove_command_monitor("output-speech", "inner-speech-response")
    actr.remove_command("inner-speech-response")
Code Example #10
File: past_tense.py  Project: segsev/M.tech-2nd-sem
def trials(n,cont=False,v=False):

    global report,word_list,reward_check
    
    actr.add_command("reward-check",verify_reward,
                     "Past tense code check for a reward each trial.")
    actr.monitor_command("trigger-reward","reward-check")

    if not(cont) or not(word_list):
        actr.reset()
        word_list = make_word_freq_list(verbs)
        new = []
        for x in word_list:
            for y in x[1:]:
                if y not in new:
                    new.append(y)
        for x in new:
            if not(actr.chunk_p(x)):
                actr.define_chunks([x])

        print_header()
        report = []
 
    actr.set_parameter_value(":v",v)

    start = 100 * math.floor(len(report) / 100)
    count = len(report) % 100

    for i in range(n):
        add_past_tense_to_memory()
        add_past_tense_to_memory()
        reward_check = False
        target = make_one_goal()
        duration = actr.run(100)[0]
        add_to_report(target,actr.buffer_read('imaginal'))
        actr.clear_buffer('imaginal')
        count += 1
        
        if count == 100:
            rep_f_i(start, start + 100, 100)
            count = 0
            start += 100
        if not(reward_check):
            actr.print_warning("Model did not receive a reward when given %s."% target[0])

        actr.run_full_time(200 - duration)

        if duration == 100:
            actr.print_warning("Model spent 100 seconds generating a past tense for %s."%
                               target[0])

    rep_f_i(start,start+count,100)

    actr.remove_command_monitor("trigger-reward","reward-check")
    actr.remove_command("reward-check")
Code Example #11
File: game.py  Project: segsev/M.tech-2nd-sem
def play(player1, player2):
    global p1, p2, p1_position, p2_position, game_over, current_player, next_move

    # make sure that both names are valid model names

    if (player1.lower() in (x.lower() for x in actr.mp_models())) and (
            player2.lower() in (x.lower() for x in actr.mp_models())):
        actr.reset()

        actr.add_command('make-move', make_move,
                         "Model action function in simple capture game.")

        # create the goal chunks

        actr.set_current_model(player1)

        actr.define_chunks([
            'goal', 'isa', 'play', 'my-pos', 'p1', 'p1', 0, 'p2', 5, 'state',
            'move'
        ])

        actr.goal_focus('goal')

        actr.set_current_model(player2)

        actr.define_chunks([
            'goal', 'isa', 'play', 'my-pos', 'p1', 'p2', 0, 'p2', 5, 'state',
            'move'
        ])

        actr.goal_focus('goal')

        # initialize the game information

        p1 = player1
        p2 = player2
        p1_position = 0
        p2_position = 5
        game_over = False
        current_player = player1
        next_move = None

        while not game_over:
            # until a move is made which breaks the run
            # or 1000 simulated seconds pass which is a forfeit
            actr.run(1000)
            process_move(next_move)

        # Give them a chance to process results

        actr.run_full_time(3)

        actr.remove_command('make-move')

    return game_over
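make_move is what lets a model break out of the 1000-second actr.run call noted in the loop above. A plausible sketch, assuming the model passes its chosen move to the command and that a scheduled break stops the run:

def make_move(move):
    # record the move and break the current run so play() can process it
    global next_move
    next_move = move
    actr.schedule_break_relative(0)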
Code Example #12
def trial(onset_time):

    actr.reset()

    letters = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    answers = []
    row = actr.random(3)
    window = actr.open_exp_window("Sperling Experiment", visible=True)

    for i in range(3):
        for j in range(4):
            txt = letters[j + (i * 4)]
            if i == row:
                answers.append(txt)
            actr.add_text_to_exp_window(window,
                                        txt,
                                        x=(75 + (j * 50)),
                                        y=(100 + (i * 50)))

    actr.install_device(window)

    if row == 0:
        freq = 2000
    elif row == 1:
        freq = 1000
    else:
        freq = 500

    actr.new_tone_sound(freq, .5, onset_time)
    actr.schedule_event_relative(900 + actr.random(200),
                                 "clear-exp-window",
                                 params=[window],
                                 time_in_ms=True)

    global responses
    responses = []

    actr.add_command("sperling-response", respond_to_key_press,
                     "Sperling task key press response monitor")
    actr.monitor_command("output-key", "sperling-response")

    actr.run(30, True)

    actr.remove_command_monitor("output-key", "sperling-response")
    actr.remove_command("sperling-response")

    if show_responses:
        print("answers: %s" % answers)
        print("responses: %s" % responses)

    return (compute_score(answers))
Code Example #13
def recall():

    actr.add_command(
        "grouped-response", record_response,
        "Response recording function for the tutorial grouped model.")
    global response
    response = []
    actr.reset()
    actr.run(20)
    actr.remove_command("grouped-response")
    return response
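The grouped model invokes the grouped-response command directly from its productions, so record_response only needs to accept the recalled item. A minimal sketch:

def record_response(item):
    # append each item the model recalls to the global response list
    global response
    response.append(item)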
Code Example #14
def experiment(visible=None, show=True):
    actr.reset()

    global trials
    trials = []

    for j in range(3):
        for i in range(8):
            trials = trials + create_set(j + 1, visible)

    collect_responses(576)
    return analyze_results(show)
Code Example #15
    def simulate(self, trace=False, utility_offset=True):
        """Runs SP simulations using real stimuli"""
        # Function hook to modify the utility calculation
        # (it will add a mismatch penalty). It needs to be defined
        # before the model is loaded.
        
        actr.add_command("parser-offset", self.utility_offset,
                         "Calculates a mismatch penalty for AI condition")

        actr.load_act_r_model(self.model)

        
        for condition in self.CONDITIONS:
            self.current_condition = condition
            subset = [t for t in self.trials if t.condition == condition]
            for j in range(self.n):
                actr.reset()

                # Make the model silent unless a trace was requested
                
                if not trace:
                    actr.set_parameter_value(":V", False)

                # The model does not really need a visual interface,
                # but the default AGI provides a virtual mic to record
                # voice output.
        
                win = actr.open_exp_window("SP", width = 80,
                                           height = 60,
                                           visible=False)
                actr.install_device(win)

                # Function hooks to record the model responses. 
                
                actr.add_command("record-response", self.record_response,
                                 "Accepts a response for the SP task")
                actr.monitor_command("output-speech",
                                     "record-response")

                
                # Run a single trial in the given condition
                
                trial = random.choice(subset)
                self.run_trial(trial)

                # Clean up the function hooks
                
                actr.remove_command_monitor("output-speech",
                                            "record-response")
                actr.remove_command("record-response")
                
        # Removes the offset
        actr.remove_command("parser-offset")
Code Example #16
File: paired.py  Project: segsev/M.tech-2nd-sem
def do_experiment(size, trials, human):

    actr.reset()

    result = []
    model = not (human)
    window = actr.open_exp_window("Paired-Associate Experiment", visible=human)

    if model:
        actr.install_device(window)

    for i in range(trials):
        score = 0
        time = 0

        for prompt, associate in actr.permute_list(pairs[20 - size:]):

            actr.clear_exp_window(window)
            actr.add_text_to_exp_window(window, prompt, x=150, y=150)

            global response
            response = ''
            start = actr.get_time(model)

            if model:
                actr.run_full_time(5)
            else:
                while (actr.get_time(False) - start) < 5000:
                    actr.process_events()

            if response == associate:
                score += 1
                time += response_time - start

            actr.clear_exp_window(window)
            actr.add_text_to_exp_window(window, associate, x=150, y=150)
            start = actr.get_time(model)

            if model:
                actr.run_full_time(5)
            else:
                while (actr.get_time(False) - start) < 5000:
                    actr.process_events()

        if score > 0:
            average_time = time / score / 1000.0
        else:
            average_time = 0

        result.append((score / size, average_time))

    return result
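do_experiment relies on an output-key monitor defined elsewhere in paired.py to set response and response_time. A minimal sketch of that handler, assuming the standard (model, key) monitor signature:

def respond_to_key_press(model, key):
    # note the key and the time it was pressed (model time while the model runs)
    global response, response_time
    response_time = actr.get_time(model)
    response = key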
Code Example #17
def run_experiment(model_name="response-monkey.lisp",
                   time=200,
                   verbose=True,
                   visible=True,
                   trace=True,
                   params=[]):
    """Runs an experiment"""
    actr.reset()
    # current directory
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    actr.load_act_r_model(os.path.join(curr_dir, model_name))

    # Set the model parameters
    for name, val in params:
        actr.set_parameter_value(name, val)

    win = actr.open_exp_window("* STROOP TASK *",
                               width=800,
                               height=600,
                               visible=visible)

    actr.install_device(win)

    task = StroopTask(setup=False)
    #task.window = win

    actr.add_command("stroop-next", task.next, "Updates the internal task")
    actr.add_command("stroop-update-window", task.update_window,
                     "Updates the window")
    actr.add_command("stroop-accept-response", task.accept_response,
                     "Accepts a response for the Stroop task")

    actr.monitor_command("output-key", "stroop-accept-response")

    task.setup(win)
    if not trace:
        actr.set_parameter_value(":V", False)
    actr.run(time)
    if verbose:
        print("-" * 80)
        task.print_stats(task.run_stats())

    # Cleans up the interface
    # (Removes all the links between ACT-R and this object).

    actr.remove_command_monitor("output-key", "stroop-accept-response")
    actr.remove_command("stroop-next")
    actr.remove_command("stroop-update-window")
    actr.remove_command("stroop-accept-response")

    # Returns the task as a Python object for further analysis of data
    return task
Code Example #18
def bst_set(human, vis, stims, learn=True):
    global visible

    result = []

    visible = vis

    for stim in stims:
        if not (learn) and not (human):
            actr.reset()
        do_experiment(stim, human)
        result.append(choice)

    return result
Code Example #19
File: siegler.py  Project: segsev/M.tech-2nd-sem
def trial(arg1,arg2):

    actr.reset()
    actr.install_device(["speech","microphone"])
    need_to_remove = add_speech_monitor()
    actr.new_digit_sound(arg1)
    actr.new_digit_sound(arg2,.75)
    global response
    response = False
    actr.run(30)
    if need_to_remove:
        remove_speech_monitor()

    return response
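add_speech_monitor and remove_speech_monitor let trial() share the output-speech monitor with other code without installing it twice. A plausible sketch using a module-level flag (the flag name is illustrative):

monitor_installed = False

def add_speech_monitor():
    # install the monitor only if it isn't already in place
    global monitor_installed
    if monitor_installed:
        return False
    actr.add_command("siegler-response", record_model_speech, "Siegler task model response")
    actr.monitor_command("output-speech", "siegler-response")
    monitor_installed = True
    return True

def remove_speech_monitor():
    global monitor_installed
    actr.remove_command_monitor("output-speech", "siegler-response")
    actr.remove_command("siegler-response")
    monitor_installed = False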
Code Example #20
File: project.py  Project: dincbasar/cogmodeling
def init_all(open_window = False):
    global window, window_reading, chunk_defs, chunk_names, correct_dict

    actr.reset()

    window = actr.open_exp_window("Letter Writing", open_window,
                                    windowX, windowY, 200, 300)
    window_reading = actr.open_exp_window("Letter Goal", open_window,
                                    windowX, windowY, 800, 300)
    actr.install_device(window_reading)
    actr.install_device(window)
    chunk_defs = define_chunks(defs = True)
    chunk_names = define_chunks(defs = False)
    correct_dict = correctDict.make_correct_dict()
Code Example #21
def experiment(human=False):

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    target = items[0]
    foil = items[1]
    window = actr.open_exp_window("Letter difference")
    text1 = foil
    text2 = foil
    text3 = foil
    index = actr.random(3)

    if index == 0:
        text1 = target
    elif index == 1:
        text2 = target
    else:
        text3 = target

    actr.add_text_to_exp_window(window, text1, x=125, y=75)
    actr.add_text_to_exp_window(window, text2, x=75, y=175)
    actr.add_text_to_exp_window(window, text3, x=175, y=175)

    actr.add_command("unit2-key-press", respond_to_key_press,
                     "Assignment 2 task output-key monitor")
    actr.monitor_command("output-key", "unit2-key-press")

    global response
    response = ''

    if human:
        while response == '':
            actr.process_events()

    else:
        actr.install_device(window)
        actr.run(10, True)

    actr.remove_command_monitor("output-key", "unit2-key-press")
    actr.remove_command("unit2-key-press")

    return response.lower() == target.lower()
Code Example #22
def test_it ():

    actr.reset()
  
    actr.add_command("relay-speech-output", relay_speech_output, "Handle player speak actions")
    actr.monitor_command("output-speech","relay-speech-output")

    for m in actr.mp_models():
        actr.set_current_model(m)
        actr.install_device(["speech","microphone"])

    actr.run(10)
    
    actr.remove_command_monitor("output-speech", "relay-speech-output")    
    actr.remove_command("relay-speech-output")
Code Example #23
File: fan.py  Project: segsev/M.tech-2nd-sem
def sentence(person, location, target, term):

    actr.reset()

    window = actr.open_exp_window("Sentence Experiment",
                                  visible=False,
                                  width=600,
                                  height=300)
    x = 25

    actr.install_device(window)

    actr.add_command("fan-response", respond_to_key_press,
                     "Fan experiment model response")
    actr.monitor_command("output-key", "fan-response")

    if term == 'person':
        actr.pdisable("retrieve-from-location")
    else:
        actr.pdisable("retrieve-from-person")

    actr.add_text_to_exp_window(window, person, x=50, y=150, width=75)
    actr.add_text_to_exp_window(window, location, x=250, y=150, width=75)

    global response, response_time

    response = ''
    response_time = 0

    actr.run(30)

    actr.remove_command_monitor("output-key", "fan-response")
    actr.remove_command("fan-response")

    if response == '':
        return (30, False)
    elif target:
        return (response_time / 1000, response.lower() == 'k')
    else:
        return (response_time / 1000, response.lower() == 'd')
Code Example #24
File: tracking.py  Project: segsev/M.tech-2nd-sem
def arbitrary ():
  
    actr.reset()
  
    feature = actr.add_visicon_features(['screen-x',15,'screen-y',20,'value',"'x'"])[0]
    x = 15

    # schedule an event to move it but don't have the 
    # periodic-event command available at this point so
    # just use a relative event which schedules itself
    
    actr.add_command("arbitrary-move-it",move_feat)

    actr.schedule_event_relative (1, "arbitrary-move-it",params=[feature,x],maintenance=True)

    # run the model in real time as in the window-based version above
    
    actr.run(3,True)

    actr.remove_command("arbitrary-move-it")
Code Example #25
def example():
    actr.reset()

    # The first window is located at x=0 and y=0 which
    # is fine for virtual windows, but if it were a real
    # window that window would be behind the menu bar
    # at the top of the display under OS X which makes
    # it difficult to interact with.  The second window
    # is located at x=200, y=200.  The default mouse
    # position is x=0, y=0.

    w1 = actr.open_exp_window("W1",
                              visible=False,
                              width=100,
                              height=100,
                              x=0,
                              y=0)
    w2 = actr.open_exp_window("W2",
                              visible=False,
                              width=100,
                              height=100,
                              x=200,
                              y=200)

    # add text to the same local position in each window.

    actr.add_text_to_exp_window(w1, "a", x=10, y=10, color='red')
    actr.add_text_to_exp_window(w2, "a", x=10, y=10, color='blue')

    # Install both windows and the mouse cursor

    actr.install_device(w1)
    actr.install_device(w2)
    actr.install_device(["vision", "cursor", "mouse"])

    # Just run the model to have vision module process
    # things and print the visicon.
    # The model doesn't do anything.

    actr.run(1)
    actr.print_visicon()
Code Example #26
def experiment(n, human=False):
    l = len(test_stim)

    result = [0] * l
    p_values = [["decide-over", 0], ["decide-under", 0], ["force-over", 0],
                ["force-under", 0]]
    for i in range(n):
        actr.reset()
        d = bst_set(human, vis=True, stims=test_stim)
        for j in range(l):
            if d[j] == "over":
                result[j] += 1

        actr.hide_output()

        # for p in p_values:
        #     p[1] += production_u_value(p[0])

        actr.unhide_output()

    result = list(map(lambda x: 100 * x / n, result))

    if len(result) == len(exp_data):
        actr.correlation(result, exp_data)
        actr.mean_deviation(result, exp_data)

    print()
    print("Trial ", end="")
    for i in range(l):
        print("%-8d" % (i + 1), end="")
    print()

    print("  ", end="")
    for i in range(l):
        print("%8.2f" % result[i], end="")

    print()
    print()

    for p in p_values:
        print("%-12s: %6.4f" % (p[0], p[1] / n))
Code Example #27
def example ():
  
    actr.reset()
  
    """
    We will create two items in the display and have the model
    find them and move the mouse to them to see the difference
    in timing based on the width function setting.

    One feature will use the normal visual-location and object
    chunk-types and the other will use a custom visual-location
    type defined like this which replaces screen-x, screen-y,
    and distance with slots named x, y, and z instead.

    (chunk-type custom-location x y z height width size)

    The feature using the standard visual-location slots
    will include a custom width function to make it act like
    a circle with twice its given height.
    """

    actr.add_command("custom-width", custom_width, "Return twice the height of a visual feature.")
  
  
    actr.add_visicon_features(['screen-x', 50, 'screen-y', 500, 'height', 20, 'width', 20, ':width-fn', "'custom-width'"],
                              ['isa', 'custom-location', 'x', 150, 'y', 500, 'height', 20, 'width', 20, ':x-slot', 'x', ':y-slot', 'y', ':z-slot', 'z'])
  
    # Give the vision module a chance to process the display
    # before printing the visicon.
  
    actr.run_n_events(3)
    actr.print_visicon()
  
    # run the model to move the cursor to 100,0 as a starting point.
    # and from there move to the left location, back to the start, and then
    # the right location.
  
    actr.run(10)
  
    actr.remove_command('custom-width')
Code Example #28
File: run_model.py  Project: UWCCDL/SyntaxPriming
def test1():
    # only DO trials - 10
    trials = []
    num_trials = 1
    response_list = []
    prime_template = ['isa', 'sentence',
                      'string', '...',
                      'noun1', 'n1',
                      'noun2', 'n2',
                      'verb', 'v',
                      'syntax', 'DO',
                      'syntax-corr', 'yes']
    for i in range(int(num_trials)):
        prime_sentence = copy(prime_template)
        prime_sentence[-3] = 'DO'
        prime_sentence[-1] = 'yes'
        trials.append(prime_sentence)

    actr.reset()

    # install device
    actr.install_device(("speech", "microphone"))

    for i in range(num_trials):
        response = single_trial(trials[i])
        syn = trials[i][-3]
        syn_corr = trials[i][-1]
        print("prime:",syn, syn_corr, "resp:", response)
        # if response=='failure':
            # print("---------------")
            #actr.sdp('DO-form', 'PO-form')
            #actr.whynot('step6-1')
            # actr.whynot_dm('DO-form', 'PO-form')

        response_list.append(response)
    print("response count()",
        "DO:", response_list.count("DO"),
        "PO:", response_list.count("PO"), "\ntotal: ", num_trials)
    print("prop_DO", response_list.count("DO")*1.0/(response_list.count("DO")+response_list.count("PO")))
Code Example #29
File: categorize.py  Project: segsev/M.tech-2nd-sem
def trial(*features):

    actr.reset()
 
    for slot,value in actr.permute_list(list(zip(slots,features))):
        attribute(slot,value)

    goal = actr.define_chunks(["state","categorize"])[0]
    actr.goal_focus(goal)
    actr.run(20)
    
    answer = actr.chunk_slot_value(actr.buffer_read("imaginal"),"category")

    if isinstance(answer,numbers.Number):
        if answer == 1 or answer == 2:
            return((answer,True))
        else:
            actr.model_output("Model responded with invalid category.")
            return((0,False))
    else:
        actr.model_output("Model did not respond or provided a non-numeric category.")
        return((0,False))
Code Example #30
def run_test(visible=False):

    actr.reset()

    win = actr.open_exp_window("background test",
                               visible=visible,
                               width=390,
                               height=390,
                               x=100,
                               y=100)

    actr.install_device(win)

    actr.start_hand_at_mouse()

    actr.add_image_to_exp_window(win,
                                 "background",
                                 "ref-brain.gif",
                                 x=0,
                                 y=0,
                                 width=390,
                                 height=390)

    for x in range(3):
        for y in range(3):
            actr.add_visicon_features([
                "screen-x", 164 + (x * 130), "screen-y", 164 + (y * 130),
                "height", 128, "width", 128, "name",
                ["'brain'", "'brain-%d'" % (x + (y * 3))]
            ])

    # run for the vision module to process the scene
    actr.run_n_events(2)

    actr.print_visicon()

    # run for up to 10 seconds using real-time if the window is visible
    actr.run(10, visible)