def grid_search():
    ans_values = [.2, .3, .4, .5, .6, .7, .8]
    egs_values = [1, 2, 3]
    lf_values = [.1, .2, .3, .4]
    mas_values = [3, 4, 5]
    for ans_val in ans_values:
        for egs_val in egs_values:
            for lf_val in lf_values:
                for mas_val in mas_values:
                    with open("rlwm_model_nomeaning_template.lisp",
                              'r') as infile:
                        lines = infile.readlines()
                    with open("rlwm_model_nomeaning.lisp", 'w') as outfile:
                        for line in lines:
                            if line == "(bad-command)\n":
                                outfile.write(
                                    "(sgp :ans {} :egs {} :lf {} :mas {})\n".
                                    format(ans_val, egs_val, lf_val, mas_val))
                            else:
                                outfile.write(line)
                    actr.load_act_r_model(
                        os.path.abspath("rlwm_model_nomeaning.lisp").replace(
                            '/', ';')[1:])
                    data_dir = "fit_ans{}_egs{}_lf{}_mas{}_RLWM".format(
                        ans_val, egs_val, lf_val, mas_val)
                    # actr.set_parameter_value(":ans", ans_val)
                    # actr.set_parameter_value(":egs", egs_val)
                    # actr.set_parameter_value(":lf", lf_val)
                    # actr.set_parameter_value(":mas", mas_val)
                    run_subjects(range(101, 121), 1, data_dir)
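# A minimal sketch of the substitution performed above, assuming the template
# file contains a literal "(bad-command)" placeholder line where the parameter
# block belongs; the template string here is illustrative, not the real file.
template = "(define-model rlwm\n(bad-command)\n)\n"
patched = template.replace(
    "(bad-command)\n",
    "(sgp :ans {} :egs {} :lf {} :mas {})\n".format(.2, 1, .1, 3))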
Example #2
def setupACTR():
    actr.load_act_r_model(
        "/Users/paulsomers/ddpg-craft/scripts/ddpg_agent.lisp")
    actr.wait = True
    chk = actr.define_chunks(['wait', 'false'])

    # run ACT-R for 1000 seconds (simulated) on a background thread
    actrThread = threading.Thread(target=actr.run, args=[1000])
    actrThread.start()
    actr.schedule_simple_event_now("set-buffer-chunk", ['imaginal', chk[0]])

    #once loaded add some functions that will be called by ACT-R
    actr.add_command("tic", do_tic)
    actr.add_command('set_response', set_response)

    #Setup the imaginal for an initial production
    chunk = ['isa', 'setup']

    # Wait until the first production fires: once ACT-R is running, the
    # "tic" command sets the wait flag (polled below) to False
    while actr.wait:
        time.sleep(0.001)
        print("waiting")

    return 1
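# Hypothetical "tic" callback assumed by setupACTR above: the model invokes
# the "tic" command, which clears the wait flag polled in the loop.
def do_tic():
    actr.wait = False
    return True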
Example #3
def load_model(model="model1", param_set=None):
    actr.load_act_r_model(os.path.abspath(model + "_core.lisp"))
    # load the new parameter set, if one was provided
    if param_set:
        set_parameters(**param_set)
    reward = 0  # init value
    actr.load_act_r_model(os.path.abspath(model + "_body.lisp"))
    print("######### LOADED MODEL " + model + " #########")
    print(">>", get_parameters(*get_parameters_name()), "<<")
Example #4
    def simulate(self, trace=False, utility_offset=True):
        """Runs SP simulations using real stimuli"""
        # Function hook to modify the utility calculation
        # (will add a mismatch penalty). Need to be defined
        # before the model is loaded
        
        actr.add_command("parser-offset", self.utility_offset,
                         "Calculates a mismatch penalty for AI condition")

        actr.load_act_r_model(self.model)

        
        for condition in self.CONDITIONS:
            self.current_condition = condition
            subset = [t for t in self.trials if t.condition == condition]
            for j in range(self.n):
                actr.reset()

                # Make the model silent unless a trace was requested
                
                if not trace:
                    actr.set_parameter_value(":V", False)

                # The model does not really need a visual interface,
                # but the default AGI provides a virtual mic to record
                # voice output.
        
                win = actr.open_exp_window("SP", width = 80,
                                           height = 60,
                                           visible=False)
                actr.install_device(win)

                # Function hooks to record the model responses. 
                
                actr.add_command("record-response", self.record_response,
                                 "Accepts a response for the SP task")
                actr.monitor_command("output-speech",
                                     "record-response")

                
                # Run a single trial in the given condition
                
                trial = random.choice(subset)
                self.run_trial(trial)

                # Clean up the function hooks
                
                actr.remove_command_monitor("output-speech",
                                            "record-response")
                actr.remove_command("record-response")
                
        # Removes the offset
        actr.remove_command("parser-offset")
Example #5
def run_experiment(model_name="response-monkey.lisp",
                   time=200,
                   verbose=True,
                   visible=True,
                   trace=True,
                   params=[]):
    """Runs an experiment"""
    actr.reset()
    # current directory
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    actr.load_act_r_model(os.path.join(curr_dir, model_name))

    # Set the model parameters
    for name, val in params:
        actr.set_parameter_value(name, val)

    win = actr.open_exp_window("* STROOP TASK *",
                               width=800,
                               height=600,
                               visible=visible)

    actr.install_device(win)

    task = StroopTask(setup=False)
    #task.window = win

    actr.add_command("stroop-next", task.next, "Updates the internal task")
    actr.add_command("stroop-update-window", task.update_window,
                     "Updates the window")
    actr.add_command("stroop-accept-response", task.accept_response,
                     "Accepts a response for the Stroop task")

    actr.monitor_command("output-key", "stroop-accept-response")

    task.setup(win)
    if not trace:
        actr.set_parameter_value(":V", False)
    actr.run(time)
    if verbose:
        print("-" * 80)
        task.print_stats(task.run_stats())

    # Cleans up the interface
    # (Removes all the links between ACT-R and this object).

    actr.remove_command_monitor("output-key", "stroop-accept-response")
    actr.remove_command("stroop-next")
    actr.remove_command("stroop-update-window")
    actr.remove_command("stroop-accept-response")

    # Returns the task as a Python object for further analysis of data
    return task
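# Example invocation (parameter values are illustrative only):
# task = run_experiment("response-monkey.lisp", time=200, visible=False,
#                       trace=False, params=[(":ans", 0.1)])
# task.print_stats(task.run_stats())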
Example #6
    def simulate(self):
        """Runs a single simulation"""

        # Add commands and hooks
        actr.add_command("v_offset", self.chunk_v_term,
                         "Extra term in activation")

        actr.add_command("spreading", self.spreading_activation,
                         "Overrides normal spreading activation algorithm")

        actr.add_command("monitor_retrievals", self.monitor_retrievals,
                         "Monitors what is being retrieved")

        actr.add_command("next", self.present_new_situation,
                         "Presents a new situation")

        actr.add_command("keep_table", self.add_chunk)

        # Makes sure we are loading the current model from
        # the current directory
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        actr.load_act_r_model(os.path.join(curr_dir, self.model))

        actr.set_parameter_value(":V", False)
        actr.set_parameter_value(":cmdt", False)

        # Apply the set of provided parameters
        for param, value in self.model_params.items():
            actr.set_parameter_value(param, value)

        # Run a life simulation: advance the clock in event_step increments,
        # scheduling the "next" command before each step so the model always
        # has a fresh situation to process

        event_time = 0.0

        while actr.mp_time() < self.max_time:
            actr.schedule_event(event_time, "next")
            event_time += self.event_step
            actr.run(self.event_step)  # No need to run beyond the event step

        # Clean-up

        actr.remove_command("next")
        actr.remove_command("v_offset")
        actr.remove_command("spreading")
        actr.remove_command("keep_table")
        actr.remove_command("monitor_retrievals")

        # Update counter

        self.counter += 1
Example #7
def reset_actr():

    model_name = 'egocentric-salience.lisp'
    model_path = '/Users/paulsomers/COGLE/deep_salience/'

    chunk_file_name = 'chunks.pkl'
    chunk_path = os.path.join(model_path, 'data')

    actr.add_command('similarity_function', similarity)
    actr.load_act_r_model(os.path.join(model_path, model_name))
    actr.record_history("blending-trace")

    #load all the chunks
    allchunks = pickle.load(
        open(os.path.join(chunk_path, chunk_file_name), 'rb'))
    for chunk in allchunks:
        actr.add_dm(chunk)
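# Hypothetical companion snippet showing how a file like chunks.pkl could be
# built: each entry is the flat slot/value list that actr.add_dm accepts
# (the slot names below are made up for illustration).
# import pickle
# example_chunks = [["isa", "chunk", "height", 100, "salience", 0.5]]
# with open("chunks.pkl", "wb") as f:
#     pickle.dump(example_chunks, f)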
Example #8
def setup_experiment(human=True):
    '''
    Load the correct ACT-R model, and create a window to display the words
    '''
    if subject == "controls":
        actr.load_act_r_model(
            "/Users/chisa/Desktop/masters/first_year_research_project/my_model/csm_free_recall_model.lisp"
        )
    elif subject == "depressed":
        actr.load_act_r_model(
            "/Users/chisa/Desktop/masters/first_year_research_project/my_model/csm_free_recall_model_depressed.lisp"
        )

    window = actr.open_exp_window("Free Recall Experiment",
                                  width=1024,
                                  height=768,
                                  visible=human)  # window sized for a 15-inch screen
    actr.install_device(window)
    return window
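# Example use; `subject` is a module-level variable in the original file and
# must be set before calling setup_experiment:
# subject = "controls"
# window = setup_experiment(human=False)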
Example #9
def test3():
    actr.load_act_r_model(os.getcwd() + "/model1.lisp")  # load the model
    ans = [0.2, 0.5, 0.8]
    bll = [0.2, 0.5, 0.8]
    mas = [1.3, 1.6, 1.9]
    ia = [0.5, 1, 1.5]
    hyper_param = [{'ans':i, 'bll':j, 'mas':k, 'imaginal-activation':l} \
                   for i in ans for j in bll for k in mas for l in ia]
    best_corr = -2
    best_param = []
    for i in tqdm(range(len(hyper_param))):
        param_set = hyper_param[i]
        set_parameters(**param_set)
        corr = simulations(50)
        if corr > best_corr:
            best_corr = corr
            best_param = param_set
    print(">> best_corr", "best_param\n")
    print(best_corr, best_param)
    return (best_corr, best_param)
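# The same parameter grid can be built with itertools.product instead of the
# nested comprehension (an equivalent sketch):
# from itertools import product
# names = ['ans', 'bll', 'mas', 'imaginal-activation']
# hyper_param = [dict(zip(names, combo))
#                for combo in product(ans, bll, mas, ia)]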
Example #10
def test5():
    # grid search hyper-parameter tuning
    alpha = [0.001, 0.01, 0.1, 0.2, 0.3]
    egs = [0.0, 0.3, 0.6, 0.9, 1.2, 1.5]
    r2 = [-0.1, -0.5, -1, -5, -10]
    ppm = [0, 1, 1.5]
    #hyper_param = [[i, j] for i in alpha for j in egs]
    hyper_param = [{'alpha': i, 'egs': j, 'r2': k, 'ppm': l}
                   for i in alpha for j in egs for k in r2 for l in ppm]

    actr.load_act_r_model(os.getcwd() + "/model3.lisp")  # load the model
    best_corr = -2
    best_param = []
    for i in tqdm(range(len(hyper_param))):
        param_set = hyper_param[i]
        set_parameters(**param_set)
        corr = simulations(50)
        if corr > best_corr:
            best_corr = corr
            best_param = param_set
    print(">> best_corr", "best_param\n")
    print(best_corr, best_param)
    return (best_corr, best_param)
Example #11
def reset_actr(chunk_file_name):

    model_name = 'qsalience.lisp'
    model_path = '/Users/paulsomers/COGLE/qsalience/'

    #chunk_file_name = 'version1.chunks'
    chunk_path = os.path.join(model_path, 'data')

    actr.add_command('similarity_function', similarity)
    actr.load_act_r_model(os.path.join(model_path, model_name))
    actr.record_history("blending-trace")

    #load all the chunks
    allchunks = pickle.load(
        open(os.path.join(chunk_path, chunk_file_name), 'rb'))
    for chunk in allchunks:
        actr.add_dm(chunk)
    # collect the distinct values (y[1]) observed for each non-action slot
    for chunk in allchunks:
        # walk the chunk spec as (slot, value) pairs
        for x, y in zip(*[iter(chunk)] * 2):
            if x != 'action' and y[1] not in min_max[x]:
                min_max[x].append(y[1])
Example #12
def test4():
    print("############# MODEL1 #############")
    actr.load_act_r_model(os.getcwd() + "/model1.lisp")  # load the model
    simulations(50)

    print("############# MODEL2 #############")
    actr.load_act_r_model(os.getcwd() + "/model2.lisp")  # load the model
    simulations(50)

    print("############# MODEL3 #############")
    actr.load_act_r_model(os.getcwd() + "/model3.lisp")  # load the model
    simulations(50)
Example #13
import actr

actr.load_act_r_model("ACT-R:tutorial;unit4;zbrodoff-model.lisp")

trials = []
results = []

control_data = [1.84, 2.46, 2.82, 1.21, 1.45, 1.42, 1.14, 1.21, 1.17]

run_model = True


class trial():
    def __init__(self, block, addend1, addend2, sum, answer, visible=None):
        self.block = block
        self.addend2 = addend2
        self.text = addend1 + " + " + addend2 + " = " + sum
        self.answer = answer.lower()
        if visible is None:
            self.visible = not run_model
        else:
            self.visible = visible
        self.correct = False


def present_trial(trial, new_window=True):

    if new_window:
        w = actr.open_exp_window("Alpha-arithmetic Experiment",
                                 visible=trial.visible)
        if run_model:
Example #14
import actr

actr.load_act_r_model("ACT-R:tutorial;unit5;siegler-model.lisp")

response = False
monitor_installed = False

siegler_data = [[0, .05, .86,  0,  .02,  0, .02, 0, 0, .06],
                [0, .04, .07, .75, .04,  0, .02, 0, 0, .09],
                [0, .02, 0, .10, .75, .05, .01, .03, 0, .06],
                [.02, 0, .04, .05, .80, .04, 0, .05, 0, 0],
                [0, 0, .07, .09, .25, .45, .08, .01, .01, .06],
                [.04, 0, 0, .05, .21, .09, .48, 0, .02, .11]]


def record_model_speech(model, string):
    global response
    response = string.lower()


def add_speech_monitor():
    global monitor_installed

    if not monitor_installed:
        actr.add_command("siegler-response", record_model_speech,
                         "Siegler task model response")
        actr.monitor_command("output-speech", "siegler-response")
        monitor_installed = True
        return True
    else:
        return False
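# A plausible counterpart for tearing the monitor down again, mirroring the
# remove calls used elsewhere in these examples (a sketch, not taken from
# this snippet):
def remove_speech_monitor():
    global monitor_installed
    if monitor_installed:
        actr.remove_command_monitor("output-speech", "siegler-response")
        actr.remove_command("siegler-response")
        monitor_installed = False
        return True
    return False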
Example #15
import actr

actr.load_act_r_model("ACT-R:tutorial;unit3;sperling-model.lisp")

responses = []
show_responses = True

exp_data = [3.03, 2.4, 2.03, 1.5]


def trial(onset_time):

    actr.reset()

    letters = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    answers = []
    row = actr.random(3)
    window = actr.open_exp_window("Sperling Experiment", visible=True)

    for i in range(3):
        for j in range(4):
            txt = letters[j + (i * 4)]
            if i == row:
                answers.append(txt)
            actr.add_text_to_exp_window(window,
                                        txt,
                                        x=(75 + (j * 50)),
                                        y=(100 + (i * 50)))
Example #16
import actr

actr.load_act_r_model("ACT-R:tutorial;unit4;paired-model.lisp")

response = False
response_time = False

pairs = list(
    zip([
        'bank', 'card', 'dart', 'face', 'game', 'hand', 'jack', 'king', 'lamb',
        'mask', 'neck', 'pipe', 'quip', 'rope', 'sock', 'tent', 'vent', 'wall',
        'xray', 'zinc'
    ], [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3',
        '4', '5', '6', '7', '8', '9'
    ]))

latencies = [0.0, 2.158, 1.967, 1.762, 1.680, 1.552, 1.467, 1.402]
probabilities = [0.0, .526, .667, .798, .887, .924, .958, .954]


def task(size, trials, human=False):

    actr.add_command("paired-response", respond_to_key_press,
                     "Paired associate task key press response monitor")
    actr.monitor_command("output-key", "paired-response")

    result = do_experiment(size, trials, human)

    actr.remove_command_monitor("output-key", "paired-response")
    actr.remove_command("paired-response")

    return result
Example #17
    actr.run(10)

    # delete all of the visicon features.  delete-all-visicon-features
    # removes all of the features from the visicon.

    actr.delete_all_visicon_features()

    # Give the vision module a chance to process the display
    # before printing the visicon.

    actr.run_n_events(3)
    actr.print_visicon()

    # run the model to show the update
    actr.run(10)


"""
 The model is very simple in that it just repeatedly finds
 a location and then attends to the item there printing
 out the chunks in the visual-location and visual buffers
 after the corresponding requet completes.
 The ordering of the modifications are such that it will
 not automatically updated the attended object chunks 
 since the currently attended item is unchanged in each
 case.
"""

actr.load_act_r_model(
    "ACT-R:examples;vision-module;adjust-visicon-features-model.lisp")
Example #18
import actr

actr.load_act_r_model("ACT-R:tutorial;unit2;demo2-model.lisp")

response = False


def respond_to_key_press(model, key):
    global response

    response = key
    actr.clear_exp_window()


def experiment(human=False):

    actr.reset()

    items = actr.permute_list([
        "B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R",
        "S", "T", "V", "W", "X", "Y", "Z"
    ])
    text1 = items[0]
    window = actr.open_exp_window("Letter recognition")

    actr.add_text_to_exp_window(window, text1, x=125, y=150)

    actr.add_command("demo2-key-press", respond_to_key_press,
                     "Demo2 task output-key monitor")
    actr.monitor_command("output-key", "demo2-key-press")
Example #19
# This is the Python code to run the task that goes along with the
# creating-an-image-model. 
#
# It is a simple demonstration of creating and using the
# image AGI item, and it assumes that the smalllogo.gif
# and ref-brain.gif files are in the GUI/AGI-images directory
# if you use a visible window to see the images.
#
# To run the task call the run_test function.  It has one
# optional parameter which if provided as True indicates 
# the window should be visible.  The default is to use a virtual
# window.  

import actr

actr.load_act_r_model("ACT-R:examples;creating-image-items;creating-an-image-model.lisp")

def click_brain(text, pos):
    actr.model_output('Clicked image %s at %d %d' % (text, pos[0], pos[1]))

actr.add_command("click-brain-py", click_brain,
                 "Example function for image click action. Do not call.")

def click_brain_2(value):
    actr.model_output('Clicked the second brain image given value %s' % value)

actr.add_command("click-brain-2-py", click_brain_2,
                 "Example function for image click action with parameters. Do not call.")


def run_test (visible=False):

    actr.reset()
Example #20
# This is the Python code to run the task that goes along with the
# background-model.
#
# It is a demonstration of creating and using an
# image AGI item along with custom visicon features.
# It assumes that the ref-brain.gif file is in the
# GUI/AGI-images directory if you use a visible window
# to see the images.
#
# To run the task call the run_test function.  It has one
# optional parameter which if provided as any non-nil value
# indicates the window should be visible.  The default is
# to use a virtual window.

import actr

actr.load_act_r_model(
    "ACT-R:examples;creating-image-items;background-model.lisp")


def run_test(visible=False):

    actr.reset()

    win = actr.open_exp_window("background test",
                               visible=visible,
                               width=390,
                               height=390,
                               x=100,
                               y=100)

    actr.install_device(win)
Example #21
        actr.model_output("Model did not respond or provided a non-numeric category.")
        return (0, False)

def create_example_memories():

    for s in slots:
        actr.extend_possible_slots(s,False)
        actr.define_chunks([s,"isa","chunk"])

    for c in cat1:
        chunk = ["isa","example","category",1]
        for slot,value in list(zip(slots,c)):
            chunk.append(slot)
            chunk.append(value)
        actr.add_dm(chunk)

    for c in cat2:
        chunk = ["isa","example","category",2]
        for slot,value in list(zip(slots,c)):
            chunk.append(slot)
            chunk.append(value)
        actr.add_dm(chunk)

actr.add_command("create-example-memories",create_example_memories,"Categorize task function to add the initial example chunks to simulate the training process.")

actr.load_act_r_model("ACT-R:tutorial;unit8;categorize-model.lisp")
Example #22
import actr
import math

actr.load_act_r_model("ACT-R:tutorial;unit7;past-tense-model.lisp")

report = []
total_count = 0
word_list = []

verbs = [['have','i',12458,'had'],
         ['do','i',4367,'did'],
         ['make','i',2312,'made'],
         ['get','i',1486,'got'],
         ['use','r',1016,'use'],
         ['look','r',910,'look'],
         ['seem','r',831,'seem'],
         ['tell','i',759,'told'],
         ['show','r',640,'show'],
         ['want','r',631,'want'],
         ['call','r',627,'call'],
         ['ask','r',612,'ask'],
         ['turn','r',566,'turn'],
         ['follow','r',540,'follow'],
         ['work','r',496,'work'],
         ['live','r',472,'live'],
         ['try','r',472,'try'],
         ['stand','i',468,'stood'],
         ['move','r',447,'move'],
         ['need','r',413,'need'],
         ['start','r',386,'start'],
         ['lose','i',274,'lost']]
Example #23
import actr

actr.load_act_r_model("ACT-R:tutorial;unit5;fan-no-pm-model.lisp")

person_location_data = [
    1.11, 1.17, 1.22, 1.17, 1.20, 1.22, 1.15, 1.23, 1.36, 1.20, 1.22, 1.26,
    1.25, 1.36, 1.29, 1.26, 1.47, 1.47
]


def sentence(person, location, target, term):

    actr.reset()

    if term == 'person':
        actr.pdisable("retrieve-from-location")
    else:
        actr.pdisable("retrieve-from-person")

    actr.mod_chunk("goal", "arg1", person, "arg2", location, "state", "test")

    response_time = actr.run(30)[0]
    response = actr.chunk_slot_value(actr.buffer_read("goal"), "state")

    if target:
        if response.lower() == "'k'".lower():
            return (response_time, True)
        else:
            return (response_time, False)
    else:
        if response.lower() == "'d'".lower():
Example #24
#!/usr/bin/env python

# -*- coding: utf-8 -*-
# @Time : 2019/11/18 19:32
# @Author : Yulong Sun
# @Site :
# @File : zbrodoff_improve.py
# @Software: PyCharm

import actr

actr.load_act_r_model(
    r"C:\Users\syl\Desktop\ACTR_ATO\zbrodoff_improve\zbrodoff_model.lisp")

trials = []
results = []

control_data = [1.84, 2.46, 2.82, 1.21, 1.45, 1.42, 1.14, 1.21, 1.17]

run_model = True


class trial():
    def __init__(self, block, addend1, addend2, sum, answer, visible=None):
        self.block = block
        self.addend2 = addend2
        self.text = addend1 + " + " + addend2 + " = " + sum
        self.answer = answer.lower()
        if visible is None:
            self.visible = not run_model
        else:
Example #25
#this shuffles both lists, stimuli and associated correct responses, in the same order
stims_temp = list(
    zip(
        np.repeat(stims_3, 12).tolist(),
        np.repeat(stims_3_resps, 12).tolist()))

rnd.shuffle(stims_temp)

#stims, cor_resps = zip(*stims_temp)
##########debug########
stims = ['cup']


#Load model
model = actr.load_act_r_model(
    '/home/master-tedward/RLWM_ACTR/memory_model2.lisp')

#variables needed
chunks = None
current_response = np.repeat('x', nTrials).tolist()
accuracy = np.repeat(0, nTrials).tolist()

i = 0
win = None

#Daisy chained python functions to present stimuli, get response and  present feedback


def present_stim():
    global chunks
    global stims
Example #26
# @Time : 2019/11/22 11:11
# @Author : Yulong Sun
# @Site :
# @File : actr-parking-bst.py
# @Software: PyCharm
"""
Assumption: an experienced driver can judge the approximate distance just
from the current speed and the different brake levels.
Initial conditions: the initial distance and the initial speed.
The idea of this program is to show ACT-R the initial distance to the stop
marker and the required stopping accuracy of 30 m, let ACT-R choose among
brake levels covering different lengths, and keep shrinking the distance
until the required accuracy is reached.
"""

import actr
import random
import time

actr.load_act_r_model(
    r"C:\Users\syl\Desktop\ACTR_ATO\actr-parking-bst\actr-parking.lisp")

target = None
current_stick = None
current_line = None
done = False
choice = None
window = None
visible = False

exp_data = [20, 67, 20, 47, 87, 20, 80, 93, 83, 13, 29, 27, 80, 73, 53]
exp_stims = [[15, 250, 55, 125], [10, 155, 22, 101], [14, 200, 37, 112],
             [22, 200, 32, 114], [10, 243, 37, 159], [22, 175, 40, 73],
             [15, 250, 49, 137], [10, 179, 32, 105], [20, 213, 42, 104],
             [14, 237, 51, 116], [12, 149, 30, 72], [14, 237, 51, 121],
             [22, 200, 32, 114], [14, 200, 37, 112], [15, 250, 55, 125]]
Example #27
import actr

actr.load_act_r_model("ACT-R:tutorial;unit5;grouped-model.lisp")

response = []


def recall():

    actr.add_command(
        "grouped-response", record_response,
        "Response recording function for the tutorial grouped model.")
    global response
    response = []
    actr.reset()
    actr.run(20)
    actr.remove_command("grouped-response")
    return response


def record_response(item):

    global response
    response.append(item)
Example #28
    actr.schedule_event_relative(5,
                                 'utility-learning-issues-show-result',
                                 params=[answer],
                                 output='medium')


def show_result(choice):
    actr.mod_chunk('response', 'answer', choice)
    actr.set_buffer_chunk('imaginal', 'response')
    actr.schedule_event_relative(2,
                                 'utility-learning-issues-choose',
                                 output='medium')


actr.add_command(
    'utility-learning-issues-choose', present_choose,
    "Function to change model's goal for utility learning issues model")
actr.add_command(
    'utility-learning-issues-show-result', show_result,
    "Function to set the model's imaginal buffer for utility learning issues model"
)


def finished():
    actr.remove_command('utility-learning-issues-choose')
    actr.remove_command('utility-learning-issues-show-result')


actr.load_act_r_model(
    "ACT-R:tutorial;unit7;utility-learning-issues-model.lisp")
Example #29
import sys  # assumed import: sys.argv is used below but cut from this snippet
import actr  # assumed import: actr calls appear below
import moveit_commander  # assumed import: used in init() below
from geometry_msgs.msg import PoseStamped, Pose
import moveit_msgs.msg
from moveit_msgs.msg import PlanningScene, ObjectColor
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
import argparse
import time

global robot, scene, scene_pub
global right_arm_group, head, left_arm_group, both_arms_group
#global right_hand_group
#global gripper_pose_pub
#global eef, eef_link, touch_links

actr.load_act_r_model("ACT-R:INNER;DemoiScience;inner_model.lisp")
#actr.load_act_r_model("ACT-R:INNER;DemoSpecchio;mirror.lisp")

global response, response_time
response = ""
response_time = False

global IP, tts, asr


def init():
    global robot, scene, scene_pub, gripper_pose_pub
    global right_arm_group, right_hand_group, head, both_arms_group, left_arm_group

    #Initialize the move_group API
    moveit_commander.roscpp_initialize(sys.argv)
Example #30
import actr

actr.load_act_r_model("ACT-R:tutorial;unit7;production-compilation-issues-model.lisp")

val1 = ''
val2 = ''
responses = []
start_time = 0
times = []
exp_length = 0
task_over = True
task_state = None
window = None

result_matrix = [[["win","lose","draw","lose"],["win","lose","draw","lose"],["win","lose","draw","lose"],["lose","draw","win","lose"]],
                 [["win","lose","draw","lose"],["win","lose","draw","lose"],["lose","draw","win","lose"],["lose","draw","win","lose"]],
                 [["win","lose","draw","lose"],["win","lose","draw","lose"],["lose","draw","win","lose"],["lose","draw","win","lose"]],
                 [["win","lose","draw","lose"],["lose","draw","win","lose"],["lose","draw","win","lose"],["lose","draw","win","lose"]]]

def convert_key_to_index(key):
    key = key.lower()
    if key == 's':
        return 0
    if key == 'd':
        return 1
    if key == 'f':
        return 2
    return 3