Example #1
        0: [],
        '120': [1, 3, 4],
        "170": [13, 14],
        "220": [21, 26]
    }
}

stim_d = {}
for pos, group in enumerate(groups):
    print(pos)
    stim_d.update({(pos * 10) + (key + 1): {
        'text': letter,
        'position': (100 + key * 20, 120 + 50 * pos)
    }
                   for key, letter in enumerate(group)})
environ = actr.Environment()
m = Model(environ,
          subsymbolic=True,
          latency_factor=0.1,
          decay=0.5,
          retrieval_threshold=-10,
          instantaneous_noise=0.1,
          automatic_visual_search=False,
          eye_mvt_scaling_parameter=0.1,
          eye_mvt_angle_parameter=0.1,
          utility_noise=0.7)
times = []
for loop in range(100):

    environ.current_focus = (80, 120)
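    # The excerpt stops here. Judging from the near-identical loop in Example #7
    # below, each iteration plausibly continues by re-adding the goal chunk and
    # resetting declarative memory before a new simulation run, e.g.:
    # m.m.goals["g"].add(actr.makechunk(typename="read", state=m.start, arg1='0', arg2=None, end='8'))
    # m.m.decmems = {}
    # m.m.set_decmem(NEW_DECMEM)
    # m.m.decmem.activations.update(LEMMA_CHUNKS)
    # m.m.set_retrieval("retrieval")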
Example #2
as lexical retrieval is completed; further processing (construction of
appropriate syn/sem structures etc.) is done in parallel to the motor module
actions.
"""

import pyactr as actr
import simpy
import re
from nltk.stem.snowball import SnowballStemmer

import numpy as np

# stemmer to generate non-logical constants for word meaning representations
stemmer = SnowballStemmer("english")
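# For illustration (not in the original file): the stemmed form is what serves
# as the non-logical constant, e.g. stemmer.stem("walking") returns "walk".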

environment = actr.Environment(focus_position=(320, 180))

# we have a discourse_status feature in goal chunks, initialized to the value
# at_issue (as opposed to presupposed or unresolved DRSs); an illustrative
# goal chunk is sketched after the chunktype definitions below

actr.chunktype(
    "parsing_goal", "task stack1 stack2 stack3 \
                arg_stack1 arg_stack2 \
                right_edge_stack1 right_edge_stack2 \
                right_edge_stack3 right_edge_stack4 \
                parsed_word found discourse_status \
                dref_peg event_peg drs_peg prev_drs_peg embedding_level\
                entity_cataphora event_cataphora if_conseq_pred")
actr.chunktype(
    "parse_state", "node_cat daughter1 daughter2 daughter3 \
                mother mother_of_mother lex_head")
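
# An illustrative sketch (not part of the original model): a parsing_goal chunk
# whose discourse_status slot starts out as at_issue, as described in the
# comment above; the other slot values here are placeholder assumptions.
goal_sketch = actr.chunkstring(name="goal_sketch", string="""
    isa                 parsing_goal
    task                parsing
    discourse_status    at_issue
    embedding_level     0""")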
Example #3
 list_of_obj = read_obj_log_file(file_path) # list_of_obj[i] has the list of objects [[obj1, probability, middlepointX, middlepointY],[obj2,...],...] for frame number i
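 # e.g. (illustrative assumption, matching how x[0], x[2], x[3] are used below):
 # list_of_obj[0] == [['car', 0.91, 412, 308], ['person', 0.66, 97, 211]]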
 old_stdout = sys.stdout 
 #log_file = open("message.log", "w")
 #sys.stdout = log_file
 sys.stdout = log_line = StringIO()
 #stim_d = {key: {'text': x, 'position': (random.randint(10,630), random.randint(10, 310))} for key, x in enumerate(string.ascii_uppercase)}
 #print(stim_d)
 #stim_d = [{1: {'text': 'X', 'position': (10, 10)}, 2: {'text': 'Y', 'position': (10, 20)}, 3:{'text': 'Z', 'position': (10, 30)}},{1: {'text': 'A', 'position': (10, 40)}, 2: {'text': 'B', 'position': (10, 50)}, 3:{'text': 'C', 'position': (10, 60)}}]
 #stim_d = [{1: {'text': 'X', 'position': (10, 10)}, 2: {'text': 'Y', 'position': (10, 20)}, 3:{'text': 'Z', 'position': (10, 30)}, 4: {'text': 'A', 'position': (10, 40)}, 5: {'text': 'B', 'position': (10, 50)}, 6:{'text': 'C', 'position': (10, 60)}}]
 gaze_data_all = np.ndarray(shape=(len(list_of_obj),1), dtype=float)
 for i in tqdm(range(len(list_of_obj))):
     gaze_data = np.zeros(1)
     # for trial in range(10):
     stim_d = {key: {'text':x[0], 'position': (x[2], x[3])} for key,x in enumerate(list_of_obj[i])}
     #print(stim_d)
     environ = actr.Environment(focus_position=(0,0))
     # If you don't want to use the EMMA model, specify emma=False in here
     m = Model(environ,
               subsymbolic=True,
               latency_factor=0.4,
               decay=0.5,
               retrieval_threshold=-2,
               instantaneous_noise=0,
               automatic_visual_search=True,
               eye_mvt_scaling_parameter=0.05,
               eye_mvt_angle_parameter=10)
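     # As the comment above notes, the EMMA eye-movement component can be turned
     # off; a hypothetical variant of the same call (not from this file) would be:
     # m = Model(environ, subsymbolic=True, latency_factor=0.4, decay=0.5,
     #           retrieval_threshold=-2, instantaneous_noise=0, emma=False)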
     sim = m.m.simulation(realtime=False,
                          trace=True,
                          gui=False,
                          environment_process=environ.environment_process,
                          stimuli=stim_d,
                          triggers='X',
                          times=10)
     sim.run(10)
     check = 0
     for key in m.dm:
         if key.typename == '_visual':
             print(key, m.dm[key])
             check += 1
     
     if log_line.getvalue()[-2] == "]":
         
         the_line = log_line.getvalue()[-50:-2]
         eye_gaze_this = the_line.split()[-1]
         #avg_file.write(eye_gaze_this)
         gaze_data[0] = float(eye_gaze_this)
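     # The excerpt ends here. A plausible continuation (an assumption, not shown
     # in the original) stores the per-frame value in the array created above and,
     # after the loop, restores stdout:
     # gaze_data_all[i] = gaze_data
     # ... and once the loop is done: sys.stdout = old_stdout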
Example #4
"""
Demo - pressing a key by ACT-R model. It corresponds to 'demo2' in Lisp ACT-R, unit 2.
"""

import string
import random
import warnings

import tkinter as tk  #delete later
import pyactr as actr

stimulus = random.sample(string.ascii_uppercase, 1)[0]
text = {1: {'text': stimulus, 'position': (100, 100)}}
environ = actr.Environment(focus_position=(100, 100))

m = actr.ACTRModel(environment=environ, motor_prepared=True)

actr.chunktype("chunk", "value")
actr.chunktype("read", "state")
actr.chunktype("image", "img")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="attend_let", typename="chunk", value="attend_let")
actr.makechunk(nameofchunk="response", typename="chunk", value="response")
actr.makechunk(nameofchunk="done", typename="chunk", value="done")
m.goal.add(
    actr.chunkstring(name="reading",
                     string="""
        isa     read
        state   start"""))
g2 = m.set_goal("g2")
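
# The productions that read the letter and press the key are omitted from this
# excerpt. Once they are defined, such a model is typically run against the
# environment roughly as follows (a sketch mirroring the simulation calls in the
# other examples here; the exact parameter values are assumptions):
sim = m.simulation(realtime=False,
                   gui=False,
                   environment_process=environ.environment_process,
                   stimuli=text,
                   triggers=stimulus,
                   times=1)
sim.run(1)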

Example #5
    workbook = pd.ExcelFile(lfile)
    worksheet = workbook.parse()
    return worksheet


def load_file(lfile, index_col=None, sep=","):
    """
    Loads file as a list
    """
    csvfile = pd.read_csv(lfile, index_col=index_col, header=0, sep=sep)
    return csvfile


##############ACT-R model, basics#####################

environment = actr.Environment(size=(1366, 768), focus_position=(0, 0))

actr.chunktype("read", "state word")
actr.chunktype("parsing", "top")
actr.chunktype("word", "form cat")

#the model with basic parameters set up
parser = actr.ACTRModel(environment,
                        subsymbolic=True,
                        optimized_learning=OPTIMIZED_LEARNING,
                        retrieval_threshold=RETRIEVAL_THRESHOLD,
                        decay=DECAY,
                        emma_noise=EMMA_NOISE,
                        emma_landing_site_noise=EMMA_NOISE)

parser.productionstring(

Example #6
    # gaze_data_all = np.ndarray(shape=(len(list_of_obj),1), dtype=float)
    # for i in tqdm(range(len(list_of_obj))):
    gaze_data = np.zeros(1)
    # for trial in range(10):
    stim_d = {
        key: {
            'text': key,
            'position': (x[2], x[3])
        }
        for key, x in enumerate(
            sorted(list_of_obj[1802], key=lambda objs: objs[4], reverse=True))
    }
    print(stim_d)
    environ = actr.Environment(size=aspect_ratio,
                               simulated_display_resolution=aspect_ratio,
                               simulated_screen_size=(60, 34),
                               viewing_distance=60)
    m = Model(
        environ,
        subsymbolic=True,
        latency_factor=0.4,
        decay=0.5,
        retrieval_threshold=-2,
        instantaneous_noise=0,
        automatic_visual_search=True,
        eye_mvt_scaling_parameter=0.05,
        eye_mvt_angle_parameter=10,
        emma_landing_site_noise=True
    )  #If you don't want to use the EMMA model, specify emma=False in here
    sim = m.m.simulation(realtime=False,
                         trace=True,
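                         # the remaining arguments below are an assumption,
                         # filled in from the near-identical call in Example #3:
                         gui=False,
                         environment_process=environ.environment_process,
                         stimuli=stim_d,
                         triggers='X',
                         times=10)
    sim.run(10)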
Example #7
    isa     _manual
    cmd     press_key
    key     'F'""", utility=-5)

        self.productions = self.m._ACTRModel__productions

groups = ['WBWWB', 'BBWW', 'WBBBBW']
numbers_b = []
for group in groups:
    numbers_b.append(group.count("B"))
pos_b = ['120', '100', '120']
stim_d = {}
for pos, group in enumerate(groups):
    print(pos)
    stim_d.update({(pos * 10) + (key + 1): {
        'text': letter,
        'position': (100 + key * 20, 120 + 50 * pos)
    } for key, letter in enumerate(group)})
environ = actr.Environment(focus_position=(80,120))
m = Model(environ,
          subsymbolic=True,
          latency_factor=0.1,
          decay=0.5,
          retrieval_threshold=-10,
          instantaneous_noise=0.1,
          automatic_visual_search=False,
          eye_mvt_scaling_parameter=0.1,
          eye_mvt_angle_parameter=0.1,
          utility_noise=0.6)

times = []
for loop in range(100):
    
    m.m.goals["g"].add(actr.makechunk(typename="read", state=m.start, arg1='0', arg2=None, end='8'))

    environ.current_focus = (80, 120)

    m.m.decmems = {}
    m.m.set_decmem(NEW_DECMEM)

    m.m.decmem.activations.update(LEMMA_CHUNKS)

    m.m.set_retrieval("retrieval")
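    # The excerpt ends here. A plausible per-iteration continuation, modelled on
    # the simulation calls in the other examples (the parameter values and the
    # use of sim.show_time() are assumptions, not taken from this file):
    # sim = m.m.simulation(realtime=False, gui=False, trace=False,
    #                      environment_process=environ.environment_process,
    #                      stimuli=stim_d, triggers='X', times=10)
    # sim.run(10)
    # times.append(sim.show_time())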