def set_similarities(self, chunk, otherchunk, value):
    """
    Set the similarity between two chunks, symmetrically.

    By default, different chunks have the value of -1. chunk and otherchunk
    are the two chunks whose similarity is set; value must be a non-positive
    number, otherwise ACTRError is raised.
    """
    if value > 0:
        raise utilities.ACTRError("Values in similarities must be 0 or smaller than 0")
    # record the pair in both orders so that lookup is order-independent
    key_pair = (chunk, otherchunk)
    self.__similarities[key_pair] = value
    self.__similarities[key_pair[::-1]] = value
def productionstring(self, name='', string='', utility=0, reward=None):
    """
    Create a production rule when given a string.

    The string is specified in the following form (as a string): LHS ==> RHS

    name -- name of the rule (auto-generated when empty)
    string -- the rule itself, in the form 'LHS ==> RHS'
    utility -- utility of the rule (used in subsymbolic rule selection)
    reward -- reward of the rule (used in utility learning)

    Registers the rule in self.productions and returns the stored entry.
    Raises ACTRError when the string cannot be parsed or a chunk inside it
    is ill-formed.

    The following example would be a rule that checks the buffer 'g' and if
    the buffer has value one, it will reset it to two:
    >>> ACTRModel().productionstring(name='example0', string='=g>\
    isa example\
    value one\
    ==>\
    =g>\
    isa example\
    value two')
    {'=g': example(value= one)}
    ==>
    {'=g': example(value= two)}
    """
    if not name:
        # BUGFIX: the rule counter is a number (it is incremented with += 1
        # below), so it must be converted with str() before concatenation;
        # previously this raised TypeError whenever the name was omitted.
        name = "unnamedrule" + str(productions.Productions._undefinedrulecounter)
        productions.Productions._undefinedrulecounter += 1
    # invert the convention tables: printed symbol -> internal key
    temp_dictRHS = {v: k for k, v in utilities._RHSCONVENTIONS.items()}
    temp_dictLHS = {v: k for k, v in utilities._LHSCONVENTIONS.items()}
    rule_reader = utilities.getrule()
    try:
        rule = rule_reader.parseString(string, parseAll=True)
    except pyparsing.ParseException as e:
        raise utilities.ACTRError("The rule '%s' could not be parsed. The following error was observed: %s" %(name, e))
    lhs, rhs = {}, {}
    def func():
        # generator: first yields the LHS dict (buffer test -> chunk or
        # query dict), then the RHS dict (buffer action -> payload)
        for each in rule[0]:
            if each[0] == temp_dictLHS["query"]:
                # queries are plain attribute-value dicts, not chunks
                lhs[each[0]+each[1]] = {x[0]:x[1] for x in each[3]}
            else:
                try:
                    type_chunk, chunk_dict = chunks.createchunkdict(each[3])
                except utilities.ACTRError as e:
                    raise utilities.ACTRError("The rule string %s is not defined correctly; %s" %(name, e))
                lhs[each[0]+each[1]] = chunks.makechunk("", type_chunk, **chunk_dict)
        yield lhs
        for each in rule[2]:
            if each[0] == temp_dictRHS["extra_test"]:
                rhs[each[0]+each[1]] = {x[0]:x[1] for x in each[3]}
            elif each[0] == temp_dictRHS["clear"]:
                # clearing a buffer carries no payload
                rhs[each[0]+each[1]] = None
            elif each[0] == temp_dictRHS["execute"]:
                # execute actions keep the raw parsed payload
                rhs[each[0]+each[1]] = each[3]
            else:
                try:
                    type_chunk, chunk_dict = chunks.createchunkdict(each[3])
                except utilities.ACTRError as e:
                    raise utilities.ACTRError("The rule string %s is not defined correctly; %s" %(name, e))
                rhs[each[0]+each[1]] = chunks.makechunk("", type_chunk, **chunk_dict)
        yield rhs
    self.productions.update({name: {"rule": func, "utility": utility, "reward": reward}})
    return self.productions[name]
def makechunk(nameofchunk="", typename="", **dictionary):
    """
    Create a chunk.

    Three values can be specified:
    (i) the name of the chunk (the name could be used if the chunk appears
        as a value of other chunks or production rules)
    (ii) its type
    (iii) slot-value pairs.

    For example:
    >>> makechunk(nameofchunk='example0', typename='chunktype_example0', value='one')
    chunktype_example0(value= one)

    This creates a chunk of type chunk1, which has one slot (value) and the
    value of that slot is one.
    """
    if not nameofchunk:
        nameofchunk = "unnamedchunk"
    if not typename:
        typename = "undefined" + str(Chunk._undefinedchunktypecounter)
        Chunk._undefinedchunktypecounter += 1
    for key, slotvalue in dictionary.items():
        # chunks and already-normalized VarvalClass values are left untouched
        if isinstance(slotvalue, (Chunk, utilities.VarvalClass)):
            continue
        # anything else is stringified and split into the VarvalClass fields
        try:
            parsed = utilities.stringsplitting(str(slotvalue))
        except utilities.ACTRError as e:
            raise utilities.ACTRError("The chunk value %s is not defined correctly; %s" % (slotvalue, e))
        for field in list(parsed):
            if field in ("negvariables", "negvalues"):
                # negated fields may hold several items -- freeze as tuple
                parsed[field] = tuple(parsed[field])
            else:
                # values/variables hold at most one item; None when absent
                try:
                    parsed[field] = parsed[field].pop()
                except KeyError:
                    parsed[field] = None
        dictionary[key] = utilities.VarvalClass(**parsed)
    created_chunk = Chunk(typename, **dictionary)
    # register the chunk under its name so it can be referenced elsewhere
    created_chunk._chunks[nameofchunk] = created_chunk
    return created_chunk
def modify(self, otherchunk, actrvariables=None):
    """
    Modify the chunk in Buffer according to the info in otherchunk.
    """
    if actrvariables is None:
        actrvariables = {}
    current_chunk = self._data.pop()
    try:
        # slot-value pairs coming from otherchunk, with ACT-R variables resolved
        incoming = {slot: utilities.check_bound_vars(actrvariables, value) for slot, value in otherchunk.removeunused()}
    except utilities.ACTRError as arg:
        raise utilities.ACTRError("The chunk '%s' is not defined correctly; %s" % (otherchunk, arg))
    # start from the chunk that was in the buffer and overlay the new pairs
    merged = {slot: value for slot, value in current_chunk}
    merged.update(incoming)
    # the modified chunk goes straight back into the buffer
    self._data.add(chunks.Chunk(otherchunk.typename, **merged))
def chunkstring(name='', string=''):
    """
    Create a chunk when given a string.

    The string is specified in the form: slot value slot value (an arbitrary
    number of slot-value pairs can be used). The isa-slot is used as the type
    of the chunk. If no isa-slot is provided, the chunk is assigned an
    'undefined' type.

    For example:
    >>> chunkstring(name="example0", string='isa chunktype_example0 value one')
    chunktype_example0(value= one)
    """
    # parse errors from pyparsing propagate directly to the caller
    parsed = utilities.getchunk().parseString(string, parseAll=True)
    try:
        type_chunk, chunk_dict = createchunkdict(parsed)
    except utilities.ACTRError as e:
        raise utilities.ACTRError("The chunk string %s is not defined correctly; %s" %(string, e))
    return makechunk(name, type_chunk, **chunk_dict)
def create(self, otherchunk, harvest=None, actrvariables=None):
    """
    Create (aka set) a chunk in goal buffer.

    otherchunk -- the chunk whose (variable-resolved) slot-value pairs are used
    harvest -- buffer into which the current content is harvested by add
    actrvariables -- dict of currently bound ACT-R variables; None is treated
    as no bindings

    Raises ACTRError if a variable in otherchunk cannot be resolved.
    """
    # CONSISTENCY FIX: sibling methods (modify, retrieve) default a missing
    # actrvariables to an empty dict; without this guard None was passed on
    # to check_bound_vars.
    if actrvariables == None:
        actrvariables = {}
    try:
        mod_attr_val = {x[0]: utilities.check_bound_vars(actrvariables, x[1]) for x in otherchunk.removeunused()} #creates dict of attr-val pairs according to otherchunk
    except utilities.ACTRError as arg:
        raise utilities.ACTRError("Setting the buffer using the chunk '%s' is impossible; %s" % (otherchunk, arg))
    new_chunk = chunks.Chunk(otherchunk.typename, **mod_attr_val) #creates new chunk
    self.add(new_chunk, 0, harvest) #put chunk using add
def __init__(self, environment=None, **model_parameters):
    """
    Set up a fresh model: goal and retrieval buffers, declarative memory,
    an empty production system, similarities, and the model parameters
    (defaults overridden by any supplied keyword arguments).
    """
    self.chunktype = chunks.chunktype
    self.chunkstring = chunks.chunkstring
    self.visbuffers = {}
    # the goal buffer is registered both publicly and in the private buffer map
    initial_goal = goals.Goal()
    self.goals = {"g": initial_goal}
    self.__buffers = {"g": initial_goal}
    # the retrieval buffer likewise
    initial_retrieval = declarative.DecMemBuffer()
    self.retrievals = {"retrieval": initial_retrieval}
    self.__buffers["retrieval"] = initial_retrieval
    self.decmems = {"decmem": declarative.DecMem()}
    self.__productions = productions.Productions()
    self.__similarities = {}
    self.model_parameters = self.MODEL_PARAMETERS.copy()
    try:
        requested = set(model_parameters)
        recognized = set(self.MODEL_PARAMETERS)
        # reject any parameter name that is not a known model parameter
        if not requested <= recognized:
            raise utilities.ACTRError("Incorrect model parameter(s) %s. The only possible model parameters are: '%s'" % (requested - recognized, recognized))
        self.model_parameters.update(model_parameters)
    except TypeError:
        pass
    self.__env = environment
def retrieve(self, time, otherchunk, actrvariables, buffers, extra_tests, model_parameters):
    """
    Retrieve a chunk from declarative memory that matches otherchunk.

    time -- current simulation time (used for base-level learning)
    otherchunk -- the chunk pattern to be matched against declarative memory
    actrvariables -- dict of currently bound ACT-R variables (None treated as empty)
    buffers -- buffers used for spreading activation
    extra_tests -- extra retrieval tests (e.g., "recently_retrieved")
    model_parameters -- per-call parameters; overridden by this buffer's own model_parameters

    Returns (retrieved_chunk_or_None, extra_time_needed).
    """
    # buffer-local parameters take precedence over the ones passed in
    model_parameters = model_parameters.copy()
    model_parameters.update(self.model_parameters)
    if actrvariables == None:
        actrvariables = {}
    try:
        # resolve variables in the search pattern; negative_impossible=False
        # allows negative specifications to stay in the pattern
        mod_attr_val = {x[0]: utilities.check_bound_vars(actrvariables, x[1], negative_impossible=False) for x in otherchunk.removeunused()}
    except utilities.ACTRError as arg:
        raise utilities.ACTRError("Retrieving the chunk '%s' is impossible; %s" % (otherchunk, arg))
    chunk_tobe_matched = chunks.Chunk(otherchunk.typename, **mod_attr_val)
    max_A = float("-inf")
    #collect the subset of dm that is useful (only chunks that match the searched chunk will be used)
    if len(chunk_tobe_matched.removeunused()) == 0 or (model_parameters["subsymbolic"] and model_parameters["partial_matching"]):
        # no constraints, or partial matching: every chunk is a candidate
        used_dm = self.dm
    else:
        used_dm = {}
        # NOTE(review): the loop variable x is reused by the nested loop below,
        # clobbering the outer x after the first inner iteration; it works here
        # because the outer x is only read before the inner loop starts.
        for x in chunk_tobe_matched.removeunused():
            # one-slot probe chunk used to pre-filter declarative memory
            temp_chunk = chunks.Chunk(typename=getattr(chunk_tobe_matched, "typename"), **{x[0]: x[1]})
            temp_data = {}
            for x in self.dm._data:
                if temp_chunk <= x:
                    temp_data.update(self.dm._data[x])
            #update used_dm with found chunks (either by creating it, if it is empty, or by intersecting with already present chunks)
            if not used_dm:
                used_dm = temp_data
            elif len(used_dm) <= len(temp_data):
                # intersect, iterating over the smaller dict for speed
                temp_data2 = {}
                for i in used_dm:
                    if i in temp_data:
                        temp_data2[i] = temp_data[i]
                used_dm = temp_data2
            elif len(temp_data) < len(used_dm):
                temp_data2 = {}
                for i in temp_data:
                    if i in used_dm:
                        temp_data2[i] = used_dm[i]
                used_dm = temp_data2
    retrieved = None
    #loop through this subset and check activation
    for chunk in used_dm:
        try:
            # finst-based filter on recently retrieved chunks
            if extra_tests["recently_retrieved"] == False or extra_tests["recently_retrieved"] == 'False':
                if self.__finst and chunk in self.recent:
                    continue
            else:
                if self.__finst and chunk not in self.recent:
                    continue
        except KeyError:
            pass
        if model_parameters["subsymbolic"]: #if subsymbolic, check activation
            A_pm = 0
            if model_parameters["partial_matching"]:
                A_pm = chunk_tobe_matched.match(chunk, partialmatching=True, mismatch_penalty=model_parameters["mismatch_penalty"])
            else:
                if not chunk_tobe_matched <= chunk:
                    continue
            try:
                A_bll = utilities.baselevel_learning(time, self.dm[chunk], model_parameters["baselevel_learning"], model_parameters["decay"], self.dm.activations.get(chunk), optimized_learning=model_parameters["optimized_learning"]) #bll
            except UnboundLocalError:
                continue
            if math.isnan(A_bll):
                raise utilities.ACTRError("The following chunk cannot receive base activation: %s. The reason is that one of its traces did not appear in a past moment." % chunk)
            try:
                A_sa = utilities.spreading_activation(chunk, buffers, self.dm, model_parameters["buffer_spreading_activation"], model_parameters["strength_of_association"], model_parameters["spreading_activation_restricted"], model_parameters["association_only_from_chunks"])
            except IndexError:
                A_sa = float(0)
            inst_noise = utilities.calculate_instantaneous_noise(model_parameters["instantaneous_noise"])
            A = A_bll + A_sa + A_pm + inst_noise #chunk.activation is the manually specified activation, potentially used by the modeller
            # keep the best-activated chunk above the retrieval threshold
            if utilities.retrieval_success(A, model_parameters["retrieval_threshold"]) and max_A < A:
                self.spreading_activation = A_sa
                max_A = A
                self.activation = max_A
                retrieved = chunk
                extra_time = utilities.retrieval_latency(A, model_parameters["latency_factor"], model_parameters["latency_exponent"])
                if model_parameters["activation_trace"]:
                    print("(Partially) matching chunk:", chunk)
                    print("Base level learning:", A_bll)
                    print("Spreading activation", A_sa)
                    print("Partial matching", A_pm)
                    print("Noise:", inst_noise)
                    print("Total activation", A)
                    print("Time to retrieve", extra_time)
        else: #otherwise, just standard time for rule firing, so no extra calculation needed
            if chunk_tobe_matched <= chunk and self.dm[chunk][0] != time: #the second condition ensures that the chunk that was created are not retrieved at the same time
                retrieved = chunk
                extra_time = model_parameters["rule_firing"]
    if not retrieved:
        # retrieval failure: reset trace attributes and charge failure latency
        self.activation, self.spreading_activation = None, None
        if model_parameters["subsymbolic"]:
            extra_time = utilities.retrieval_latency(model_parameters["retrieval_threshold"], model_parameters["latency_factor"], model_parameters["latency_exponent"])
        else:
            extra_time = model_parameters["rule_firing"]
    if self.__finst:
        # record the retrieval (even a failure, as None) in the finst window
        self.recent.append(retrieved)
        if self.__finst < len(self.recent):
            self.recent.popleft()
    return retrieved, extra_time
def environment_process(self, stimuli=None, triggers=None, times=1, start_time=0):
    """
    Example of environment process. Text appears, changes/disappears after
    run_time runs out. This does not do anything on its own, it has to be
    embedded in the simulation of an ACT-R Model.

    stimuli -- a stimulus (string/dict) or a list of stimuli
    triggers -- a trigger string or a list of trigger strings
    times -- a duration or a list of durations, one per stimulus
    start_time -- offset subtracted from the environment's initial time

    Yields environment Events; raises ACTRError on malformed arguments.
    """
    #subtract start_time from initial_time
    start_time = self.initial_time - start_time
    #make all arguments iterables if they are not yet
    # BUGFIX: collections.Mapping / collections.Iterable were deprecated
    # aliases removed in Python 3.10; use collections.abc (as the rest of
    # this module already does).
    if isinstance(stimuli, str) or isinstance(stimuli, collections.abc.Mapping) or not isinstance(stimuli, collections.abc.Iterable):
        stimuli = [stimuli]
    for idx in range(len(stimuli)):
        if isinstance(stimuli[idx], collections.abc.Mapping):
            for each in stimuli[idx]:
                if not isinstance(stimuli[idx][each], collections.abc.Mapping): #stimuli[idx][each] encodes position etc.
                    raise utilities.ACTRError("Arguments of stimuli, if any, must be dictionaries, e.g.,: [{'stimulus1-0time': {'text': 'hi', 'position': (0, 0)}, 'stimulus2-0time': {'text': 'you', 'position': (10, 10)}}, {'stimulus3-latertime': {'text': 'new', 'position': (0, 0)}}] etc. Currently, you have this: '%s'" % stimuli[idx])
        else:
            stimuli[idx] = {stimuli[idx]: {'position': (320, 180)}} #default position
    if isinstance(triggers, str) or not isinstance(triggers, collections.abc.Iterable):
        triggers = [triggers]
    if isinstance(times, str) or not isinstance(times, collections.abc.Iterable):
        times = [times]
    #sanity checks - each arg must match in length, or an argument must be of length 1
    if len(stimuli) != len(triggers):
        if len(stimuli) == 1:
            stimuli = stimuli * len(triggers)
        elif len(triggers) == 1:
            triggers = triggers * len(stimuli)
        else:
            raise utilities.ACTRError("In environment, stimuli must be the same length as triggers or one of the two must be of length 1")
    if len(stimuli) != len(times):
        if len(times) == 1:
            times = times * len(stimuli)
        else:
            raise utilities.ACTRError("In environment, times must be the same length as stimuli or times must be of length 1")
    self.stimuli = stimuli
    try:
        self.triggers = [x.upper() for x in triggers]
    except AttributeError:
        raise utilities.ACTRError("Triggers are not strings; currently nothing else than strings are allowed as triggers in environment")
    self.times = times
    time = start_time
    yield self.Event(self.roundtime(time), self._ENV, "STARTING ENVIRONMENT") #yield Event; Event has three positions - time, process, in this case, ENVIRONMENT (specified in self._ENV) and description of action
    for idx, stimulus in enumerate(self.stimuli): #run through elems, print them, yield a corresponding event
        self.run_time = self.times[idx] #current run_time
        time = time + self.run_time
        self.trigger = self.triggers[idx] #current trigger
        self.output(stimulus) #output on environment
        yield self.Event(self.roundtime(time), self._ENV, "PRINTED NEW STIMULUS")
def retrieve(self, time, otherchunk, actrvariables, buffers, extra_tests, model_parameters):
    """
    Retrieve a chunk from declarative memory that matches otherchunk.

    time -- current simulation time (used for base-level learning)
    otherchunk -- the chunk pattern to be matched against declarative memory
    actrvariables -- dict of currently bound ACT-R variables (None treated as empty)
    buffers -- buffers used for spreading activation
    extra_tests -- extra retrieval tests (e.g., "recently_retrieved")
    model_parameters -- per-call parameters; overridden by this buffer's own model_parameters

    Returns (retrieved_chunk_or_None, extra_time_needed). Unlike the
    subset-filtering variant elsewhere in this file, this version scans
    all of declarative memory.
    """
    # buffer-local parameters take precedence over the ones passed in
    model_parameters = model_parameters.copy()
    model_parameters.update(self.model_parameters)
    if actrvariables == None:
        actrvariables = {}
    try:
        # resolve variables in the search pattern
        mod_attr_val = {x[0]: utilities.check_bound_vars(actrvariables, x[1]) for x in otherchunk.removeunused()}
    except utilities.ACTRError as arg:
        raise utilities.ACTRError("The chunk '%s' is not defined correctly; %s" % (otherchunk, arg))
    chunk_tobe_matched = chunks.Chunk(otherchunk.typename, **mod_attr_val)
    max_A = float("-inf")
    retrieved = None
    for chunk in self.dm:
        try:
            # finst-based filter on recently retrieved chunks
            if extra_tests["recently_retrieved"] == False or extra_tests["recently_retrieved"] == 'False':
                if self.__finst and chunk in self.recent:
                    continue
            else:
                if self.__finst and chunk not in self.recent:
                    continue
        except KeyError:
            pass
        if model_parameters["subsymbolic"]: #if subsymbolic, check activation
            A_pm = 0
            if model_parameters["partial_matching"]:
                A_pm = chunk_tobe_matched.match(chunk, partialmatching=True, mismatch_penalty=model_parameters["mismatch_penalty"])
            else:
                if not chunk_tobe_matched <= chunk:
                    continue
            # base-level learning; pass stored activations only when present
            if chunk in self.dm.activations:
                A_bll = utilities.baselevel_learning(time, self.dm[chunk], model_parameters["baselevel_learning"], model_parameters["decay"], self.dm.activations[chunk], optimized_learning=model_parameters["optimized_learning"]) #bll
            else:
                A_bll = utilities.baselevel_learning(time, self.dm[chunk], model_parameters["baselevel_learning"], model_parameters["decay"], optimized_learning=model_parameters["optimized_learning"]) #bll
            A_sa = utilities.spreading_activation(chunk, buffers, self.dm, model_parameters["buffer_spreading_activation"], model_parameters["strength_of_association"], model_parameters["spreading_activation_restricted"], model_parameters["association_only_from_chunks"])
            # NOTE(review): 'calculate_instantanoues_noise' is spelled
            # differently from 'calculate_instantaneous_noise' used elsewhere
            # in this file -- confirm which name utilities actually defines.
            inst_noise = utilities.calculate_instantanoues_noise(model_parameters["instantaneous_noise"])
            A = A_bll + A_sa + A_pm + inst_noise #chunk.activation is the manually specified activation, potentially used by the modeller
            # keep the best-activated chunk above the retrieval threshold
            if utilities.retrieval_success(A, model_parameters["retrieval_threshold"]) and max_A < A:
                max_A = A
                self.activation = max_A
                retrieved = chunk
                extra_time = utilities.retrieval_latency(A, model_parameters["latency_factor"], model_parameters["latency_exponent"])
                if model_parameters["activation_trace"]:
                    print("(Partially) matching chunk:", chunk)
                    print("Base level learning:", A_bll)
                    print("Spreading activation", A_sa)
                    print("Partial matching", A_pm)
                    print("Noise:", inst_noise)
                    print("Total activation", A)
                    print("Time to retrieve", extra_time)
        else: #otherwise, just standard time for rule firing
            if chunk_tobe_matched <= chunk:
                retrieved = chunk
                extra_time = model_parameters["rule_firing"]
    if not retrieved:
        # retrieval failure: charge the failure latency
        if model_parameters["subsymbolic"]:
            extra_time = utilities.retrieval_latency(model_parameters["retrieval_threshold"], model_parameters["latency_factor"], model_parameters["latency_exponent"])
        else:
            extra_time = model_parameters["rule_firing"]
    if self.__finst:
        # record the retrieval (even a failure, as None) in the finst window
        self.recent.append(retrieved)
        if self.__finst < len(self.recent):
            self.recent.popleft()
    return retrieved, extra_time
def __init__(self, typename, **dictionary):
    """
    Build an ACT-R chunk of type typename from slot-value pairs.

    Each slot value is normalized into a utilities.VarvalClass namedtuple
    (variables, values, negvariables, negvalues). If the chunk type is
    unknown or lacks some of the supplied slots, it is (re)created and a
    warning is issued.

    Raises TypeError/ValueError for ill-typed slot values and ACTRError
    for values that cannot be split into the VarvalClass fields.
    """
    self.typename = typename
    self.boundvars = {} #dict of bound variables
    kwargs = {}
    for key in dictionary: #change values (and values in a tuple) into string, when possible (when the value is not another chunk)
        if isinstance(dictionary[key], Chunk):
            # a chunk-valued slot is wrapped as the 'values' field of a VarvalClass
            dictionary[key] = utilities.VarvalClass(variables=None, values=dictionary[key], negvariables=(), negvalues=())
        elif isinstance(dictionary[key], utilities.VarvalClass):
            # already normalized -- only validate the individual fields
            for x in dictionary[key]._fields:
                if x in {"values", "variables"} and not isinstance(getattr(dictionary[key], x), str) and getattr(dictionary[key], x) != self.__emptyvalue and not isinstance(getattr(dictionary[key], x), Chunk):
                    raise TypeError("Values and variables must be strings, chunks or empty (None)")
                elif x in {"negvariables", "negvalues"} and (not isinstance(getattr(dictionary[key], x), collections.abc.Sequence) or isinstance(getattr(dictionary[key], x), collections.abc.MutableSequence)):
                    # immutable sequence required (i.e., a tuple, not a list)
                    raise TypeError("Negvalues and negvariables must be tuples")
        elif (isinstance(dictionary[key], collections.abc.Iterable) and not isinstance(dictionary[key], str)) or not isinstance(dictionary[key], collections.abc.Hashable):
            raise ValueError("The value of a chunk slot must be hashable and not iterable; you are using an illegal type for the value of the chunk slot %s, namely %s" % (key, type(dictionary[key])))
        else:
            #create namedtuple varval and split dictionary[key] into variables, values, negvariables, negvalues
            try:
                temp_dict = utilities.stringsplitting(str(dictionary[key]))
            except utilities.ACTRError as e:
                raise utilities.ACTRError("The chunk %s is not defined correctly; %s" % (dictionary[key], e))
            loop_dict = temp_dict.copy()
            for x in loop_dict:
                if x == "negvariables" or x == "negvalues":
                    # negated fields may hold several items -- freeze as tuple
                    val = tuple(temp_dict[x])
                else:
                    # values/variables hold at most one item; None when absent
                    try:
                        val = temp_dict[x].pop()
                    except KeyError:
                        val = None
                temp_dict[x] = val
            dictionary[key] = utilities.VarvalClass(**temp_dict)
        #adding _ to minimize/avoid name clashes
        kwargs[key + "_"] = dictionary[key]
    try:
        for elem in self._chunktypes[typename]._fields:
            if elem not in kwargs:
                kwargs[elem] = self.__emptyvalue #emptyvalues are explicitly added to attributes that were left out
                dictionary[elem[:-1]] = self.__emptyvalue #emptyvalues are also added to attributes in the original dictionary (since this might be used for chunktype creation later)
        if set(self._chunktypes[typename]._fields) != set(kwargs.keys()):
            chunktype(typename, dictionary.keys()) #If there are more args than in the original chunktype, chunktype has to be created again, with slots for new attributes
            warnings.warn("Chunk type %s is extended with new attributes" % typename)
    except KeyError:
        chunktype(typename, dictionary.keys()) #If chunktype completely missing, it is created first
        warnings.warn("Chunk type %s was not defined; added automatically" % typename)
    finally:
        # the underlying namedtuple instance that actually stores the slots
        self.actrchunk = self._chunktypes[typename](**kwargs)
    self.__empty = None #this will store what the chunk looks like without empty values (the values will be stored on the first call of the relevant function)
    self.__unused = None #this will store what the chunk looks like without unused values
    self.__hash = None, self.boundvars.copy() #this will store the hash along with variables (hash changes if some variables are resolved)
def createchunkdict(chunk):
    """
    Create typename and chunkdict from pyparsed list.

    chunk -- pyparsing result describing one chunk (slot/value elements).

    Returns (type_chunk, chunk_dict), where chunk_dict maps each slot name
    to a utilities.VarvalClass. Raises ACTRError when a slot carries more
    than one value or more than one variable.
    """
    # maps the special prefix symbols to the VarvalClass field they fill
    sp_dict = {utilities.ACTRVARIABLE: "variables",
               utilities.ACTRNEG: "negvalues",
               utilities.ACTRNEG + utilities.ACTRVARIABLE: "negvariables",
               utilities.ACTRVALUE: "values",
               utilities.ACTRNEG + utilities.ACTRVALUE: "negvalues"}
    chunk_dict = {}
    for elem in chunk:
        temp_dict = chunk_dict.get(elem[0], utilities.VarvalClass(variables=set(), values=set(), negvariables=set(), negvalues=set())._asdict())
        for idx in range(1, len(elem)):
            try:
                if elem[idx][0][0] == utilities.VISIONGREATER or elem[idx][0][0] == utilities.VISIONSMALLER: #this checks special visual conditions on greater/smaller than
                    if elem[idx][0][-1] == utilities.ACTRVARIABLE:
                        temp_dict['variables'].add(elem[idx][1])
                        update_val = elem[idx][0][0]
                    else:
                        update_val = elem[idx][0] + elem[idx][1] #here fix
                    updating = 'values'
                elif elem[idx][1][0] == "'" or elem[idx][1][0] == '"':
                    # quoted value: strip the surrounding quotes
                    updating = sp_dict[elem[idx][0]]
                    update_val = elem[idx][1][1:-1]
                else:
                    updating = sp_dict[elem[idx][0]]
                    update_val = elem[idx][1]
            except (KeyError, IndexError) as err: #indexerror --> only a string is present; keyerror: the first element in elem[idx] is not a special symbol (in sp)
                if elem[idx][0] == "'" or elem[idx][0] == '"':
                    update_val = elem[idx][1:-1]
                else:
                    #check if the string is an existing chunk in the database of chunks
                    try:
                        update_val = Chunk._chunks[elem[idx]]
                    #if not, save it as a string
                    except KeyError:
                        update_val = elem[idx]
                updating = 'values'
            finally:
                temp_dict[updating].add(update_val)
        chunk_dict[elem[0]] = temp_dict
    # freeze neg* sets as tuples and unwrap single values/variables
    for key in chunk_dict:
        chunk_dict[key]["negvalues"] = tuple(chunk_dict[key]["negvalues"])
        chunk_dict[key]["negvariables"] = tuple(chunk_dict[key]["negvariables"])
        for x in ["values", "variables"]:
            if len(chunk_dict[key][x]) > 1:
                raise utilities.ACTRError("Any slot must have fewer than two %s, there is more than one in this slot" % x)
            elif len(chunk_dict[key][x]) == 1:
                chunk_dict[key][x] = chunk_dict[key][x].pop()
            else:
                chunk_dict[key][x] = None
        chunk_dict[key] = utilities.VarvalClass(**chunk_dict[key])
    type_chunk = ""
    # BUGFIX: previously only the literal spellings isa/ISA/Isa were popped
    # in sequence, and a KeyError on an earlier spelling skipped the later
    # ones (the in-code TODO asked for any capitalization). Now any
    # capitalization of the isa slot determines the chunk type.
    for isa_key in [key for key in chunk_dict if key.lower() == "isa"]:
        type_chunk = chunk_dict.pop(isa_key).values
    return type_chunk, chunk_dict
def find(self, otherchunk, actrvariables=None, extra_tests=None):
    """
    Set a chunk in vision based on what is on the screen.

    otherchunk -- the visual-location pattern to search for
    actrvariables -- dict of currently bound ACT-R variables (None treated as empty)
    extra_tests -- extra search tests (e.g., "attended")

    Returns (found_chunk_or_None, found_stimulus_or_None).
    """
    if extra_tests == None:
        extra_tests = {}
    if actrvariables == None:
        actrvariables = {}
    try:
        mod_attr_val = {x[0]: utilities.check_bound_vars(actrvariables, x[1]) for x in otherchunk.removeunused()}
    except utilities.ACTRError as arg:
        raise utilities.ACTRError("The chunk '%s' is not defined correctly; %s" % (otherchunk, arg))
    chunk_used_for_search = chunks.Chunk(utilities.VISUALLOCATION, **mod_attr_val)
    found = None
    found_stim = None
    closest = float("inf")
    x_closest = float("inf")
    y_closest = float("inf")
    current_x = None
    current_y = None
    # each stimulus on screen is tested against the search chunk; the
    # guard-clause try/excepts skip tests that do not apply to the pattern
    for each in self.environment.stimulus:
        position = (int(self.environment.stimulus[each]['position'][0]), int(self.environment.stimulus[each]['position'][1]))
        try: #checks absolute position
            if chunk_used_for_search.screen_x and int(chunk_used_for_search.screen_x) != position[0]:
                continue
        except (TypeError, ValueError):
            pass
        try: #checks absolute position
            if chunk_used_for_search.screen_y and int(chunk_used_for_search.screen_y) != position[1]:
                continue
        except (TypeError, ValueError):
            pass
        try: #checks on x and y relative positions
            if chunk_used_for_search.screen_x[0] == utilities.VISIONSMALLER and int(chunk_used_for_search.screen_x[1:]) <= position[0]:
                continue
            elif chunk_used_for_search.screen_x[0] == utilities.VISIONGREATER and int(chunk_used_for_search.screen_x[1:]) >= position[0]:
                continue
        except (TypeError, IndexError):
            pass
        try: #checks on x and y relative positions
            if chunk_used_for_search.screen_y[0] == utilities.VISIONSMALLER and int(chunk_used_for_search.screen_y[1:]) <= position[1]:
                continue
            elif chunk_used_for_search.screen_y[0] == utilities.VISIONGREATER and int(chunk_used_for_search.screen_y[1:]) >= position[1]:
                continue
        except (TypeError, IndexError):
            pass
        try: #checks on x and y absolute positions
            if chunk_used_for_search.screen_x == utilities.VISIONLOWEST and current_x != None and position[0] > current_x:
                continue
            elif chunk_used_for_search.screen_x == utilities.VISIONHIGHEST and current_x != None and position[0] < current_x:
                continue
        except TypeError:
            pass
        try: #checks on x and y absolute positions
            if chunk_used_for_search.screen_y == utilities.VISIONLOWEST and current_y != None and position[1] > current_y:
                continue
            elif chunk_used_for_search.screen_y == utilities.VISIONHIGHEST and current_y != None and position[1] < current_y:
                continue
        except TypeError:
            pass
        try:
            # finst-based filter on recently attended stimuli
            if extra_tests["attended"] == False or extra_tests["attended"] == 'False':
                if self.finst and self.environment.stimulus[each] in self.recent:
                    continue
            else:
                if self.finst and self.environment.stimulus[each] not in self.recent:
                    continue
        except KeyError:
            pass
        try: #checks on closest
            if (chunk_used_for_search.screen_x == utilities.VISIONCLOSEST or chunk_used_for_search.screen_y == utilities.VISIONCLOSEST) and utilities.calculate_pythagorian_distance(self.environment.current_focus, position) > closest:
                continue
        except TypeError:
            pass
        try: #checks on onewayclosest
            if (chunk_used_for_search.screen_x == utilities.VISIONONEWAYCLOSEST) and utilities.calculate_onedimensional_distance(self.environment.current_focus, position, horizontal=True) > x_closest:
                continue
        except TypeError:
            pass
        try: #checks on onewayclosest
            if (chunk_used_for_search.screen_y == utilities.VISIONONEWAYCLOSEST) and utilities.calculate_onedimensional_distance(self.environment.current_focus, position, horizontal=False) > y_closest:
                continue
        except TypeError:
            pass
        found_stim = self.environment.stimulus[each]
        # BUGFIX: the slot values must come from the stimulus entry itself
        # (self.environment.stimulus[each][key]); the previous 'each[key]'
        # indexed the stimulus *key* (a string) with a string and raised
        # TypeError -- every other access in this loop uses stimulus[each].
        visible_chunk = chunks.makechunk(nameofchunk="vis1", typename="_visuallocation", **{key: self.environment.stimulus[each][key] for key in self.environment.stimulus[each] if key != 'position' and key != 'text' and key != 'vis_delay'})
        if visible_chunk <= chunk_used_for_search:
            temp_dict = visible_chunk._asdict()
            temp_dict.update({"screen_x": position[0], "screen_y": position[1]})
            found = chunks.Chunk(utilities.VISUALLOCATION, **temp_dict)
            # remember the winning position for the lowest/highest/closest tests
            current_x = position[0]
            current_y = position[1]
            closest = utilities.calculate_pythagorian_distance(self.environment.current_focus, position)
            x_closest = utilities.calculate_onedimensional_distance(self.environment.current_focus, position, horizontal=True)
            y_closest = utilities.calculate_onedimensional_distance(self.environment.current_focus, position, horizontal=False)
    return found, found_stim
def procedural_process(self, start_time=0):
    """
    Process that is carrying a production. Proceeds in steps: conflict
    resolution -> rule selection -> rule firing; or conflict resolution ->
    no rule found. Start_time specifies when production starts in discrete
    event simulation.

    Yields simulation Events and finally returns the processes activated
    by the fired rule (self.procs).
    """
    time = start_time
    self.procs.append(self._PROCEDURAL,)
    self.__actrvariables = {}
    yield Event(roundtime(time), self._PROCEDURAL, 'CONFLICT RESOLUTION')
    max_utility = float("-inf")
    used_rulename = None
    self.used_rulename = None
    self.extra_tests = {}
    self.last_rule_slotvals = self.current_slotvals.copy()
    # conflict resolution: find the matching rule with the highest utility
    for rulename in self.ordered_rulenames:
        self.used_rulename = rulename
        production = self.rules[rulename]["rule"]()
        utility = self.rules[rulename]["utility"]
        # first next() yields the rule's LHS
        pro = next(production)
        if self.model_parameters["subsymbolic"]:
            # NOTE(review): 'calculate_instantanoues_noise' is spelled
            # differently from 'calculate_instantaneous_noise' used elsewhere
            # in this file -- confirm which name utilities actually defines.
            inst_noise = utilities.calculate_instantanoues_noise(self.model_parameters["utility_noise"])
            utility += inst_noise
        if max_utility <= utility and self.LHStest(pro, self.__actrvariables.copy()):
            max_utility = utility
            used_rulename = rulename
            if not self.model_parameters["subsymbolic"] or not self.model_parameters["utility_noise"]:
                break #breaking after finding a rule, to speed up the process
    if used_rulename:
        self.used_rulename = used_rulename
        production = self.rules[used_rulename]["rule"]()
        # record the firing time for utility learning
        self.rules.used_rulenames.setdefault(used_rulename, []).append(time)
        yield Event(roundtime(time), self._PROCEDURAL, 'RULE SELECTED: %s' % used_rulename)
        time = time + self.model_parameters["rule_firing"]
        yield Event(roundtime(time), self._PROCEDURAL, self._UNKNOWN)
        pro = next(production)
        # re-test the LHS after the rule-firing delay; buffers may have changed
        if not self.LHStest(pro, self.__actrvariables.copy(), True):
            yield Event(roundtime(time), self._PROCEDURAL, 'RULE STOPPED FROM FIRING: %s' % used_rulename)
        else:
            if self.model_parameters["utility_learning"] and self.rules[used_rulename]["reward"] != None:
                utilities.modify_utilities(time, self.rules[used_rulename]["reward"], self.rules.used_rulenames, self.rules, self.model_parameters)
                # reward distributed -- start a fresh record of rule firings
                self.rules.used_rulenames = {}
            compiled_rulename, re_created = self.compile_rules()
            self.compile = []
            if re_created:
                yield Event(roundtime(time), self._PROCEDURAL, 'RULE %s: %s' % (re_created, compiled_rulename))
            self.current_slotvals = {key: None for key in self.buffers}
            yield Event(roundtime(time), self._PROCEDURAL, 'RULE FIRED: %s' % used_rulename)
            try:
                # second next() yields the RHS, which update() carries out
                yield from self.update(next(production), time)
            except utilities.ACTRError as e:
                raise utilities.ACTRError("The following rule is not defined correctly according to ACT-R: '%s'. The following error occured: %s" % (self.used_rulename, e))
            # remember the rule pair for production compilation
            if self.last_rule and self.last_rule != used_rulename:
                self.compile = [self.last_rule, used_rulename, self.last_rule_slotvals.copy()]
                self.last_rule_slotvals = {key: None for key in self.buffers}
            self.last_rule = used_rulename
    else:
        self.procs.remove(self._PROCEDURAL,)
        yield Event(roundtime(time), self._PROCEDURAL, 'NO RULE FOUND')
    return self.procs #returns processes activated by PROCEDURAL