def new_situation_(N):
    """Build an N-sized Situation with the small-board starting pieces placed."""
    board = Situation(N)
    for row, col in ((4, 0), (5, 1), (5, 4), (4, 5)):
        board.SetChess(row, col)
    return board
def new_situation(N):
    """Build an N-sized Situation with the standard starting pieces placed."""
    board = Situation(N)
    for row, col in ((6, 0), (9, 3), (9, 6), (6, 9)):
        board.SetChess(row, col)
    return board
def situationMapOrListFromJson(in_situationMapOrListJson):
    """Deserialize Situation objects from either a JSON dict or a JSON list.

    A dict payload yields {key: Situation}, any other iterable yields a list.
    """
    source = in_situationMapOrListJson
    if not isinstance(source, dict):
        return [Situation.fromJson(item) for item in source]
    return {key: Situation.fromJson(payload) for key, payload in source.items()}
def predict_test():
    """Smoke test: Earley.predict must add (S -> .a, 0) to D[0]."""
    earley = Earley('a')
    earley.rules = [Rule('S#', 'S'), Rule('S', 'a')]
    earley.situations_dict[0] = {Situation('S#', 'S', 0, 0)}
    earley.predict(0)
    expected = Situation('S', 'a', 0, 0)
    is_added = any(
        sit.input == expected.input
        and sit.output == expected.output
        and sit.point == expected.point
        and sit.ind == expected.ind
        for sit in earley.situations_dict[0]
    )
    assert is_added is True
    print('Predict test passed\n')
def __init__(self, word):
    """Seed D[0] with (S# -> .S; 0) and create an empty D[i] for every 1..len(word).

    NOTE(review): `self.situations_dict` is indexed before being assigned here,
    so it presumably exists as a class-level dict — confirm.
    """
    self.situations_dict[0] = {Situation('S#', 'S', 0, 0)}
    for pos in range(1, len(word) + 1):
        self.situations_dict[pos] = set()
    self.word = word
def complete(self, list_number):
    """Earley completer: for each finished situation (X -> g., i) in
    D[list_number], advance the dot over X in the situations of D[i]
    that are actually waiting for X.

    Bug fix: the original advanced EVERY situation in D[i] regardless of
    which symbol followed its dot (and indexed `point + 1` past the end of
    already-finished ones); the completer must only advance situations
    whose next expected symbol equals the completed nonterminal.
    """
    situations_to_insert = []
    for situation in self.situations_dict[list_number]:
        if situation.point != len(situation.output):
            continue  # dot not at the end: nothing completed
        origin = situation.ind
        for waiting in self.situations_dict[origin]:
            # advance only if the symbol after the dot is the completed nonterminal
            if (waiting.point < len(waiting.output)
                    and waiting.output[waiting.point] == situation.input):
                situations_to_insert.append(
                    Situation(waiting.input, waiting.output,
                              waiting.ind, waiting.point + 1))
    for sit in situations_to_insert:
        self.add_situation(sit, list_number)
def format_data_to_json(raw_data):
    """Sort diagram children into situations/actions/connectors by their
    style string and return the result serialized as a JSON string."""
    # TODO : Refactor style to config
    # TODO : Use pattern Strategy, Factory?
    buckets = {'situations': [], 'actions': [], 'connectors': []}
    for child in raw_data:
        style = child.get('style', None)
        if style == "whiteSpace=wrap;html=1;aspect=fixed;":
            item = Situation(child)
            if item.is_valid:
                buckets['situations'].append(item.as_json())
        elif style == "ellipse;whiteSpace=wrap;html=1;":
            item = Action(child)
            if item.is_valid:
                buckets['actions'].append(item.as_json())
        elif style is not None:
            item = Connector(child)
            if item.is_valid:
                buckets['connectors'].append(item.as_json())
    return json.dumps(buckets, ensure_ascii=False)
def predict(self, list_number):
    """Earley predictor: for each situation in D[list_number] whose dot
    stands before a nonterminal B, add (B -> .beta; list_number) for every
    rule B -> beta."""
    pending = []
    for situation in self.situations_dict[list_number]:
        if situation.point >= len(situation.output):
            continue
        # nonterminal symbol right after the dot
        nonterminal = situation.output[situation.point]
        for rule in self.rules:
            if rule.input == nonterminal:  # rule expands this nonterminal
                pending.append(Situation(nonterminal, rule.output, list_number, 0))
    for sit in pending:
        self.add_situation(sit, list_number)
def addNewEvent(self, in_event):
    """Record a new event and advance the neutral-situation pipeline.

    Returns a pair (newCompletedSituations, newPredictingSituations):
    the first still need predictive tags, the second are fresh training
    examples.
    """
    event_nr = len(self.m_events)
    self.m_events.append(in_event)

    # Finish started situations whose duration has elapsed.
    due = [k for k in self.m_startedNeutralSituations
           if (event_nr - k) >= self.m_situationDuration]
    new_completed = [self.m_startedNeutralSituations[k] for k in due]
    for k in due:
        sit = self.m_startedNeutralSituations.pop(k)
        sit.m_endEvent = sit.m_startEvent + self.m_situationDuration
        self.m_completedNeutralSituations[sit.m_endEvent] = sit

    # Open a new neutral situation every detection interval.
    if event_nr % self.m_situationDetectionInterval == 0:
        self.m_startedNeutralSituations[event_nr] = Situation(event_nr)

    # Completed situations older than the prediction interval become
    # predicting ones (their predictive tags were added while emotional
    # situations were running).
    aged = [k for k in self.m_completedNeutralSituations
            if (event_nr - k) >= self.m_predictiveInterval]
    new_predicting = [self.m_completedNeutralSituations[k] for k in aged]
    for k in aged:
        self.m_predictingNeutralSituations.append(
            self.m_completedNeutralSituations.pop(k))

    return (new_completed, new_predicting)
def registerActiveEmotions(self, in_emotionIDList):
    """Synchronise running emotional situations with the active emotion IDs
    and tag neutral situations that predict them.

    NOTE(review): the predictive-tagging loops are applied for every active
    ID on every call (idempotent writes of 1) — this matches the sibling
    comment "predictive tags are added when emotional situations are
    running"; confirm they were not meant to run only at emotion onset.
    """
    event_nr = len(self.m_events)

    for emotion_id in in_emotionIDList:
        # Open an emotional situation for a newly active ID.
        if self.m_currentEmotionalSituations.get(emotion_id) == None:
            self.m_currentEmotionalSituations[emotion_id] = Situation(
                event_nr, emotion_id)
        # Completed neutral situations predict the start of this emotion.
        for sit in self.m_completedNeutralSituations.values():
            sit.m_predictiveMap[emotion_id] = 1
        # Optionally tag situations that are still running.
        if self.m_predictFromNonCompletedSituations:
            for sit in self.m_startedNeutralSituations.values():
                sit.m_predictiveMap[emotion_id] = 1

    # Close emotional situations whose ID is no longer active.
    inactive = [k for k in self.m_currentEmotionalSituations
                if in_emotionIDList.count(k) == 0]
    for k in inactive:
        sit = self.m_currentEmotionalSituations.pop(k)
        sit.m_endEvent = event_nr - 1
        self.m_finishedEmotionalSituations.append(sit)
def initialize(self):
    """(Re)build the empty learning state for this intelligence."""
    self.situation_probabilities = []
    fresh = Situation(self.parent)
    fresh.set_intelligence(self)
    self.situation = fresh
class Intelligence():
    """Reinforcement-style controller for Mario: memorises encountered
    situations (environments) together with walk/jump/spin probabilities,
    elects moves, and adjusts probabilities from their results.

    Fixes vs. original:
    - Python-2 `<>` operators replaced by `!=` (invalid syntax in Python 3).
    - `last_action`/`last_index`/`archive` were mutable CLASS attributes,
      so `archive` was shared across instances; now per-instance state.
    - `closest_situation`: `best == dist` was a no-op comparison instead of
      an assignment, `best` was reset inside the candidate loop, and the
      width/height filters never narrowed `current_table`.
    - `debrief`: incremented `nbr` instead of `updated`, so the cap on the
      number of re-weighted moves never applied.
    """

    def __init__(self, parent):
        self.parent = parent
        # History of previous actions/environments (per instance).
        self.last_action = 'walk'
        self.last_index = 0
        self.archive = []
        self.initialize()

    def initialize(self):
        """Reset the learning state."""
        self.situation = Situation(self.parent)
        self.situation.set_intelligence(self)
        self.situation_probabilities = []

    def search_situation(self, environnement):
        """Return the index of an identical previously-seen situation, or -1."""
        for index in range(len(self.situation_probabilities)):
            if self.situation.is_same(self.situation_probabilities[index],
                                      environnement):
                return index
        return -1

    def _narrow_by_metric(self, candidates, environnement, index, column):
        """Keep the candidates whose stored obstacle value in `column` is
        closest to the current environment's value (ties are kept)."""
        best = 2000
        kept = []
        for i in candidates:
            dist = abs(environnement[index][column]
                       - self.situation_probabilities[i][0][index][column])
            if dist < best:
                best = dist  # original had `best == dist` (no-op comparison)
                kept = [i]
            elif dist == best:
                kept.append(i)
        return kept

    def closest_situation(self, environnement):
        """Return the index of the most similar known situation, or -1 when
        no known situation shares the first obstacle's type.

        NOTE(review): as in the original, only the first obstacle (index 0)
        is compared — the original loop index was never incremented, so the
        body always returned on its first pass.
        """
        current_table = list(range(len(self.situation_probabilities)))
        if not environnement:
            return None  # original fell off the end of its loop here
        index = 0
        # Keep situations whose first obstacle has the same type.
        same_type = [i for i in current_table
                     if environnement[index][0]
                     == self.situation_probabilities[i][0][index][0]]
        if not same_type:
            return -1  # nothing matches the very first obstacle
        current_table = same_type
        # Tie-break successively on obstacle distance, width, then height.
        for column in (1, 2, 3):
            if len(current_table) == 1:
                return current_table[0]
            current_table = self._narrow_by_metric(
                current_table, environnement, index, column)
        return current_table[0]

    def elect_move(self, situation):
        """Choose and perform Mario's next action from the situation's
        probabilities, honouring the controller's forbidden moves."""
        number = random.random()
        forbidden = self.control.is_forbidden(situation[0])
        if 'walk' in forbidden and 'jump' in forbidden:
            # Nothing is allowed: restart the level and penalise recent moves.
            self.control.restart()
            self.debrief('failure', 2, 5)
        elif 'walk' in forbidden:
            self.last_action = 'jump'
        elif 'jump' in forbidden:
            self.last_action = 'walk'
        else:
            # Everything is allowed: draw according to the probabilities.
            if number < situation[PA] and not ('walk' in forbidden):
                self.last_action = 'walk'
            elif (number < situation[PA] + situation[PS]
                    and not ('jump' in forbidden)):
                self.last_action = 'jump'
            else:
                self.last_action = 'spin'
        # Log the move in the history, then perform it.
        self.archive.append([self.last_index, self.last_action, 0])
        self.control.action(self.last_action)

    def create_situation(self, situation):
        """Memorise a newly encountered situation."""
        self.situation_probabilities.append(situation)
        self.control.update_data()

    def update_probabilities(self, index, result, action, strength=1):
        """Adjust the probabilities of situation `index` after `action`
        produced `result`; 'neutral' leaves everything untouched.

        NOTE(review): the original success and failure branches were
        identical — the direction of the adjustment is presumably handled
        inside Situation.reset_probabilities; confirm.
        """
        if result == 'neutral':
            return
        key = {'walk': 'Pa', 'spin': 'Pr', 'jump': 'Ps'}.get(action)
        if key is not None and result in ('success', 'failure'):
            self.situation.reset_probabilities(index, key, strength)

    def learning(self):
        """Look up (or create) the situation matching the current
        environment and, when the AI is enabled, elect the next move."""
        environnement = self.data.next_obstacle()
        self.last_index = self.search_situation(environnement)
        if self.last_index == -1:
            # Unseen situation: seed probabilities from the closest known one.
            nearest = self.closest_situation(environnement)
            if nearest == -1:
                new_sit = [environnement, PROB_AI, PROB_JI, PROB_SI]
            else:
                near = self.situation_probabilities[nearest]
                new_sit = [environnement,
                           (near[PA] + PROB_AI) / 2,
                           (near[PS] + PROB_JI) / 2,
                           (near[PR] + PROB_SI) / 2]
            self.create_situation(new_sit)
            self.last_index = len(self.situation_probabilities) - 1
        if self.control.ia_set:
            self.elect_move(self.situation_probabilities[self.last_index])

    def remember(self, result):
        """Learn from the last action's result and flag the latest archive
        entry when the situation changed during the move."""
        self.update_probabilities(self.last_index, result, self.last_action)
        if len(self.archive) != 0:  # was Python-2 `<>`
            if (self.situation_probabilities[self.last_index][0][0][0]
                    != self.data.next_obstacle()[0][0]):
                self.archive[-1][2] = 1

    def debrief(self, result, nbr=-1, strength=1):
        """Re-weight up to `nbr` of the most recent situation-changing
        moves (all of them when nbr < 0) with a damped strength.

        NOTE(review): the loop condition stops one short of the oldest
        archive entry, as in the original — confirm if intentional.
        """
        updated = 0
        index = -1
        if nbr < 0:
            nbr = len(self.archive)
        while (index > -len(self.archive)) and (updated < nbr):
            if self.archive[index][2] == 1:
                self.update_probabilities(self.archive[index][0], result,
                                          self.archive[index][1],
                                          0.2 * strength)
                updated = updated + 1  # original wrongly incremented `nbr`
            index = index - 1

    def set_control(self, control):
        """Set the game controller."""
        self.control = control

    def set_data(self, data):
        """Set the game data source."""
        self.data = data
def scan(self, list_number, symbol):
    """Earley scanner: for each situation in D[list_number] whose dot stands
    before `symbol`, add the dot-advanced situation to D[list_number + 1].

    Bug fix: guard against completed situations (dot already at the end of
    the output), which made the original raise IndexError on
    `situation.output[situation.point]`.
    """
    for situation in self.situations_dict[list_number]:
        if situation.point >= len(situation.output):
            continue  # dot at the end: nothing left to scan
        if situation.output[situation.point] == symbol:
            advanced = Situation(situation.input, situation.output,
                                 situation.ind, situation.point + 1)
            self.add_situation(advanced, list_number + 1)