def _createTheorem(self):
    """Create the theorem that starts this session.

    Manual mode: keep prompting the user for a theorem name and content
    until DarkLogic accepts them; on success in Human mode, print the
    available interactive commands.
    Automatic mode: draw a random theorem from the theorem database
    suited to the player's elo and record its elo in ``self._eloThm``.
    """
    # ask user to create theorem
    thName = ""
    thStr = ""
    ok = False
    while not ok:
        if not self._isAuto:
            print("Create a new theorem to start a session")
            thName = input("Theorem name:\n")
            if thName == "":
                # empty name: re-prompt
                print("Error in theorem name")
                continue
            thStr = input("Theorem content:\n")
            if thStr == "":
                # empty content: re-prompt
                print("Error in theorem content")
                continue
            # makeTheorem returns falsy when the content cannot be parsed,
            # which keeps us in the prompt loop
            ok = DarkLogic.makeTheorem(thName, thStr)
            if ok and self._mode == Mode.Human:
                # remind a human player of the interactive commands
                print("-> getAction() : to print all possible actions")
                print(
                    "-> pushAction(id) : to make action identified by id")
                print(
                    "-> pushAction(ruleName, path) : to make action identified by ruleName (name of the rule to "
                    "apply) and path "
                    " (list of indexes [id1, id2, ..., idn]) in theorem ")
                print("-> popAction : to cancel the latest action")
        else:
            # automatic mode: pick a theorem matching the player's strength
            thm = self._dbThm.getRandomTheorem(self._player.elo())
            self._eloThm = thm.elo()
            print(thm.name() + " theorem :'" + thm.content() +
                  "' has been chosen for this game")
            ok = DarkLogic.makeTheorem(thm.name(), thm.content())
            self._player.setTheoremInfo()
def humanDoubleNot():
    """Replay a recorded human demonstration of the double-negation theorem."""
    DarkLogic.init(0)
    print("DoubleNot Demonstration")
    DarkLogic.makeTheorem("doubleNot", "a<=>!!a")
    DarkLogic.printTheorem()
    print("__________________________________________________")
    # Recorded proof steps, applied in order.
    for step in ("FE,[1]", "arr,[0]", "arr,[0]", "arr_True"):
        apply(step)
    assert DarkLogic.isDemonstrated(), "DoubleNot theorem was not demonstrated"
def humanIdentity():
    """Replay a recorded human demonstration of the identity theorem."""
    DarkLogic.init(0)
    print("Identity Demonstration")
    DarkLogic.makeTheorem("identity", "a<=>a")
    DarkLogic.printTheorem()
    print("__________________________________________________")
    # Recorded proof steps, applied in order.
    for step in ("arr,[0]", "arr,[0]", "arr,[1]", "arr_True,[]"):
        apply(step)
    assert DarkLogic.isDemonstrated(), "identity theorem was not demonstrated"
def humanExcludedMiddle():
    """Replay a recorded human demonstration of the excluded-middle theorem."""
    DarkLogic.init(0)
    print("ExcludedMiddle Demonstration")
    DarkLogic.makeTheorem("ExcludedMiddle", "p||!p")
    DarkLogic.printTheorem()
    print("__________________________________________________")
    # Recorded proof steps, applied in order.
    for step in ("arr", "FI!", "||Ig_Annexe_0", "FE",
                 "FI!_Annexe_7", "||Ig", "ax_Annexe_1"):
        apply(step)
    assert DarkLogic.isDemonstrated(
    ), "ExcludedMiddle theorem was not demonstrated"
def meditate(self):
    """Learn from the game that just ended, then reset the session.

    If any nodes were stored during the game, back-propagate the final
    value through them, export them to the example database, retrain the
    network every ``NeuralAI.MaxGameBefLearning`` games, and clear the
    stored nodes, before delegating to the base-class meditate().
    """
    if len(self._storeNodes) > 0:
        self._gamesSinceLastLearning += 1
        # re-create the theorem so exported states refer to it
        DarkLogic.makeTheorem(self._theoremName, self._theorem)
        # update if deepAI found a demonstration
        revNodes = self._storeNodes[::-1]
        if revNodes[0].value() < NeuralAI.MaxDepth:
            # NOTE(review): assumes the last stored node carries the final
            # (proof) value; earlier nodes get that value plus their
            # distance from it — confirm against Node.setValue semantics
            val = revNodes[0].value()
            for k in range(1, len(revNodes)):
                node = revNodes[k]
                node.setValue(val + k)
        self._db.export(self._storeNodes[0].getDbStates())
        if self._gamesSinceLastLearning == NeuralAI.MaxGameBefLearning:
            self._train()
            self._gamesSinceLastLearning = 0
        self._storeNodes.clear()
        DarkLogic.clearAll()
    super().meditate()
def parser():
    """Verify every theorem in the test database round-trips through the parser."""
    DarkLogic.init(0)
    database = Database("Test/dbtest.csv")
    records = database.getDatas()
    print("Total number of theorems in database: " + str(len(records)))
    for record in records.values():
        # The theorem must be accepted by the DarkLogic parser...
        created = DarkLogic.makeTheorem(record.theoremName(), record.theoremContent())
        assert created, "Theorem name: " + record.theoremName() + ", " \
            "content: " + record.theoremContent() + " has not been created"
        # ...and printing it back must reproduce the original content.
        printed = DarkLogic.toStrTheorem()
        assert printed == record.theoremContent(), "Bad parsing! content in darklogic is '" \
            + printed + "', but original was " + record.theoremContent()
def demonstration(self, name, content, nbThreads):
    """Let the AI try to demonstrate one theorem and report the outcome.

    Args:
        name: theorem name passed to DarkLogic.makeTheorem.
        content: theorem content passed to DarkLogic.makeTheorem.
        nbThreads: number of cores for DarkLogic and the AI.

    Plays AI actions until the game is over, prints the result, records
    the elapsed time in ``self._elapsed_seconds`` when the demonstration
    finished without the AI having to play, and pushes a STOP event.
    """
    print("Test AI on " + name + " theorem with " + str(nbThreads) + " cores")
    DarkLogic.init(nbThreads)
    ai = AI(nbThreads, 60)
    assert DarkLogic.makeTheorem(
        name, content), "cannot make " + name + " theorem"
    DarkLogic.printTheorem()
    start = time.perf_counter()
    # main game loop: the AI picks an action, we apply it
    while not DarkLogic.isOver():
        action = ai.play()
        DarkLogic.getActions()
        print(ai.name() + " plays action with id " + str(action.id()))
        DarkLogic.apply(action.id())
        DarkLogic.printTheorem()
        print(
            "____________________________________________________________________________"
        )
    end = time.perf_counter()
    if DarkLogic.hasAlreadyPlayed():
        # the AI actually played: classify how the game ended
        if DarkLogic.isDemonstrated():
            print(ai.name() + " won! " + ai.name() + " finished the demonstration!")
        elif DarkLogic.isAlreadyPlayed():
            print(ai.name() + " lost! Repetition of theorem!")
        elif DarkLogic.isEvaluated():
            print(
                ai.name() +
                " lost! Cannot (\"back-\")demonstrate that a theorem is false with implications"
            )
        elif not DarkLogic.canBeDemonstrated():
            print(
                ai.name() + " lost! This theorem cannot be demonstrated! " +
                "It can be true or false according to the values of its variables"
            )
    else:
        # game ended before the AI played anything
        if DarkLogic.isDemonstrated():
            print("Game Over! the demonstration is already finished!")
            self._elapsed_seconds = end - start
        elif not DarkLogic.canBeDemonstrated():
            print(
                "Game Over! This theorem cannot be demonstrated! " +
                "It can be true or false according to the values of its variables"
            )
    self.pushEvent(Event.EventEnum.STOP)
def _train(self):
    """Retrain the evaluation model from the theorem database.

    Pipeline:
      1. Select up to ``NbMax`` theorems, balancing evaluated ones
         (labelled with their clipped demonstration depth) and
         unevaluated ones (labelled -1); theorems with too many
         operators for the network input are excluded.
      2. Shuffle the selected examples, split 90/10 into train/test
         and chunk both into fixed-size batches.
      3. Validate the currently saved model to get the score to beat,
         then train a freshly created model with inverse-frequency
         class weights, decaying the learning rate when the training
         loss stalls and early-stopping on the validation loss. Any
         model beating the saved one is written to NeuralAI.ModelFile.

    Side effects: replaces ``self._model`` and may overwrite the model
    file on disk.
    """

    def _make_batches(items, size):
        # Chunk items into consecutive batches; the last may be shorter.
        return [items[i:i + size] for i in range(0, len(items), size)]

    x = []
    y = []
    print("DeepAI is preparing for training...")
    dbStates = self._db.getDatas()
    # class_nb[cl] counts selected evaluated examples of depth class cl
    class_nb = {}
    for cl in range(NeuralAI.MaxDepth + 1):
        class_nb[cl] = 0
    print("Total number of theorems in database: " + str(len(dbStates)))
    remDbStates = list(dbStates.values())
    rand.shuffle(remDbStates)
    NbMax = 200000
    # Split the selection budget between evaluated and unevaluated theorems.
    if NbMax < len(dbStates):
        NbMaxUnevaluatedThm = NbMax - self._db.nbEvaluatedThm() if NbMax > self._db.nbEvaluatedThm() else 0
        NbMaxEvaluatedThm = NbMax - NbMaxUnevaluatedThm
    else:
        NbMaxUnevaluatedThm = len(dbStates) - self._db.nbEvaluatedThm()
        NbMaxEvaluatedThm = self._db.nbEvaluatedThm()
    print("Must select " + str(NbMaxUnevaluatedThm) + " unevaluated theorems")
    print("Must select " + str(NbMaxEvaluatedThm) + " evaluated theorems")
    NbEvaluated = 0
    NbUnevaluated = 0
    lastEvalPrint = 0
    lastUnevalPrint = 0
    for dbState in remDbStates:
        # Progress logging every 10000 selected theorems of each kind.
        if NbEvaluated > lastEvalPrint and NbEvaluated % 10000 == 0:
            lastEvalPrint = NbEvaluated
            print(str(NbEvaluated) + " evaluated theorems have been seen")
        if NbUnevaluated > lastUnevalPrint and NbUnevaluated % 10000 == 0:
            lastUnevalPrint = NbUnevaluated
            print(str(NbUnevaluated) + " unevaluated theorems have been seen")
        DarkLogic.makeTheorem(dbState.theoremName(), dbState.theoremContent())
        state = DarkLogic.getState()
        DarkLogic.clearAll()
        if len(state.operators()) > NeuralAI.NbOperators:
            # Theorem too large for the network input: exclude it. If it was
            # an evaluated theorem we still owed, shift the budget so the
            # total selection size is preserved.
            if dbState.isEvaluated() and NbMaxEvaluatedThm == self._db.nbEvaluatedThm():
                NbMaxUnevaluatedThm += 1
                NbMaxEvaluatedThm -= 1
            continue
        if dbState.isEvaluated():
            if NbEvaluated == NbMaxEvaluatedThm:
                continue
            # Clip the demonstration depth to MaxDepth for classification.
            cl = dbState.value() if dbState.value() < NeuralAI.MaxDepth else NeuralAI.MaxDepth
            class_nb[cl] += 1
            rulePerm = list(range(len(self._trueRuleStates)))
            rand.shuffle(rulePerm)
            x.append([makeTrueState(state), rulePerm])
            y.append(cl)
            NbEvaluated += 1
        else:
            if NbUnevaluated == NbMaxUnevaluatedThm:
                continue
            rulePerm = list(range(len(self._trueRuleStates)))
            rand.shuffle(rulePerm)
            x.append([makeTrueState(state), rulePerm])
            y.append(-1)  # -1 marks an unevaluated theorem
            NbUnevaluated += 1
        if NbUnevaluated == NbMaxUnevaluatedThm and NbEvaluated == NbMaxEvaluatedThm:
            break
    print("Selected " + str(NbUnevaluated) + " unevaluated theorems")
    print("Selected " + str(NbEvaluated) + " evaluated theorems")
    # if we keep some examples
    if len(x):
        # BUG FIX: guard against ZeroDivisionError when no unevaluated
        # theorem was selected.
        # NOTE(review): storing 1/NbUnevaluated makes the resulting class
        # weight equal NbUnevaluated (not 1/count as for other classes);
        # this reproduces the historical behaviour — confirm intent.
        class_nb[-1] = 1 / NbUnevaluated if NbUnevaluated else 0
        print("Keep " + str(len(x)) + " examples")
        class_weights = {}
        for val in class_nb:
            nb_cl = class_nb[val]
            if nb_cl >= len(x) - 1:
                print("[WARNING] Useless to train if almost all examples are from one class! Exit")
                return
            # Inverse-frequency weighting; empty classes get weight 0.
            class_weights[val] = 1 / nb_cl if nb_cl != 0 else 0
        # shuffle examples
        # BUG FIX: the permutation was previously built but never applied,
        # so examples stayed in selection order; now actually shuffle.
        print("shuffle " + str(len(x)) + " examples ...")
        randList = list(range(len(x)))
        rand.shuffle(randList)
        x = [x[pos] for pos in randList]
        y = [y[pos] for pos in randList]
        # prepare for training: 90% train / 10% validation split
        batch_size = 100
        nb_epochs = 1000
        pos = int(0.9 * len(x))
        x_train = x[:pos]
        x_test = x[pos:]
        y_train = y[:pos]
        y_test = y[pos:]
        print("training on " + str(len(x_train)) + " examples")
        trainBatches_x = _make_batches(x_train, batch_size)
        trainBatches_y = _make_batches(y_train, batch_size)
        testBatches_x = _make_batches(x_test, batch_size)
        testBatches_y = _make_batches(y_test, batch_size)
        # fit
        lr = NeuralAI.INIT_LR
        minLoss = 10 ** 100
        lastDecLoss = 0  # epochs since training loss last decreased
        # Score the currently saved model to know what we must beat.
        print("Validation of current model")
        if file_io.file_exists(NeuralAI.ModelFile):
            # load best model
            print("load last model")
            self._model = keras.models.load_model(NeuralAI.ModelFile)
            compileModel(self._model, lr)
        print("__________________________________________________________________________")
        crtMinValLoss, val_acc = validation(self._model, testBatches_x, testBatches_y, batch_size,
                                            class_weights, self._trueRuleStates, self._inputSize)
        print("VAL_LOSS = " + str(crtMinValLoss))
        print("VAL_ACCURACY = " + str(val_acc))
        minValLoss = 10 ** 100
        lastDecValLoss = 0  # epochs since validation loss last decreased
        print("create new model")
        self._model = createModel(len(self._trueRuleStates) + 1)
        compileModel(self._model, lr)
        for epoch in range(nb_epochs):
            print("epoch n°" + str(epoch + 1) + "/" + str(nb_epochs))
            # training...
            loss, accuracy = training(self._model, trainBatches_x, trainBatches_y, batch_size,
                                      class_weights, self._trueRuleStates, self._inputSize)
            print("LOSS = " + str(loss))
            print("ACCURACY = " + str(accuracy))
            if loss < minLoss:
                print("LOSS decreasing!")
                minLoss = loss
                lastDecLoss = 0
            else:
                print("LOSS increasing!")
                lastDecLoss += 1
            # validation...
            val_loss, val_accuracy = validation(self._model, testBatches_x, testBatches_y, batch_size,
                                                class_weights, self._trueRuleStates, self._inputSize)
            print("VAL_LOSS = " + str(val_loss))
            print("VAL_ACCURACY = " + str(val_accuracy))
            if val_loss < minValLoss:
                print("VAL_LOSS decreasing")
                minValLoss = val_loss
                lastDecValLoss = 0
                if minValLoss < crtMinValLoss:
                    print("Improvement compared to old model!!!")
                    crtMinValLoss = minValLoss
            else:
                print("VAL_LOSS increasing")
                lastDecValLoss += 1
            if lastDecLoss == 3:
                # Training loss stalled for 3 epochs: decay learning rate.
                lr = lr / 10
                print("adapt learning rate: " + str(lr))
                compileModel(self._model, lr)
                lastDecLoss = 0
                minLoss = loss  # keep latest loss for minimal loss
                print("new current minimal loss: " + str(minLoss))
            if lastDecValLoss == 10:
                # Validation loss has not improved for 10 epochs.
                print("Early-stopping!")
                break
            if val_loss <= crtMinValLoss:
                print("Save model")
                self._model.save(NeuralAI.ModelFile)
            print("_______________________________________________________________________________________")
        if file_io.file_exists(NeuralAI.ModelFile):
            # load best model
            print("load best model")
            self._model = keras.models.load_model(NeuralAI.ModelFile)
        self._model = extractTestModel(self._model)
        print("_______________________________________________________________________________________")