def supervised_learning(s, readout, h_fc1, sess, train_step, socket, saver):
    """Train the network from previously recorded game log files.

    Iterates over the first NUM_OF_LEARNED_GAME log files (named F0000,
    F0001, ... inside the 'LogGameFile' directory) and feeds each game's
    recorded states to tranning_network.

    Args:
        s: input placeholder/tensor of the network.
        readout: network output tensor.
        h_fc1: hidden fully-connected layer tensor.
        sess: TensorFlow session running the graph.
        train_step: training operation.
        socket: socket handed through to the training routine — presumably
            for communicating with the game process; verify against caller.
        saver: tf.train.Saver used to checkpoint the network.
    """
    directory = 'LogGameFile'
    # Count existing log files. The value is unused (the commented line below
    # shows it once fed NUM_OF_LEARNED_GAME), but the listdir call also
    # validates that the directory exists, so it is kept.
    numOfLogGameFile = len([item for item in os.listdir(directory)
                            if os.path.isfile(os.path.join(directory, item))])
    # NUM_OF_LEARNED_GAME = numOfLogGameFile
    print("START OF TRAINING BY SUPERVISED NETWORK")
    for i in range(NUM_OF_LEARNED_GAME):
        # Log files are zero-padded to 4 digits: F0000, F0001, ...
        # Replaces the original fragile slicing ('"/F0000"[:-len(str(i))]'),
        # which breaks for i >= 10000; zfill is identical for i < 10000.
        nameLogFile = directory + "/F" + str(i).zfill(4)
        logFile = LogFile(nameLogFile)
        gameState = logFile.get_all_game_state()
        tranning_network(s, readout, h_fc1, sess, gameState, train_step,
                         socket, saver, i)
    print("DONE SUPERVISED LEARNING")
# LOAD NETWORK IN CASE WE ALREADY LEARNED: restore the latest checkpoint
# from "saved_networks" if one exists, otherwise start from scratch.
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
    saver.restore(sess, checkpoint.model_checkpoint_path)
    print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
    print("Could not find old network weights")
print("Done loading network")

# Interactive inspection loop: read "<game> <stateIndex>" from stdin, load
# that game's log file and evaluate the requested state with the network.
while (1):
    string = input()
    values = string.split(" ")
    game = int(values[0])
    stateIndex = int(values[1])
    print("Game: " + str(game) + " State " + str(stateIndex))
    directory = 'logCNN'
    nameLogFile = directory + "/logGameCNN" + str(game)
    logFile = LogFile(nameLogFile)
    listGameState = logFile.get_all_game_state()
    # BUG FIX: the original test used '>', which let
    # stateIndex == len(listGameState) through and crashed with an
    # IndexError on the lookup below; also reject negative indices so a
    # bad input cannot silently wrap around to the end of the list.
    if (stateIndex >= len(listGameState) or stateIndex < 0):
        print("State out of Range")
    else:
        gameState = listGameState[stateIndex]
        print(gameState)
        calculate_value_game_state(input_layer, readout, h_fc1, sess, gameState)
    print("-NEXT-")