def __init__(self, genes):
    """Build a network around an existing gene list.

    Rate constants and the operating mode come from the [NETWORK]
    section of the project configuration.
    """
    # Carry the supplied genome and start with no outputs.
    self.genes = genes
    self.outputs = []
    self.outputCount = 0
    # Config-driven dynamics parameters.
    self.beta = Config.readFloat(self, "Network", "beta")
    self.delta = Config.readFloat(self, "Network", "delta")
    self.mode = Config.read(self, "Network", "mode")
def __init__(self, network, genomeClass, taskClass, runs, elitism):
    """Create an evolver and seed its initial population.

    Reads evolver parameters from the "EVOLVER.<SUBCLASS>" section of
    config.ini, then builds `popSize` genomes initialized from the
    task's requirements.

    Args:
        network: the regulatory network shared by the population.
        genomeClass: class used to construct each individual genome.
        taskClass: class describing the task (provides requirements()).
        runs: number of evaluation runs per individual.
        elitism: number of top individuals carried over unchanged.
    """
    configParser = configparser.ConfigParser()
    configParser.read("config.ini")
    # Look up the section once instead of recomputing the key expression
    # for every parameter (original repeated it four times).
    section = configParser[("Evolver." + type(self).__name__).upper()]
    self.popSize = int(section['populationSize'])
    self.mutationPortion = float(section['mutationPortion'])
    self.complexificationRate = float(section['complexificationRate'])
    # Only the exact string "True" enables output — the original comparison
    # is case-sensitive and is preserved here.
    self.showOutput = section['showOutput'] == "True"
    self.pop = []  # population of whole genomes
    self.genomeClass = genomeClass
    self.taskClass = taskClass
    self.runs = runs
    self.elitism = elitism
    self.grn = network
    self.mode = Config.read(self, "Evolver", "mode")
    # self.saveBestModel = Config.read(self, "Evolver", "saveBestModel")
    # Average fitness per generation, tracked to decide when the network
    # should be complexified.
    self.fitnessLog = []
    print("ARNF: Creating a population of ARNs with size: {}".format(
        self.popSize))
    for _ in range(self.popSize):
        individual = self.genomeClass()
        individual.initialize(self.taskClass().requirements())
        self.pop.append(individual)
def __init__(self):
    """Initialize an empty network from the [NETWORK] config section.

    Loads the dynamics constants and the input/output clamping bounds;
    the gene list starts unset.
    """
    # No genome attached yet.
    self.genes = None
    # Dynamics constants.
    self.beta = Config.readFloat(self, "Network", "beta")
    self.delta = Config.readFloat(self, "Network", "delta")
    # Input/output value bounds.
    self.minIn = Config.readFloat(self, "Network", "minIn")
    self.maxIn = Config.readFloat(self, "Network", "maxIn")
    self.minOut = Config.readFloat(self, "Network", "minOut")
    self.maxOut = Config.readFloat(self, "Network", "maxOut")
    # Integration mode selector.
    self.intMode = Config.read(self, "Network", "intMode")
def __init__(self):
    """Initialize a task from the "TASK.<SUBCLASS>" section of config.ini.

    Loads the number of outputs and the network evaluation time; the
    regulatory network itself is attached later (starts as None).
    """
    configParser = configparser.ConfigParser()
    configParser.read("config.ini")
    # Look up the section once instead of rebuilding the key per parameter.
    section = configParser[("Task." + type(self).__name__).upper()]
    self.outputCount = int(section['outputCount'])
    self.networkTime = int(section['networkTime'])
    self.grn = None
    self.showOutput = Config.readBool(self, "Task", "showOutput")
    # (removed dead trailing `pass` — the body is non-empty)
from keras.regularizers import l2
import sys
import os
import time
import random
from Qnetwork import Qnetwork
from ExperienceReplay import ExperienceReplay
from Utility import Utility
from Utility import Config

# NOTE(review): `gym` is used below but not imported in this chunk —
# presumably imported elsewhere in the file; confirm.
env = gym.make('GazeboTurtlebotMazeColor-v0')
# BUG FIX: the original assigned the bound method (`env.reset`) instead of
# calling it, so `observation` never held the initial observation.
observation = env.reset()

# Set experiment parameters.
config = Config()
config.path = "./DQN_maze_target_v9"
if not os.path.exists(config.path):
    os.makedirs(config.path)
config.loadOldFile()
config.saveOldFile()
config.load_model = False
config.pre_train_step = 1000
config.epsilon_decay = 1.0 / 1000
config.gamma = 0.99

network = Qnetwork(env.num_state, env.num_action)
replay = ExperienceReplay(config.path)
utility = Utility(config.path + config.reward_file,
                  config.path + config.step_file)
def __init__(self):
    """Set up the configuration object and the logging handle."""
    # NOTE(review): Log.Logs is assigned without parentheses — if Logs is a
    # class this stores the class itself rather than an instance; confirm
    # that is intentional.
    self.log = Log.Logs
    self.config = Config.Config()
def __init__(self):
    """Initialize an empty network: no genes, no outputs yet.

    beta/delta and the mode string come from the [NETWORK] section of
    the project configuration.
    """
    self.genes = None
    self.outputs = []
    # Config-driven parameters.
    self.beta = Config.readFloat(self, "Network", "beta")
    self.delta = Config.readFloat(self, "Network", "delta")
    self.mode = Config.read(self, "Network", "mode")