def initializeExploration(self, filename="exploration.hdf"):
    """Initialize the pypet environment

    :param filename: hdf filename to store the results in, defaults to "exploration.hdf"
    :type filename: str, optional
    """
    # create hdf file path if it does not exist yet
    pathlib.Path(paths.HDF_DIR).mkdir(parents=True, exist_ok=True)

    # set default hdf filename
    self.HDF_FILE = os.path.join(paths.HDF_DIR, filename)

    # initialize pypet environment
    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    trajectoryfilename = self.HDF_FILE

    nprocesses = multiprocessing.cpu_count()
    logging.info("Number of processes: {}".format(nprocesses))

    # set up the pypet environment
    env = pypet.Environment(
        trajectory=trajectoryName,
        filename=trajectoryfilename,
        multiproc=True,
        ncores=nprocesses,
        complevel=9,
        # log_stdout=False,
        # log_config=None,
        # report_progress=True,
        # log_multiproc=False,
    )
    self.env = env
    # Get the trajectory from the environment
    self.traj = env.trajectory
    self.trajectoryName = self.traj.v_name

    # Add all parameters to the pypet trajectory
    if self.model is not None:
        # if a model is specified, use the default parameters of the
        # model to initialize pypet
        self.addParametersToPypet(self.traj, self.model.params)
    else:
        # else, use a random parameter of the parameter space
        self.addParametersToPypet(self.traj, self.parameterSpace.getRandom(safe=True))

    # Tell pypet which parameters to explore
    self.pypetParametrization = pypet.cartesian_product(self.exploreParameters)
    logging.info(
        "Number of parameter configurations: {}".format(
            len(self.pypetParametrization[list(self.pypetParametrization.keys())[0]])
        )
    )

    self.traj.f_explore(self.pypetParametrization)

    # initialization done
    logging.info("BoxSearch: Environment initialized.")
    self.initialized = True
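# Illustrative sketch (not part of the source above): how pypet.cartesian_product
# expands an exploration dict before it is handed to traj.f_explore(). The parameter
# names and values are placeholders; the expected expansion follows pypet's
# documented example, although the exact ordering may differ.
import pypet

exploreParameters = {"a": [1, 2, 3], "b": [10.0, 20.0]}
parametrization = pypet.cartesian_product(exploreParameters)
# expected expansion (cf. pypet's cartesian_product documentation):
# {"a": [1, 1, 2, 2, 3, 3], "b": [10.0, 20.0, 10.0, 20.0, 10.0, 20.0]}
print(parametrization)

# each index across the lists is one run configuration, so the number of runs is
# the length of any one of the lists
n_runs = len(parametrization["a"])  # 6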
def _initializeExploration(self, filename="exploration.hdf"):
    """Initialize the pypet environment

    :param filename: hdf filename to store the results in, defaults to "exploration.hdf"
    :type filename: str, optional
    """
    # create hdf file path if it does not exist yet
    pathlib.Path(paths.HDF_DIR).mkdir(parents=True, exist_ok=True)

    # set default hdf filename
    self.HDF_FILE = os.path.join(paths.HDF_DIR, filename)

    # initialize pypet environment
    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    trajectoryfilename = self.HDF_FILE

    # set up the pypet environment
    env = pypet.Environment(
        trajectory=trajectoryName,
        filename=trajectoryfilename,
        multiproc=True,
        ncores=self.ncores,
        complevel=9,
        log_config=paths.PYPET_LOGGING_CONFIG,
    )
    self.env = env
    # Get the trajectory from the environment
    self.traj = env.trajectory
    self.trajectoryName = self.traj.v_name

    # Add all parameters to the pypet trajectory
    if self.model is not None:
        # if a model is specified, use the default parameters of the
        # model to initialize pypet
        self._addParametersToPypet(self.traj, self.model.params)
    else:
        # else, use a random parameter of the parameter space
        self._addParametersToPypet(self.traj, self.parameterSpace.getRandom(safe=True))

    # Tell pypet which parameters to explore
    self.pypetParametrization = pypet.cartesian_product(self.exploreParameters)
    # explicitly add all parameters given in star notation, i.e. unwrap star notation into actual parameter names
    if self.parameterSpace.star:
        assert self.model is not None, "With star notation, model cannot be None"
        self.pypetParametrization = unwrap_star_dotdict(self.pypetParametrization, self.model)
    self.nRuns = len(self.pypetParametrization[list(self.pypetParametrization.keys())[0]])
    logging.info(f"Number of parameter configurations: {self.nRuns}")

    self.traj.f_explore(self.pypetParametrization)

    # initialization done
    logging.info("BoxSearch: Environment initialized.")
    self.initialized = True
def initializeExploration(self, fileName="exploration.hdf"):
    # ---- initialize pypet environment ----
    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    self.HDF_FILE = os.path.join(paths.HDF_DIR, fileName)
    trajectoryFileName = self.HDF_FILE

    nprocesses = multiprocessing.cpu_count()
    logging.info("Number of processes: {}".format(nprocesses))

    # set up the pypet environment
    env = pypet.Environment(
        trajectory=trajectoryName,
        filename=trajectoryFileName,
        multiproc=True,
        ncores=nprocesses,
        complevel=9,
        log_stdout=False,
    )
    self.env = env
    # Get the trajectory from the environment
    self.traj = env.trajectory
    self.trajectoryName = self.traj.v_name

    # Add all parameters to the pypet trajectory
    self.addParametersToPypet(self.traj, self.model.params)

    # Tell pypet which parameters to explore
    self.traj.f_explore(self.pypetParametrization)

    # initialization done
    logging.info("Pypet environment initialized.")
    self.initialized = True
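# Illustrative usage sketch (not part of the source above): how the exploration
# initialization shown in the variants above is typically driven. This assumes
# neurolib's public BoxSearch / ParameterSpace / ALNModel API; the parameter names
# and ranges are placeholders chosen for illustration.
import numpy as np
from neurolib.models.aln import ALNModel
from neurolib.utils.parameterSpace import ParameterSpace
from neurolib.optimize.exploration import BoxSearch

model = ALNModel()
# grid over two external input parameters of the model
parameters = ParameterSpace({
    "mue_ext_mean": np.linspace(0.0, 3.0, 11),
    "mui_ext_mean": np.linspace(0.0, 3.0, 11),
})
search = BoxSearch(model=model, parameterSpace=parameters, filename="exploration.hdf")
# the constructor typically sets up the pypet environment (cf. initializeExploration above);
# run() then executes one simulation per parameter configuration
search.run()
search.loadResults()
print(search.dfResults.head())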
def __init__(
    self,
    evalFunction,
    parameterSpace,
    weightList=None,
    model=None,
    filename="evolution.hdf",
    ncores=None,
    POP_INIT_SIZE=100,
    POP_SIZE=20,
    NGEN=10,
    algorithm="adaptive",
    matingOperator=None,
    MATE_P=None,
    mutationOperator=None,
    MUTATE_P=None,
    selectionOperator=None,
    SELECT_P=None,
    parentSelectionOperator=None,
    PARENT_SELECT_P=None,
    individualGenerator=None,
    IND_GENERATOR_P=None,
):
    """Initialize evolutionary optimization.

    :param evalFunction: Evaluation function of a run that provides a fitness vector and simulation outputs
    :type evalFunction: function
    :param parameterSpace: Parameter space to run evolution in.
    :type parameterSpace: `neurolib.utils.parameterSpace.ParameterSpace`
    :param weightList: List of floats that defines the dimensionality of the fitness vector returned from
        evalFunction and the weights of each component for multiobjective optimization
        (positive = maximize, negative = minimize). If not given, a single positive weight will be used,
        defaults to None
    :type weightList: list[float], optional
    :param model: Model to simulate, defaults to None
    :type model: `neurolib.models.model.Model`, optional
    :param filename: HDF file to store all results in, defaults to "evolution.hdf"
    :type filename: str, optional
    :param ncores: Number of cores to simulate on (all available cores by default), defaults to None
    :type ncores: int, optional
    :param POP_INIT_SIZE: Size of the first population used to initialize the evolution
        (random, uniformly distributed), defaults to 100
    :type POP_INIT_SIZE: int, optional
    :param POP_SIZE: Size of the population during evolution, defaults to 20
    :type POP_SIZE: int, optional
    :param NGEN: Number of generations to evaluate, defaults to 10
    :type NGEN: int, optional
    :param algorithm: Evolutionary algorithm to use, either "adaptive" or "nsga2", defaults to "adaptive"
    :type algorithm: str, optional
    :param matingOperator: Custom mating operator, defaults to deap.tools.cxBlend
    :type matingOperator: deap operator, optional
    :param MATE_P: Mating operator keyword arguments (for the default crossover operator cxBlend,
        this defaults to `alpha` = 0.5)
    :type MATE_P: dict, optional
    :param mutationOperator: Custom mutation operator, defaults to du.gaussianAdaptiveMutation_nStepSizes
    :type mutationOperator: deap operator, optional
    :param MUTATE_P: Mutation operator keyword arguments
    :type MUTATE_P: dict, optional
    :param selectionOperator: Custom selection operator, defaults to du.selBest_multiObj
    :type selectionOperator: deap operator, optional
    :param SELECT_P: Selection operator keyword arguments
    :type SELECT_P: dict, optional
    :param parentSelectionOperator: Operator for parent selection, defaults to du.selRank
    :param PARENT_SELECT_P: Parent selection operator keyword arguments (for the default operator selRank,
        this defaults to `s` = 1.5, cf. Eiben & Smith, p. 81)
    :type PARENT_SELECT_P: dict, optional
    :param individualGenerator: Function to generate initial individuals, defaults to du.randomParametersAdaptive
    """
    if weightList is None:
        logging.info("weightList not set, assuming single fitness value to be maximized.")
        weightList = [1.0]

    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    logging.info(f"Trajectory Name: {trajectoryName}")
    self.HDF_FILE = os.path.join(paths.HDF_DIR, filename)
    trajectoryFileName = self.HDF_FILE

    logging.info("Storing data to: {}".format(trajectoryFileName))
    logging.info("Trajectory Name: {}".format(trajectoryName))
    if ncores is None:
        ncores = multiprocessing.cpu_count()
    logging.info("Number of cores: {}".format(ncores))

    # initialize pypet environment
    # env = pp.Environment(trajectory=trajectoryName, filename=trajectoryFileName)
    env = pp.Environment(
        trajectory=trajectoryName,
        filename=trajectoryFileName,
        use_pool=False,
        multiproc=True,
        ncores=ncores,
        complevel=9,
        log_config=paths.PYPET_LOGGING_CONFIG,
    )

    # Get the trajectory from the environment
    traj = env.traj
    # Sanity check if everything went ok
    assert (
        trajectoryName == traj.v_name
    ), f"Pypet trajectory has a different name than trajectoryName {trajectoryName}"
    # trajectoryName = traj.v_name

    self.model = model
    self.evalFunction = evalFunction
    self.weightList = weightList

    self.NGEN = NGEN
    assert POP_SIZE % 2 == 0, "Please choose an even number for POP_SIZE!"
    self.POP_SIZE = POP_SIZE
    assert POP_INIT_SIZE % 2 == 0, "Please choose an even number for POP_INIT_SIZE!"
    self.POP_INIT_SIZE = POP_INIT_SIZE

    self.ncores = ncores

    # comment string for storing info
    self.comments = "no comments"

    self.traj = env.traj
    self.env = env
    self.trajectoryName = trajectoryName
    self.trajectoryFileName = trajectoryFileName

    self._initialPopulationSimulated = False

    # -------- settings
    self.verbose = False
    self.plotColor = "C0"

    # -------- simulation
    self.parameterSpace = parameterSpace
    self.ParametersInterval = parameterSpace.named_tuple_constructor
    self.paramInterval = parameterSpace.named_tuple

    self.toolbox = deap.base.Toolbox()

    # -------- algorithms
    if algorithm == "adaptive":
        logging.info(f"Evolution: Using algorithm: {algorithm}")
        self.matingOperator = tools.cxBlend
        self.MATE_P = MATE_P or {"alpha": 0.5}
        self.mutationOperator = du.gaussianAdaptiveMutation_nStepSizes
        self.selectionOperator = du.selBest_multiObj
        self.parentSelectionOperator = du.selRank
        self.PARENT_SELECT_P = PARENT_SELECT_P or {"s": 1.5}
        self.individualGenerator = du.randomParametersAdaptive

    elif algorithm == "nsga2":
        logging.info(f"Evolution: Using algorithm: {algorithm}")
        self.matingOperator = tools.cxSimulatedBinaryBounded
        self.MATE_P = MATE_P or {
            "low": self.parameterSpace.lowerBound,
            "up": self.parameterSpace.upperBound,
            "eta": 20.0,
        }
        self.mutationOperator = tools.mutPolynomialBounded
        self.MUTATE_P = MUTATE_P or {
            "low": self.parameterSpace.lowerBound,
            "up": self.parameterSpace.upperBound,
            "eta": 20.0,
            "indpb": 1.0 / len(self.weightList),
        }
        self.selectionOperator = tools.selNSGA2
        self.parentSelectionOperator = tools.selTournamentDCD
        self.individualGenerator = du.randomParameters

    else:
        raise ValueError("Evolution: algorithm must be one of the following: ['adaptive', 'nsga2']")

    # if the operators are set manually, overwrite the algorithm defaults chosen above
    self.matingOperator = matingOperator if matingOperator is not None else self.matingOperator
    self.mutationOperator = mutationOperator if mutationOperator is not None else self.mutationOperator
    self.selectionOperator = selectionOperator if selectionOperator is not None else self.selectionOperator
    self.parentSelectionOperator = (
        parentSelectionOperator if parentSelectionOperator is not None else self.parentSelectionOperator
    )
    self.individualGenerator = (
        individualGenerator if individualGenerator is not None else self.individualGenerator
    )

    # make sure the operator keyword-argument dicts are set, falling back to the
    # user-supplied values (if any) and otherwise to empty dicts
    self.MATE_P = self.MATE_P if hasattr(self, "MATE_P") else (MATE_P or {})
    self.PARENT_SELECT_P = self.PARENT_SELECT_P if hasattr(self, "PARENT_SELECT_P") else (PARENT_SELECT_P or {})
    self.MUTATE_P = self.MUTATE_P if hasattr(self, "MUTATE_P") else (MUTATE_P or {})
    self.SELECT_P = self.SELECT_P if hasattr(self, "SELECT_P") else (SELECT_P or {})

    self.initDEAP(
        self.toolbox,
        self.env,
        self.paramInterval,
        self.evalFunction,
        weightList=self.weightList,
        matingOperator=self.matingOperator,
        mutationOperator=self.mutationOperator,
        selectionOperator=self.selectionOperator,
        parentSelectionOperator=self.parentSelectionOperator,
        individualGenerator=self.individualGenerator,
    )

    # set up pypet trajectory
    self.initPypetTrajectory(
        self.traj,
        self.paramInterval,
        self.POP_SIZE,
        self.NGEN,
        self.model,
    )

    # population history: dict of all valid individuals per generation
    self.history = {}

    # initialize population
    self.evaluationCounter = 0
    self.last_id = 0
def __init__(
    self,
    evalFunction,
    parameterSpace,
    weightList=None,
    model=None,
    filename="evolution.hdf",
    ncores=None,
    POP_INIT_SIZE=100,
    POP_SIZE=20,
    NGEN=10,
    matingFunction=None,
    CXP=0.5,
    selectionFunction=None,
    RANKP=1.5,
):
    """Initialize evolutionary optimization.

    :param evalFunction: Evaluation function of a run that provides a fitness vector and simulation outputs
    :type evalFunction: function
    :param parameterSpace: Parameter space to run evolution in.
    :type parameterSpace: `neurolib.utils.parameterSpace.ParameterSpace`
    :param weightList: List of floats that defines the dimensionality of the fitness vector returned from
        evalFunction and the weights of each component for multiobjective optimization
        (positive = maximize, negative = minimize). If not given, a single positive weight will be used,
        defaults to None
    :type weightList: list[float], optional
    :param model: Model to simulate, defaults to None
    :type model: `neurolib.models.model.Model`, optional
    :param filename: HDF file to store all results in, defaults to "evolution.hdf"
    :type filename: str, optional
    :param ncores: Number of cores to simulate on (all available cores by default), defaults to None
    :type ncores: int, optional
    :param POP_INIT_SIZE: Size of the first population used to initialize the evolution
        (random, uniformly distributed), defaults to 100
    :type POP_INIT_SIZE: int, optional
    :param POP_SIZE: Size of the population during evolution, defaults to 20
    :type POP_SIZE: int, optional
    :param NGEN: Number of generations to evaluate, defaults to 10
    :type NGEN: int, optional
    :param matingFunction: Custom mating function, defaults to blend crossover if not set, defaults to None
    :type matingFunction: function, optional
    :param CXP: Parameter handed to the mating function (for blend crossover, this is `alpha`), defaults to 0.5
    :type CXP: float, optional
    :param selectionFunction: Custom parent selection function, defaults to rank selection if not set,
        defaults to None
    :type selectionFunction: function, optional
    :param RANKP: Parent selection parameter (for rank selection, this is `s` in Eiben & Smith, p. 81),
        defaults to 1.5
    :type RANKP: float, optional
    """
    if weightList is None:
        logging.info("weightList not set, assuming single fitness value to be maximized.")
        weightList = [1.0]

    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    self.HDF_FILE = os.path.join(paths.HDF_DIR, filename)
    trajectoryFileName = self.HDF_FILE

    logging.info("Storing data to: {}".format(trajectoryFileName))
    logging.info("Trajectory Name: {}".format(trajectoryName))
    if ncores is None:
        ncores = multiprocessing.cpu_count()
    logging.info("Number of cores: {}".format(ncores))

    # initialize pypet environment
    # env = pp.Environment(trajectory=trajectoryName, filename=trajectoryFileName)
    env = pp.Environment(
        trajectory=trajectoryName,
        filename=trajectoryFileName,
        use_pool=False,
        multiproc=True,
        ncores=ncores,
        log_stdout=False,
        log_multiproc=False,
        complevel=9,
    )

    # Get the trajectory from the environment
    traj = env.traj
    # Sanity check if everything went ok
    assert (
        trajectoryName == traj.v_name
    ), f"Pypet trajectory has a different name than trajectoryName {trajectoryName}"
    # trajectoryName = traj.v_name

    self.model = model
    self.evalFunction = evalFunction
    self.weightList = weightList

    self.CXP = CXP
    self.RANKP = RANKP
    self.NGEN = NGEN
    assert POP_SIZE % 2 == 0, "Please choose an even number for POP_SIZE!"
    self.POP_SIZE = POP_SIZE
    assert POP_INIT_SIZE % 2 == 0, "Please choose an even number for POP_INIT_SIZE!"
    self.POP_INIT_SIZE = POP_INIT_SIZE
    self.ncores = ncores

    # comment string for storing info
    self.comments = "no comments"

    self.traj = env.traj
    self.env = env
    self.trajectoryName = trajectoryName
    self.trajectoryFileName = trajectoryFileName

    self._initialPopulationSimulated = False

    # -------- settings
    self.verbose = False

    # -------- simulation
    self.parameterSpace = parameterSpace
    self.ParametersInterval = parameterSpace.named_tuple_constructor
    self.paramInterval = parameterSpace.named_tuple

    self.toolbox = deap.base.Toolbox()

    if matingFunction is None:
        # this is our custom uniform mating function
        # matingFunction = du.cxUniform_adapt
        # this is blend crossover (with alpha)
        matingFunction = tools.cxBlend
    self.matingFunction = matingFunction

    if selectionFunction is None:
        selectionFunction = du.selRank
    self.selectionFunction = selectionFunction

    self.initDEAP(
        self.toolbox,
        self.env,
        self.paramInterval,
        self.evalFunction,
        weights_list=self.weightList,
        matingFunction=self.matingFunction,
        selectionFunction=self.selectionFunction,
    )

    # set up pypet trajectory
    self.initPypetTrajectory(
        self.traj,
        self.paramInterval,
        self.POP_SIZE,
        self.CXP,
        self.NGEN,
        self.model,
    )

    # population history: dict of all valid individuals per generation
    self.popHist = {}

    # initialize population
    self.evaluationCounter = 0
    self.last_id = 0
def __init__(
    self,
    evalFunction,
    parameterSpace,
    weightList=None,
    model=None,
    hdf_filename="evolution.hdf",
    ncores=None,
    POP_INIT_SIZE=100,
    POP_SIZE=20,
    NGEN=10,
    CXPB=0.04,
):
    """
    :param model: Model to run
    :type model: Model
    :param evalFunction: Evaluation function of a run that provides a fitness vector and simulation outputs
    :type evalFunction: function; should return a tuple of the form (fitness_tuple, model.output)
    :param weightList: List of floats that defines the dimensionality of the fitness vector returned from
        evalFunction and the weights of each component (positive = maximize, negative = minimize)
    :param hdf_filename: HDF file to store all results in (data/hdf/evolution.hdf default)
    :param ncores: Number of cores to simulate on (max cores default)
    :param POP_INIT_SIZE: Size of first population to initialize evolution with (random, uniformly distributed)
    :param POP_SIZE: Size of the population during evolution
    :param NGEN: Number of generations to evaluate
    :param CXPB: Crossover probability of each individual gene
    """
    if weightList is None:
        logging.info("weightList not set, assuming single fitness value to be maximized.")
        weightList = [1.0]

    trajectoryName = "results" + datetime.datetime.now().strftime("-%Y-%m-%d-%HH-%MM-%SS")
    self.HDF_FILE = os.path.join(paths.HDF_DIR, hdf_filename)
    trajectoryFileName = self.HDF_FILE

    logging.info("Storing data to: {}".format(trajectoryFileName))
    logging.info("Trajectory Name: {}".format(trajectoryName))
    if ncores is None:
        ncores = multiprocessing.cpu_count()
    logging.info("Number of cores: {}".format(ncores))

    # initialize pypet environment
    # env = pp.Environment(trajectory=trajectoryName, filename=trajectoryFileName)
    env = pp.Environment(
        trajectory=trajectoryName,
        filename=trajectoryFileName,
        use_pool=False,
        multiproc=True,
        ncores=ncores,
        complevel=9,
    )

    # Get the trajectory from the environment
    traj = env.traj
    # Sanity check if everything went ok
    assert (
        trajectoryName == traj.v_name
    ), f"Pypet trajectory has a different name than trajectoryName {trajectoryName}"
    # trajectoryName = traj.v_name

    self.model = model
    self.evalFunction = evalFunction
    self.weightList = weightList

    self.CXPB = CXPB
    self.NGEN = NGEN
    assert POP_SIZE % 2 == 0, "Please choose an even number for POP_SIZE!"
    self.POP_SIZE = POP_SIZE
    assert POP_INIT_SIZE % 2 == 0, "Please choose an even number for POP_INIT_SIZE!"
    self.POP_INIT_SIZE = POP_INIT_SIZE
    self.ncores = ncores

    self.traj = env.traj
    self.env = env
    self.trajectoryName = trajectoryName
    self.trajectoryFileName = trajectoryFileName

    self._initialPopulationSimulated = False

    # settings
    self.verbose = False

    # environment parameters
    self.evaluationCounter = 0

    # simulation parameters
    self.parameterSpace = parameterSpace
    self.ParametersInterval = parameterSpace.named_tuple_constructor
    self.paramInterval = parameterSpace.named_tuple

    self.toolbox = deap.base.Toolbox()
    self.initDEAP(
        self.toolbox,
        self.env,
        self.paramInterval,
        self.evalFunction,
        weights_list=self.weightList,
    )

    # set up pypet trajectory
    self.initPypetTrajectory(
        self.traj,
        self.paramInterval,
        self.ParametersInterval,
        self.POP_SIZE,
        self.CXPB,
        self.NGEN,
        self.model,
    )

    # initialize population
    self.last_id = 0
    self.pop = self.toolbox.population(n=self.POP_INIT_SIZE)
    # self.pop = self.tagPopulation(self.pop)  # will do this in the initial run

    # population history: dict of all valid individuals per generation
    self.popHist = {}
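# Illustrative sketch (not part of the source above) of an evaluation function that
# follows the (fitness_tuple, model.output) contract described in the docstrings above.
# It assumes neurolib's `getModelFromTraj` helper and a model exposing `params`,
# `run()`, and `output`; the fitness definition (mean rate close to 10 Hz) and the
# parameter values are placeholders for illustration.
import numpy as np

def evaluateSimulation(traj):
    # `evolution` refers to the Evolution instance created elsewhere (only used at run time);
    # it constructs a model whose parameters are set from this pypet individual
    model = evolution.getModelFromTraj(traj)
    model.params["dt"] = 0.1
    model.params["duration"] = 2000  # ms
    model.run()
    # toy fitness: negative distance of the mean output rate from 10 Hz
    fitness = -abs(np.mean(model.output) - 10.0)
    fitness_tuple = (fitness,)
    # return the fitness vector together with the simulated output for storage
    return fitness_tuple, model.output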
    dataset_path='/Users/raphaelholca/Documents/data-sets/MNIST',
    pad_size=(parameter_dict['conv_filter_side'] - 1) / 2,
    load_test=True,
    cross_validate='search')

""" create directory to save data """
parameter_dict['pypet'] = True
parameter_dict['verbose'] = 0
parameter_dict['pypet_name'] = parameter_dict['name']
save_path = os.path.join('output', parameter_dict['name'])
pp.check_dir(save_path, overwrite=False)
print_dict = parameter_dict.copy()
print_dict.update(explore_dict)

""" create pypet environment """
env = pypet.Environment(trajectory='explore_perf',
                        log_stdout=False,
                        add_time=False,
                        multiproc=True,
                        ncores=12,
                        filename=os.path.join(save_path, 'explore_perf.hdf5'))

traj = env.v_trajectory
pp.add_parameters(traj, parameter_dict)

explore_dict = pypet.cartesian_product(explore_dict, tuple(explore_dict.keys()))
# if not all entries of the dict need to be explored through the cartesian product,
# replace tuple(explore_dict.keys()) with a tuple containing only the relevant dict keys

explore_dict['name'] = pp.set_run_names(explore_dict, parameter_dict['name'])

traj.f_explore(explore_dict)

""" launch simulation with pypet for parameter exploration """
tic = time.time()

env.f_run(pp.launch_exploration, images_train, labels_train, images_test,