def bop_elites(max_evals=me, unknown_features=False, preload=None, n_initial_samples=20):
    '''bop_elites
    (THIS IS V0.9 RUNNING ON MULTIDIMENSIONAL DETERMINISTIC FEATURES)

    bop-elites performs Bayesian Optimisation of Phenotypic Elites by means of
    the ensemble expected improvement algorithm. On domains where the features
    are deterministic and calculable this simplifies to the classic expected
    improvement algorithm.

    Example:
        output_predictions = bop_elites(max_evals=500)

    Inputs (arguments):
        max_evals         - [ Integer ] - Evaluation budget
                            (NOTE: default `me` is a module-level global set elsewhere
                            in this script — evaluated once at import time)
        unknown_features  - [ Boolean ] - Flag to activate Ensemble EI
                            (currently a no-op; see TODO below)
        preload           - [ Array ]   - Path to a directory of previously pickled
                            evaluations; when given, sampling is skipped
        n_initial_samples - [ Integer ] - Number of samples to initialise

    Inputs (from config file / module globals):
        mp.domain - [ Domain Class ] - Domain specific information
        mp.map    - [ Map Class ]    - Global map (for map-elites)
        mp        - [ Script ]       - Contains many variables
        (also reads globals: seed, niche_size, GMLM, domain_flag, info, optn)

    Outputs:
        map        - [ Map Class ] - Final optimised map
        objvallist - [ List ]      - Objective values of points in map

    Code Author: Paul Kent
    Warwick University
    email: [email protected]
    Oct 2020; Last revision: 20-01-2021
    '''
    # Re-seed every RNG used across the project scripts so runs are reproducible.
    ss.myseed = seed
    ss.initseed(ss.myseed)
    print('starting experiment with seed: ', seed, ' niche_size: ', niche_size)

    ## Initialise main map
    # Feature space is discretised into a niche_size x niche_size grid.
    mp.feature_resolution = [niche_size, niche_size]
    mp.map = create_map(mp.feature_resolution, mp.domain, GMLM=GMLM)

    ## Set up for Data-Collection
    DC_training_times = []   # GP training time per (re)build, for later analysis
    DC_niches_filled = []    # niche-fill counts per iteration, for later analysis

    ## Create a Readme file and the directory for storage
    mydir, logger = readme(domain_flag, n_initial_samples, max_evals, seed, mp.penalty, info, optn)
    printinfo('intro', max_evals, n_initial_samples, domain_flag, seed, logger=logger)  ## pretty print to terminal

    ## Set the Fitness and feature functions from config file
    ofun = mp.fitness_fun   # objective (fitness) function
    ffun = mp.feature_fun   # feature/descriptor function

    #### Initial sampling
    # NOTE(review): `preload == None` should be `preload is None` per PEP 8.
    if preload == None:
        observations = initial_sampling(n_initial_samples)
        printinfo(type='sampling', logger=logger)
    else:
        directory = preload  # Must be the parent directory
        # NOTE(review): these file handles are never closed — consider `with open(...)`.
        pointfile = open(directory + 'points.b', 'rb')
        data = pickle.load(pointfile)
        observations = data
        nanfile = open(directory + 'nanpoints.b', 'rb')
        nandata = pickle.load(nanfile)
        mp.map.nanmap = nandata
        print(str(len(observations)) + ' samples loaded')
        # Treat every preloaded point as an "initial" sample for budget accounting.
        n_initial_samples = len(observations)

    ## Set counters
    n_samples = n_initial_samples
    itercount = 1
    mp.map.initialisemap(observations)    # Fit initial samples into map
    mp.map.initialisemeans(observations)
    mp.map.estimatemeans()
    mp.nancounter = 0
    mp.observations = observations
    #mp.map.updatemeans(observations[-1])
    DC_niches_filled.append(count_niches_filled())  # Keep track of niches

    ## Build GP for fitness
    printinfo('train_gp', n_samples, logger=logger)
    retrain = True  # Initial value to train new GP hyperparameters
    omodel, training_time, hypers = buildpymodel2(observations, n_initial_samples)
    tic = time.time()
    #mp.map.generate_niche_models(omodel)
    print('models generated in ', time.time() - tic)
    DC_training_times.append(training_time)  # Keep track of training times
    mp.map.calcstdmeans(observations, omodel.mean_module.constant.item())  # Update means

    if unknown_features:
        # TODO - reimplement unknown features
        # # obs_f = np.reshape( [ feat for feat in mp.map.flatten() ],[ -1 , xdims ] )
        # # fmodel = buildmodel(obs_x,obs_fy)
        pass

    # Calculate initial solution value
    objvallist = []
    objval = calc_objval()
    objvallist.append(objval)
    printinfo('initscore', objval, logger=logger)
    printinfo('line', logger=logger)

    ## Main Acquisition Loop
    # One new evaluation per iteration until the budget is exhausted.
    for i in range(max_evals - n_initial_samples):
        printinfo('acquisition', itercount, n_samples, logger=logger)

        ## Perform BOP-Elites
        nextpoint, _, candidatelist = DoContinuousBOPELITES(
            omodel, optn, acq='BOP')  # This performs the actual BOP-Elites
        triallist = np.copy(candidatelist)
        valuefound = False
        # Try candidates from best (last) to worst until one evaluates cleanly.
        # newobs[-1] is a failure flag from new_sample: False means evaluation succeeded.
        while len(triallist) > 0:
            nextpoint = triallist[-1]
            #print(nextpoint)
            newobs = new_sample(ofun, ffun, nextpoint)
            if newobs[-1] == False:
                triallist = []                    # stop scanning candidates
                observations.append(newobs[:-1])  # keep point without the flag
                valuefound = True
            else:
                #obslist.append(newobs[:-1])
                print(mp.nancounter)
                # Drop the saved point that corresponds to the failed candidate.
                mp.saved_points = np.delete(mp.saved_points, -1)
                triallist = triallist[:-1]

        if not valuefound:
            # Every candidate failed: record the NaN region and substitute an
            # expanded/fallback evaluation so the loop can continue.
            print(observations[0][0])
            print(candidatelist[-1])
            mp.map.store_nan(candidatelist[-1])
            savenans(mp.map.nanmap, mydir)
            newobs = expand_nan(ofun, ffun, candidatelist[-1])
            observations.append(newobs[:-1])
            printinfo('nans', mp.nancounter, logger=logger)

        printinfo('new point', newobs[0], observations, logger=logger)

        ## Update GP
        printinfo('train_gp', itercount, logger=logger)
        if valuefound:
            omodel, training_time, hypers = buildpymodel2(
                observations, n_initial_samples)
            fantasies = mp.map.fantasize_nans()
        # else:
        #     observations_plus = observations
        #, training_time , hypers = buildpymodel2( observations_plus ,
        # n_initial_samples)#, #*hypers , #retrain = retrain)
        # NOTE(review): `fantasies` is only assigned when valuefound is True —
        # if the very first iteration fails to find a value, the
        # `len(fantasies)` check below raises NameError. TODO confirm/fix.
        retrain = not retrain  # Toggle flag ie- Don't retrain Hyper-parameters every iteration

        ## Update the main map of solutions.
        mp.map.updatemap(observations)
        mp.map.updatepointmean([observations[-1]])
        mp.map.updatemeans(observations[-1], omodel, mp.feature_resolution)
        #mp.map.generate_niche_models(omodel)
        if len(fantasies) > 0:
            # Fold fantasised values for NaN regions into the model so the
            # acquisition avoids repeatedly proposing infeasible points.
            #obsplus = np.vstack([observations , fantasies])
            #print(obsplus[:-3])
            observations_plus = np.vstack([observations, np.array(fantasies)])
            omodel = update_with_nans(observations_plus, omodel)
            #omodel , training_time , hypers = buildpymodel2( obsplus, n_initial_samples)
            #mp.map.generate_niche_models(omodel)
        mp.map.calcstdmeans(observations, omodel.mean_module.constant.item())  # Update means

        # Data_Collection
        nniches = count_niches_filled()
        DC_niches_filled.append(nniches)
        DC_training_times.append(training_time)
        objval = calc_objval()
        objvallist.append(objval)
        printinfo('current_value', objval, logger=logger)
        printinfo('line', logger=logger)
        savepoints(observations, mydir)  # Save current observations
        itercount += 1
        n_samples += 1

    printinfo('final_value', str(np.nansum(mp.map.fitness)), logger=logger)
    print(f'\n Illuminating Final Prediction Map')
    pred_map, pred_map_value = getpredmap(omodel, observations, logger)
    save_data(DC_training_times, DC_niches_filled, objvallist, pred_map_value,
              mp.map.fitness, pred_map.genomes, seed, observations, mydir)
    return (mp.map, objvallist)
# Parse command-line options, falling back to hard-coded defaults when parsing
# fails (e.g. when this script runs inside a notebook/IDE whose extra argv
# entries make argparse bail out with SystemExit).
try:
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', default=8, type=int)
    parser.add_argument('--UCB_param', default=3.7, type=float)
    parser.add_argument('--ns', default=10, type=int)
    args = parser.parse_args()
    seed = args.seed
    ns = args.ns
    UCB_param = args.UCB_param
# argparse signals bad/unknown arguments via SystemExit, so it must be listed
# explicitly; a bare `except:` would also swallow KeyboardInterrupt.
except (Exception, SystemExit):
    seed = 8
    UCB_param = 3.7
    ns = 10

# Propagate the seed to every project script for reproducibility.
ss.myseed = seed
ss.initseed(ss.myseed)

#cur_dir = os.path.dirname(__file__)
# realpath resolves symlinks so the config path is correct even when the
# script is invoked through a link.
cur_dir = os.path.dirname(os.path.realpath(__file__))

################################################
###### Set the config file to load from
#import domain.set_domain as sd
domain_flag = 'simple2d'  ##Change this to change your domain #######

# Put the chosen domain's config directory on the import path.
if domain_flag == 'ffd':
    sys.path.insert(0, cur_dir + '/domain/FFD_config')
if domain_flag == 'rastrigin':
    sys.path.insert(0, cur_dir + '/domain/Rastrigin_config')
parser = argparse.ArgumentParser() parser.add_argument('--seed', default=8, type=int) # Random Seed parser.add_argument('--info', default='', type=str) # Additional info parser.add_argument('--ns', default=10, type=int) # Additional info args = parser.parse_args() niche_size = args.ns seed = args.seed info = args.info except: seed = 9 # Default seed is 2 info = '' # Default no extra info niche_size = 50 ss.myseed = seed ss.initseed(ss.myseed) # This propogates the seed throughout all scripts. ################################################ ###### Set the config file to load from ######## ################################################ domain_flag = 'parsec' #Change this to change your domain ####### cur_dir = os.getcwd() # Set Current Directory if domain_flag == 'ffd': sys.path.insert(0, cur_dir + '/domain/FFD_config') if domain_flag == 'rastrigin': sys.path.insert(0, cur_dir + '/domain/Rastrigin_config') if domain_flag == 'simple2d':