def calculate_final_score(mymap):
    """Score a prediction map by re-evaluating its occupied cells on the true
    fitness function.

    Parameters
    ----------
    mymap : Map
        A prediction map whose ``genomes`` and ``fitness`` arrays share the
        same cell layout; cells with NaN fitness are treated as empty.

    Returns
    -------
    float
        The NaN-aware sum of the true fitness values of all occupied cells.
        Relies on the module-level config object ``mp`` for the search-space
        dimensionality and the true fitness function.
    """
    n_dims = len(mp.domain.valid_ranges)
    # Keep only genomes from occupied cells, then restore per-genome rows.
    occupied = ~np.isnan(mymap.fitness)
    flat_genomes = mymap.genomes[occupied].flatten()
    candidates = np.reshape(flat_genomes, [-1, n_dims])
    true_scores = [mp.fitness_fun(genome) for genome in candidates]
    return np.nansum(true_scores)
def pred_true_map(pred_map):
    """Build a map holding the genomes of ``pred_map`` paired with their
    *true* fitness values (re-evaluated via ``mp.fitness_fun``).

    Parameters
    ----------
    pred_map : 2-D array-like of genomes
        The prediction map's genome grid.

    Returns
    -------
    Map
        A freshly created map with the same genomes and true fitness values.
    """
    truemap, _ = create_map(mp.feature_resolution, mp.domain)
    n_rows = len(pred_map[0])
    n_cols = len(pred_map)
    # Visit every (row, col) cell in the same row-major order as the
    # original itertools.product iteration.
    for row in range(n_rows):
        for col in range(n_cols):
            cell = (row, col)
            truemap.fitness[cell] = mp.fitness_fun(pred_map[cell])
            truemap.genomes[cell] = pred_map[cell]
    return truemap
def evalgenomes(genomes):
    """Evaluate a flat sequence of genomes on the true fitness function and
    place them into a fresh map, filling cells in row-major order.

    Parameters
    ----------
    genomes : sequence
        Genomes to evaluate; must contain at most as many entries as the map
        has cells.

    Returns
    -------
    Map
        A new map whose k-th (row-major) cell holds the k-th genome and its
        true fitness.

    Note: the grid was previously hard-coded to 25x25; the cell indices are
    now derived from ``mp.feature_resolution`` (the same shape used by
    ``create_map``), so this works for any map resolution.
    """
    truemap, _ = create_map(mp.feature_resolution, mp.domain)
    # Row-major cell coordinates for the whole grid.
    indexes = list(it.product(*(range(int(r)) for r in mp.feature_resolution)))
    for count, g in enumerate(genomes):
        j = tuple(int(a) for a in indexes[count])
        truemap.fitness[j] = mp.fitness_fun(g)
        truemap.genomes[j] = g
    return truemap
def pred_true_map(pred_map):
    """Takes a prediction map as input and outputs a map with those points
    and their true fitness values.

    Parameters
    ----------
    pred_map : 2-D array-like of genomes
        The prediction map's genome grid.

    Returns
    -------
    Map
        A new map with the same genomes, re-scored with ``mp.fitness_fun``.
    """
    # BUG FIX: create_map returns a (map, edges) tuple (see every other call
    # site in this file); the original bound the whole tuple to `truemap`,
    # so `truemap.fitness[j]` below raised AttributeError.
    truemap, _ = create_map(mp.feature_resolution, mp.domain)
    rows = np.linspace(0, len(pred_map[0]) - 1, len(pred_map[0]))
    cols = np.linspace(0, len(pred_map) - 1, len(pred_map))
    indexes = list(it.product(rows, cols))
    for i in indexes:
        j = tuple(int(a) for a in i)
        truemap.fitness[j] = mp.fitness_fun(list(pred_map[j]))
        truemap.genomes[j] = pred_map[j]
    return truemap
def plot_fit_ls():
    """Scatter-plot the true fitness landscape over the first two search
    dimensions (300 x 180 grid of evaluation points)."""
    range1 = mp.domain.valid_ranges[0]
    range2 = mp.domain.valid_ranges[1]
    axis1 = np.linspace(range1[0], range1[1], 300)
    axis2 = np.linspace(range2[0], range2[1], 180)
    grid = list(it.product(axis1, axis2))
    fitness_vals = [mp.fitness_fun(point) for point in grid]
    xs = [point[0] for point in grid]
    ys = [point[1] for point in grid]
    fig, ax = plt.subplots()
    fig.set_figheight(5)
    fig.set_figwidth(10)
    scatter = ax.scatter(xs, ys, c=fitness_vals, s=1, cmap='viridis')
    plt.colorbar(scatter)
    # plotgenomeposition(pred_maps[-1][0])
    plt.show()
# sobolmap,fitness = getpredmap(initial_samples)
# sobol_fit_list.append(fitness)
# pickle.dump(sobol_fit_list, filehandler)
####
### Random Samples


def random_point(n, dims):
    """Return n points drawn uniformly from the unit hypercube [0, 1)^dims."""
    x = []
    for i in range(n):
        x.append(np.random.uniform(0, 1, dims))
    return x


random_fit_list = []
for i in range(1):
    ss.initseed(i)
    x = random_point(10, 10)
    # NOTE: the comprehension and loop below rebind `i`, shadowing the seed
    # index; harmless here because `i` is only read at the top of each pass.
    rand_points = [[x[i], mp.fitness_fun(x[i]), mp.feature_fun(x[i])]
                   for i in range(len(x))]
    # Collapse each point's fitness vector to a NaN-aware scalar sum.
    for i in range(len(rand_points)):
        rand_points[i][1] = np.nansum(rand_points[i][1])
    mp.map, edges = create_map(mp.feature_resolution, mp.domain)
    initialisemap(rand_points)
    randommap, fitness = getpredmap(rand_points)
    random_fit_list.append(fitness)
    # BUG FIX: the original pickled `sobol_fit_list` (copy-paste from the
    # Sobol block above) instead of the random-sampling results, and never
    # closed the handle; use the correct list and a context manager.
    with open('Results/Init-Test/Random/fitness.csv', 'wb') as filehandler:
        pickle.dump(random_fit_list, filehandler)
def additional_sampling(n_add_samples, sobol_set, sobol_point, map):
    '''additional_sampling - Samples are produced using a Sobol sequence that
    evenly selects elites from the current acquisition map. Those elites are
    then evaluated and added to the observation list (which will improve the
    surrogate model and also improve the prediction map)

    ##TODO If samples are invalid (invalid geometry, or did not converge in
    simulator), the next sample in the Sobol sequence is chosen. Lather,
    rinse, repeat until all initial samples are clean.

    Example: new_points = additional_sampling(100 , my_sobol, current_point , map)

    Inputs (arguments):
       n_add_samples - [ Integer ]  - number of samples to produce
       sobol_set     - [ List ]     - A set of random points in feature space
       sobol_point   - [ Integer ]  - An index for the sobol_set
       map           - [ Map Class ] - The current acquisition map.

    Inputs (from config file):
       mp.domain     - domain Class object
        .valid_ranges - [ List ] - To check validity of samples

    Outputs:
       valid_points - [ n_initial_samples * [ [ x ] , [ y ] , [ f ] ] ] ]
       sample_end   - [ Integer ] - An update to sobol_point (an index)

    Code Author: Paul Kent
    Warwick University
    email: [email protected]
    Oct 2020; Last revision: 16-Oct-2020
    '''
    # NOTE(review): the `map` parameter shadows the builtin `map`.
    valid_ranges = mp.domain.valid_ranges  # Valid Search domain ranges
    new_value = []   # NOTE(review): never used below
    new_sample = []  # NOTE(review): never used below
    n_missing = n_add_samples
    new_points = []
    # Draw 10x the requested number of Sobol points so duplicates/invalid
    # candidates can be discarded and enough unique niches remain.
    sample_end = sobol_point + 10 * n_add_samples
    random_genomes = [
        sobol_set[i][sobol_point:sample_end] for i in range(len(sobol_set))
    ]
    random_genomes = np.array(random_genomes).T
    # One row per candidate genome, one column per search dimension.
    random_genomes = np.reshape(random_genomes, (-1, len(valid_ranges)))
    # identify which niche each point belongs to (in feature space)
    niche_index = [
        nichefinder(mp.feature_fun(random_genomes[i]), map, mp.domain)
        for i in range(len(random_genomes))
    ]
    # Remove duplicates, keep track of the random genomes index (n) and the feature values (i)
    niche_index = [i for n, i in enumerate(niche_index) if i not in niche_index[:n]]
    valid_points = []
    # Walk the unique niches from the back of the list until enough valid
    # points are collected or the candidates run out.
    while n_missing > 0 and len(niche_index) > 0:
        niche_id = niche_index[-1]
        #print(niche_id)
        #print(map.fitness[ tuple( niche_id ) ])
        fit = map.fitness[tuple(niche_id)]
        gen = map.genomes[tuple(niche_id)]
        if not np.isnan(fit).any():
            # Re-evaluate the elite on the true fitness/feature functions.
            true_fit = mp.fitness_fun(gen)
            true_feat = mp.feature_fun(gen)
            # NOTE(review): `~` is bitwise NOT used here as elementwise
            # logical negation of np.isnan results.
            if ~np.isnan(true_fit) and ~np.isnan(true_feat).any():
                valid_points.append([gen, true_fit, true_feat])
                n_missing -= 1
            #else:
            #    true_fit = 0
            #    valid_points.append( [gen , true_fit, true_feat] )
            #    n_missing -= 1  ### Assume NAN is zero ### WARNING - TEST Assumption
        # Consume this niche whether or not it yielded a valid point.
        niche_index.pop()
        # valid = []
        # valid , n_missing = keep_valid( valid,
        #                                 np.array(new_points).T ,
        #                                 n_missing,
        #                                 validate )
        # valid_points.append(valid)
        if len(niche_index) <= 0:
            print('Not enough unique samples to make all new evaluations')
            print('This can happen on the first few runs')
            print('or could indicate a problem with your functions')
            n_missing = 0
    if n_missing <= 0:
        print(
            f'{Fore.GREEN}Success: {Style.RESET_ALL} New points to evaluate chosen from acquisition map'
        )
    #Reshaping
    # print('value check', validate(new_points[0].T))
    #new_points = [new_points[i].T for i in range(len(new_points))]
    # print(keep_valid( new_points.T,
    #                   new_points ,
    #                   n_add_samples,
    #                   validate ))
    # new_points = np.array(new_points).T
    #validating and sample from fitness/feature functions
    # valid_points = []
    # valid_points , n_missing = keep_valid( valid_points,
    #                                        np.array(new_points).T ,
    #                                        n_add_samples,
    #                                        validate )
    # Report how completely the request was satisfied.
    if len(valid_points) == n_add_samples:
        print(f'{Fore.GREEN}Success: {Style.RESET_ALL} New points evaluated')
    elif len(valid_points) == 0:
        print(f'{Fore.RED}FAILURE: {Style.RESET_ALL} No new Points selected ')
    else:
        print(
            f'{Fore.YELLOW}Warning: {Style.RESET_ALL} Not all new points evaluated correctly '
        )
    return (valid_points, sample_end, len(valid_points))
### mp.ME_params = (0.0, 2**6, 0.1, 2**9)
predmap = create_prediction_map(fit_gp, points)
#heatmap(np.array(fitness).reshape(25,25), 'none')
heatmap(predmap.fitness, 'Predicted landscape after 500 (400 additional) samples')
plot_GP(fit_gp)

import torch
from Visualisations import *

plot_3d_post_GP(1, 1, 50, 50, fit_gp, [0], [0], [53])
plot_3d_contour2(1, 1, 100, 100, rastrigin, [0], [0], [53])
mp.fitness_fun(data[25])
heatmap(data, 'adam')
data[0]          # NOTE(review): bare expression — notebook leftover, no effect here
predmap.genomes  # NOTE(review): bare expression — notebook leftover, no effect here
# NOTE(review): this call runs BEFORE the redefinition below, so it uses the
# earlier evalgenomes definition in this file.
valmap = evalgenomes(predmap.genomes.reshape(-1, 10))


def evalgenomes(genomes):
    """Evaluate genomes on the true fitness function into a fresh 25x25 map,
    filling cells in row-major order."""
    truemap, _ = create_map(mp.feature_resolution, mp.domain)
    rows = np.linspace(0, 24, 25)
    cols = np.linspace(0, 24, 25)
    indexes = list(it.product(rows, cols))
    for count, g in enumerate(genomes):
        j = tuple([int(a) for a in indexes[count]])
        truemap.fitness[j] = mp.fitness_fun(g)
        truemap.genomes[j] = g
    # BUG FIX: this redefinition dropped the return statement, so it always
    # returned None; restored to match the other evalgenomes in this file.
    return truemap
# Dump the accumulated binary data from the previous run before starting the
# seeded sweep below.
# NOTE(review): `binary_data` and the `seed` used in this filename come from
# earlier, out-of-view code; the handle is never closed — verify upstream.
filehandler = open(
    'Results/Random_sampling' + '/' + str(seed) + '10x10BDC.csv', 'wb')
pickle.dump(binary_data, filehandler)
seed = 2160
# Cumulative sample-count checkpoints at which the map score is measured.
valueranges = [20, 50, 100, 250, 500, 750, 1000]
for i in range(20):
    # NOTE(review): `values` is re-created every outer iteration, so only the
    # current seed's scores are kept — confirm this is intentional.
    values = []
    np.random.seed(seed)
    seed += 1
    # Latin-hypercube sample: 1000 points in 10 dimensions.
    initial_samples = np.array(lhsmdu.sample(1000, 10))
    print(str(valueranges[0]) + 'samples')
    # Seed the map with the first checkpoint's worth of samples.
    points = initial_samples[:valueranges[0]]
    fitness = mp.fitness_fun(points)
    features = mp.feature_fun(points)
    observations = [[points[i], fitness[i], features[i]]
                    for i in range(valueranges[0])]
    mp.map, edges = create_map(mp.feature_resolution, mp.domain)
    initialisemap(observations)
    print('Score = ' + str(np.nansum(mp.map.fitness.flatten())))
    values.append(np.nansum(mp.map.fitness.flatten()))
    # Feed the remaining samples in checkpoint-sized slices.
    for count, val in enumerate(valueranges[1:]):
        # Slice between consecutive checkpoints (count lags by one because we
        # enumerate valueranges[1:]).
        new_samples = initial_samples[valueranges[count]:valueranges[count + 1]]
        fitness = mp.fitness_fun(new_samples)
        features = mp.feature_fun(new_samples)
        new_observations = [[new_samples[i], fitness[i], features[i]]
                            for i in range(len(new_samples))]
        updatemapSAIL(new_observations)