def fitness_calc(self, problem, src_models, target_model, sample_size,
                 sub_sample_size):
    # This could be implemented in a more optimized way using vectorization,
    # but that would hurt modularity.
    start = time()

    # Normalize the transfer coefficients and build the mixture model.
    normalized_alpha = self.genes / np.sum(self.genes)
    mixModel = MixtureModel(src_models, alpha=normalized_alpha)
    mixModel.add_target_model(target_model)
    # mixModel.createTable(Chromosome.genes_to_numpy(pop), True, 'umd')
    # mixModel.EMstacking()
    # mixModel.mutate()

    print('sample start')
    offsprings = mixModel.sample(sample_size)
    print('sample end')

    print('selecting start')
    idx = np.random.randint(sample_size, size=sub_sample_size)
    offsprings = offsprings[idx]  # Creating a sub-sample of the samples
    print('selecting end')

    offsprings = np.array(
        [Chromosome(offspring) for offspring in offsprings])

    # Estimate the fitness of the transfer coefficients as the mean fitness
    # of the sub-sampled offspring.
    sfitness = np.zeros(sub_sample_size)
    print('fitness_calc start')
    for i in range(sub_sample_size):
        sfitness[i] = offsprings[i].fitness_calc(problem)
    print('fitness_calc end')

    self.fitness = np.mean(sfitness)
    self.fitness_calc_time = time() - start
    # best_offspring = np.max(offsprings)
    return self.fitness, offsprings
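# ---------------------------------------------------------------------------
# Illustration (not part of the original implementation): a minimal,
# self-contained sketch of the Monte-Carlo fitness estimate performed above.
# The helper below is hypothetical; it replaces MixtureModel.sample and the
# per-offspring fitness with simple numpy stand-ins so the sub-sampling and
# averaging logic can be run in isolation. The returned alpha only shows the
# normalization step; the stand-in sampler ignores it.
def _sketch_subsampled_fitness(genes, sample_size=100, sub_sample_size=20,
                               n_vars=10, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    # Normalize the transfer coefficients, as fitness_calc does.
    alpha = genes / np.sum(genes)
    # Stand-in for mixModel.sample(): random binary candidate solutions.
    samples = rng.integers(0, 2, size=(sample_size, n_vars))
    # Keep a random sub-sample, then average a toy fitness (here: ones count).
    idx = rng.integers(sample_size, size=sub_sample_size)
    sub_sample = samples[idx]
    sub_fitness = sub_sample.sum(axis=1)
    return float(np.mean(sub_fitness)), alpha
# Example: _sketch_subsampled_fitness(np.array([0.2, 0.3, 0.5]))
# ---------------------------------------------------------------------------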
def fitness_calc_pole(self, net, cart, s_len, src_models, target_model,
                      sample_size, solution_found=None, mutation_vec=None,
                      prev_samples=None, efficient_version=False):
    start = time()

    if not efficient_version or (mutation_vec is None):
        normalized_alpha = self.genes / np.sum(self.genes)
    else:
        normalized_alpha = np.clip(mutation_vec, 0, None)

    mixModel = MixtureModel(src_models, alpha=normalized_alpha)
    mixModel.add_target_model(target_model)

    if efficient_version:
        # The incremental (efficient) sampling path is not implemented for
        # the pole-balancing problem.
        raise NotImplementedError(
            'efficient_version is not supported by fitness_calc_pole')

    offsprings = mixModel.sample(sample_size)
    # idx = np.random.randint(sample_size, size=sub_sample_size)
    # offsprings = offsprings[idx]  # Creating a sub-sample of the samples
    offsprings = np.array(
        [ChromosomePole(offspring) for offspring in offsprings])

    func_eval_num = 0
    sfitness = np.zeros(sample_size)
    for i in range(sample_size):
        sfitness[i] = offsprings[i].fitness_calc(net, cart, s_len)
        # Count evaluations and flag success only while no solution has been
        # found yet (solution_found is a shared flag when provided).
        if solution_found is not None and not solution_found.value:
            func_eval_num += 1
            if sfitness[i] - 2000 > -0.0001:
                solution_found.value = True

    self.fitness = np.mean(sfitness)
    self.fitness_calc_time = time() - start
    # best_offspring = np.max(offsprings)
    return self.fitness, offsprings, func_eval_num
def fitness_calc_pole(self, net, cart, s_len, src_models, target_model,
                      sample_size, mutation_strength, samples_count,
                      solution_found=None):
    start = time()

    if not all(self.genes == self.genes[0]):
        # Zero out transfer coefficients below 1% of the uniform weight
        # 1 / (number of source models + 1), then renormalize.
        termination_mask = (self.genes > 1 / (len(src_models) + 1) * 0.01) * 1.0
        genes = termination_mask * self.genes
        genes = genes / np.sum(genes)
        print('genes after normalization: ', genes)
    else:
        genes = self.genes
        print('first iteration!? genes not normalized')

    # Initializing the mixture model with the pruned transfer coefficients.
    mixModel = MixtureModel(src_models, alpha=genes)
    mixModel.add_target_model(target_model)

    offsprings, mutation_strength, samples_count, fitness_mean, eval_num = \
        mixModel.sample_enhanced(sample_size, cart, mutation_strength,
                                 samples_count, net=net, s_len=s_len,
                                 mutation=False,
                                 solution_found=solution_found,
                                 problem_type='pole')

    self.fitness = fitness_mean
    self.fitness_calc_time = time() - start
    print('self.fitness_calc_time (m): ', self.fitness_calc_time / 60)
    # best_offspring = np.max(offsprings)
    return self.fitness, offsprings, mutation_strength, samples_count, eval_num
def fitness_calc_enhanced(self, problem, src_models, target_model,
                          sample_size, mutation_strength, samples_count,
                          max_sampling_num=None, mutation=True,
                          problem_type='knapsack'):
    start = time()

    if not all(self.genes == self.genes[0]):
        # Zero out transfer coefficients below 1% of the uniform weight
        # 1 / (number of source models + 1), then renormalize. The comparison
        # is parenthesized so the boolean mask, not the threshold, is cast to
        # float.
        termination_mask = (self.genes > 1 / (len(src_models) + 1) * 0.01) * 1.0
        genes = termination_mask * self.genes
        genes = genes / np.sum(genes)
    else:
        genes = self.genes
        print('first iteration!? genes not normalized')

    # Initializing the mixture model with the pruned transfer coefficients.
    mixModel = MixtureModel(src_models, alpha=genes)
    mixModel.add_target_model(target_model)

    offsprings, mutation_strength, samples_count, fitness_mean = \
        mixModel.sample_enhanced(sample_size, problem, mutation_strength,
                                 samples_count, max_sampling_num,
                                 mutation=mutation,
                                 problem_type=problem_type)

    self.fitness = fitness_mean
    self.fitness_calc_time = time() - start
    return self.fitness, offsprings, mutation_strength, samples_count
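# ---------------------------------------------------------------------------
# Illustration (not part of the original implementation): the pruning step
# used by fitness_calc_pole / fitness_calc_enhanced in isolation. Coefficients
# below 1% of the uniform weight 1 / (n_sources + 1) are zeroed before
# renormalization; note the parentheses around the comparison so that the
# boolean mask, not the threshold, is multiplied by 1.0.
def _sketch_prune_and_normalize(genes, n_sources):
    import numpy as np
    threshold = 1 / (n_sources + 1) * 0.01
    termination_mask = (genes > threshold) * 1.0
    pruned = termination_mask * genes
    return pruned / np.sum(pruned)
# Example: _sketch_prune_and_normalize(np.array([0.5, 0.4995, 0.0005]), 2)
# keeps only the first two coefficients and renormalizes them.
# ---------------------------------------------------------------------------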
def fitness_calc(self, problem, src_models, target_model, sample_size,
                 sub_sample_size, mutation_vec=None, prev_samples=None,
                 efficient_version=False):
    # This could be implemented in a more optimized way using vectorization,
    # but that would hurt modularity.
    start = time()

    if not efficient_version or (mutation_vec is None):
        normalized_alpha = self.genes / np.sum(self.genes)
    else:
        normalized_alpha = np.clip(mutation_vec, 0, None)

    mixModel = MixtureModel(src_models, alpha=normalized_alpha)
    mixModel.add_target_model(target_model)
    # mixModel.createTable(Chromosome.genes_to_numpy(pop), True, 'umd')
    # mixModel.EMstacking()
    # mixModel.mutate()

    if efficient_version:
        # Incremental version: sample per source model and reuse the previous
        # samples, only removing/adding what the mutation vector dictates.
        offsprings = mixModel.sample_dic(sample_size)
        flat_offsprings = np.array([])
        is_prev = (prev_samples is not None) and (mutation_vec is not None)

        if is_prev:
            # Removing samples: negative entries of the mutation vector
            # translate into how many previous samples of that source are
            # dropped.
            removing_samples = np.clip(
                np.ceil(mutation_vec * sample_size).astype(int), None, 0)
            for i in range(len(removing_samples)):
                if removing_samples[i] != 0:
                    r_num = len(prev_samples[i]) + removing_samples[i]
                    if r_num != 0:
                        prev_samples[i] = np.random.choice(prev_samples[i],
                                                           r_num,
                                                           replace=False)
                    else:
                        prev_samples[i] = None

            # Adding samples drawn from the updated mixture.
            for i in range(len(offsprings)):
                if offsprings[i] is not None:
                    offspring_add = [
                        Chromosome(offspring) for offspring in offsprings[i]
                    ]
                    flat_offsprings = np.append(flat_offsprings,
                                                offspring_add)
                    if prev_samples[i] is None:
                        prev_samples[i] = np.array(offspring_add)
                    else:
                        prev_samples[i] = np.append(prev_samples[i],
                                                    offspring_add, axis=0)
            offsprings = prev_samples

        # Fitness calculation over all retained samples.
        self.fitness = 0
        count = 0
        for i in range(len(offsprings)):
            if offsprings[i] is not None:
                if not is_prev:
                    offspring_add = np.array([
                        Chromosome(offspring) for offspring in offsprings[i]
                    ])
                    flat_offsprings = np.append(flat_offsprings,
                                                offspring_add)
                    offsprings[i] = offspring_add
                for j in range(len(offsprings[i])):
                    self.fitness += offsprings[i][j].fitness_calc(problem)
                    count += 1
        self.fitness /= count
        self.fitness_calc_time = time() - start
        return self.fitness, flat_offsprings, offsprings
    else:
        offsprings = mixModel.sample(sample_size)
        idx = np.random.randint(sample_size, size=sub_sample_size)
        offsprings = offsprings[idx]  # Creating a sub-sample of the samples
        offsprings = np.array(
            [Chromosome(offspring) for offspring in offsprings])

        sfitness = np.zeros(sub_sample_size)
        for i in range(sub_sample_size):
            sfitness[i] = offsprings[i].fitness_calc(problem)

        self.fitness = np.mean(sfitness)
        self.fitness_calc_time = time() - start
        # best_offspring = np.max(offsprings)
        return self.fitness, offsprings
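# ---------------------------------------------------------------------------
# Illustration (not part of the original implementation): how the efficient
# branch of fitness_calc turns a mutation vector into per-source removals.
# This hypothetical helper only shows the counting logic: negative entries of
# mutation_vec drop roughly |delta| * sample_size previous samples of the
# corresponding source, while positive entries mean new samples are drawn for
# that source by the mixture model.
def _sketch_removal_counts(mutation_vec, prev_counts, sample_size):
    import numpy as np
    mutation_vec = np.asarray(mutation_vec, dtype=float)
    prev_counts = np.asarray(prev_counts, dtype=int)
    # Non-positive part of the scaled mutation vector = samples to drop.
    removing = np.clip(np.ceil(mutation_vec * sample_size).astype(int),
                       None, 0)
    # Remaining samples per source (never below zero).
    return np.clip(prev_counts + removing, 0, None)
# Example: _sketch_removal_counts([-0.1, 0.0, 0.2], [20, 20, 20], 100)
# -> array([10, 20, 20]): ten samples of the first source are dropped, and the
# third source will instead receive newly drawn samples.
# ---------------------------------------------------------------------------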