def binary_fn(self, x, y):
    '''
    The black_box function is a binary (two-variable) function of the form
    y*sin(2*pi*x) + x*cos(2*pi*y)
    '''
    x_temp, y_temp, prev_x_temp, prev_y_temp = self.init_values(x, y)
    self.prev_position_x = x
    self.prev_position_y = y
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Calculation of the f(x) objective
    result = y * math.sin(2 * math.pi * x) + x * math.cos(2 * math.pi * y)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward is {}'.format(self.best))
    self.count += 1
    return result
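# A minimal standalone sketch of the same objective, useful for checking the
# surface independently of the robot. The helper name `binary_objective` is
# illustrative only and is not part of the original code.
import math

def binary_objective(x, y):
    # f(x, y) = y*sin(2*pi*x) + x*cos(2*pi*y)
    return y * math.sin(2 * math.pi * x) + x * math.cos(2 * math.pi * y)

# Example: at (x, y) = (0.25, 0.5) the terms are 0.5*sin(pi/2) + 0.25*cos(pi)
# = 0.5 - 0.25 = 0.25
assert abs(binary_objective(0.25, 0.5) - 0.25) < 1e-9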
def probability_density_fn(self, x):
    '''
    Negative Gaussian probability density:
    result = -(1/math.sqrt(2*math.pi*var))*(math.exp(-((x-mean)**2) / (2*var)))
    '''
    mean = 0.3
    var = 0.2
    prev_x_temp = self.poppy_angle_settings()
    if self.search_domain == (0, 1):
        x_temp = x * 180 - 90
    else:
        search_temp = self.search_domain[1]
        temp_x = 180 / (search_temp * 2)
        x_temp = (prev_x_temp + search_temp) * temp_x - 90
    self.prev_position_x = x
    position_difference = abs(x_temp - prev_x_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              position_difference_x=position_difference,
                              final_movement=final_flag)
    poppy_mov.single_dimensional_poppy_robot()

    # Calculation of the f(x) objective. scipy's norm takes the standard
    # deviation as its scale parameter, so the variance from the docstring is
    # square-rooted here.
    result = -stats.norm(mean, math.sqrt(var)).pdf(x)

    # Store the list of explored parameters for the BO function
    bo.Bayesian_optimization_main.explored_parameters_fn(x=x, result=result)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward is {}'.format(self.best))
    self.count += 1
    return result
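# A small sanity check, assuming scipy and using the same mean and variance as
# above, that the scipy call matches the closed-form Gaussian density from the
# docstring. scipy's `scale` argument is the standard deviation.
import math
from scipy import stats

mean, var = 0.3, 0.2
x = 0.5
closed_form = (1 / math.sqrt(2 * math.pi * var)) * math.exp(-((x - mean) ** 2) / (2 * var))
assert abs(stats.norm(mean, math.sqrt(var)).pdf(x) - closed_form) < 1e-9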
def ackley_fn(self, x, y):
    x_temp, y_temp, prev_x_temp, prev_y_temp = self.init_values(x, y)
    self.prev_position_x = x
    self.prev_position_y = y
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Ackley function: a + b + e + 20
    a = -20 * math.exp(-0.2 * math.sqrt(0.5 * (math.pow(x, 2) + math.pow(y, 2))))
    b = -math.exp(0.5 * (math.cos(2 * math.pi * x) + math.cos(2 * math.pi * y)))
    c = math.exp(1)
    d = 20
    result = a + b + c + d
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
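# A standalone sketch of the Ackley surface used above (the helper name is
# illustrative, not from the original code). Its global minimum is 0 at
# (0, 0), which gives a quick sanity check of the formula.
import math

def ackley(x, y):
    a = -20 * math.exp(-0.2 * math.sqrt(0.5 * (x ** 2 + y ** 2)))
    b = -math.exp(0.5 * (math.cos(2 * math.pi * x) + math.cos(2 * math.pi * y)))
    return a + b + math.e + 20

assert abs(ackley(0.0, 0.0)) < 1e-9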
def schaffer_N_4(self, x):
    prev_x_temp, prev_y_temp = self.poppy_angle_settings()
    x_temp = x[0, 0] * 180 - 90
    y_temp = -(x[0, 1] * 70) - 110
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Schaffer N.4 function (negated)
    a = (math.pow(
        math.cos(math.sin(abs(math.pow(x[0, 0], 2) - math.pow(x[0, 1], 2)))),
        2) - 0.5) / (math.pow(
            1 + 0.001 * (math.pow(x[0, 0], 2) + math.pow(x[0, 1], 2)), 2))
    result = -1 * (0.5 + a)

    # Log this evaluation and report the best point found so far
    self.logDicx[self.count] = x
    self.logDicy[self.count] = result
    print('\n Best X: ', self.logDicx[min(self.logDicy, key=self.logDicy.get)])
    print('\n Best Y: ', self.logDicy[min(self.logDicy, key=self.logDicy.get)])

    # Store the list of explored parameters for the BO function
    bo.Bayesian_optimization_main.explored_parameters_fn(
        x=self.logDicx, result=self.logDicy)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
def eesom_fn(self, x):
    # Map the normalised parameters x[0, 0] and x[0, 1] to joint angles,
    # following the same convention as schaffer_N_4 above
    prev_x_temp, prev_y_temp = self.poppy_angle_settings()
    x_temp = x[0, 0] * 180 - 90
    y_temp = -(x[0, 1] * 70) - 110
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Easom-style objective evaluated on the raw parameters
    a = math.cos(x[0, 0])
    b = math.cos(x[0, 1])
    c = np.exp(-(math.pow(x[0, 0] - np.pi, 2) + math.pow(x[0, 1] - np.pi, 2)))
    result = a * b * c

    # Log this evaluation and report the best point found so far
    self.logDicx[self.count] = x
    self.logDicy[self.count] = result
    print('\n Best X: ', self.logDicx[min(self.logDicy, key=self.logDicy.get)])
    print('\n Best Y: ', self.logDicy[min(self.logDicy, key=self.logDicy.get)])

    # Store the list of explored parameters for the BO function
    bo.Bayesian_optimization_main.explored_parameters_fn(
        x=self.logDicx, result=self.logDicy)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
def __init__(self, search_domain, is_two_dimensional=False):
    self.is_two_dimensional = is_two_dimensional
    self.search_domain = search_domain
    self.best = 0
    self.count = 0
    self.poppy_mov = poppy.PoppyBO(count=self.count)
    if self.is_two_dimensional:
        self.prev_position_x = 0
        self.prev_position_y = 0
    else:
        self.prev_position_x = 0
def __init__(self, search_domain, is_two_dimensional=False):
    self.is_two_dimensional = is_two_dimensional
    self.MAX = 0.95
    self.best = 0
    self.count = 0
    self.search_domain = search_domain
    self.poppy_mov = poppy.PoppyBO(count=self.count)
    if self.is_two_dimensional:
        self.logDicx = dict()
        self.logDicy = dict()
    else:
        self.prev_position_x = 0
def schaffer_N_4(self, x, y):
    x_temp, y_temp, prev_x_temp, prev_y_temp = self.init_values(x, y)
    self.prev_position_x = x
    self.prev_position_y = y
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Schaffer N.4 function
    a = (math.pow(math.cos(math.sin(abs(math.pow(x, 2) - math.pow(y, 2)))), 2)
         - 0.5) / (math.pow(1 + 0.001 * (math.pow(x, 2) + math.pow(y, 2)), 2))
    result = 0.5 + a
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
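# A standalone sketch of Schaffer N.4 for reference (the helper name is
# illustrative only). At the origin cos(sin(0)) = 1, so the fraction is 0.5
# and the function value is exactly 1.0, which makes a simple sanity check.
import math

def schaffer_n4(x, y):
    num = math.cos(math.sin(abs(x ** 2 - y ** 2))) ** 2 - 0.5
    den = (1 + 0.001 * (x ** 2 + y ** 2)) ** 2
    return 0.5 + num / den

assert abs(schaffer_n4(0.0, 0.0) - 1.0) < 1e-9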
def sinc_fn(self, x):
    prev_x_temp, prev_y_temp = self.poppy_angle_settings()
    if self.search_domain == (0, 1):
        x_temp = x * 180 - 90
    else:
        search_temp = self.search_domain[1]
        temp_x = 180 / (search_temp * 2)
        x_temp = (prev_x_temp + search_temp) * temp_x - 90
    self.prev_position_x = x
    position_difference_x = abs(x_temp - prev_x_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              position_difference_x=position_difference_x,
                              final_movement=final_flag)
    poppy_mov.single_dimensional_poppy_robot()

    # Negative normalised sinc: -sin(pi*x)/(pi*x)
    result = -np.sinc(x)

    # Store the list of explored parameters for the BO function
    bo.Bayesian_optimization_main.explored_parameters_fn(x=x, result=result)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
def sinc_fn(self, x):
    prev_x_temp, prev_y_temp = self.poppy_angle_settings()
    if self.search_domain == (0, 1):
        x_temp = x * 180 - 90
    else:
        search_temp = self.search_domain[1]
        temp_x = 180 / (search_temp * 2)
        x_temp = (prev_x_temp + search_temp) * temp_x - 90
    self.prev_position_x = x
    position_difference_x = abs(x_temp - prev_x_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              position_difference_x=position_difference_x,
                              final_movement=final_flag)
    poppy_mov.single_dimensional_poppy_robot()

    # Unnormalised sinc: sin(x)/x, with the removable singularity handled at 0
    if x == 0:
        result = 1
    else:
        result = math.sin(x) / x
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
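# Note on the two sinc variants above, assuming only numpy and math: np.sinc
# is the normalised sinc sin(pi*x)/(pi*x), while the branch above computes the
# unnormalised sin(x)/x, so the two agree only after rescaling the argument.
import math
import numpy as np

x = 0.5
unnormalised = math.sin(x) / x          # sin(x)/x, as computed above
normalised = float(np.sinc(x))          # sin(pi*x)/(pi*x)
assert abs(normalised - math.sin(math.pi * x) / (math.pi * x)) < 1e-9
assert abs(float(np.sinc(x / math.pi)) - unnormalised) < 1e-9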
def eesom_fn(self, x, y):
    x_temp, y_temp, prev_x_temp, prev_y_temp = self.init_values(x, y)
    self.prev_position_x = x
    self.prev_position_y = y
    position_difference_x = abs(x_temp - prev_x_temp)
    position_difference_y = abs(y_temp - prev_y_temp)
    final_flag = False
    poppy_mov = poppy.PoppyBO(next_position_x=x_temp,
                              next_position_y=y_temp,
                              position_difference_x=position_difference_x,
                              position_difference_y=position_difference_y,
                              final_movement=final_flag)
    poppy_mov.multi_dimensional_poppy_robot()

    # Easom function: -cos(x)*cos(y)*exp(-((x-pi)^2 + (y-pi)^2))
    a = math.cos(x)
    b = math.cos(y)
    c = math.exp(-(math.pow(x - math.pi, 2) + math.pow(y - math.pi, 2)))
    result = -(a * b * c)
    if self.best < abs(result):
        self.best = abs(result)
        print('The best outcome reward till now is {}'.format(self.best))
    self.count += 1
    return result
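# A standalone sketch of the Easom surface computed above (the helper name is
# illustrative, not from the original code). Its global minimum is -1 at
# (pi, pi), which gives a quick sanity check of the formula.
import math

def easom(x, y):
    return -(math.cos(x) * math.cos(y) *
             math.exp(-((x - math.pi) ** 2 + (y - math.pi) ** 2)))

assert abs(easom(math.pi, math.pi) + 1.0) < 1e-9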
def get_results(self):
    '''
    Displays the final results and plots related to the experiments.
    '''
    if not self.is_two_dimensional:
        # List of all the positions the robot has explored and exploited
        self.x_values = [i * 180 - 90 for i in self.x_values]
        print('The x observations are', self.x_values)
        print('The y observations are', self.y_values)

        # Final optimal position of the poppy after EXPLOITATION and EXPLORATION
        position = self.myBopt.x_opt * 180 - 90
        # x_values have already been mapped to joint angles above
        prev_position = self.x_values[-1]
        difference = abs(position - prev_position)
        final_flag = True
        poppy_mov = poppy.PoppyBO(next_position_x=position,
                                  position_difference_x=difference,
                                  final_movement=final_flag)
        poppy_mov.single_dimensional_poppy_robot()

        # Display of the optimal point and its reward value
        print('The optimal point of function is x -> {}'.format(self.myBopt.x_opt))
        print('The optimal value of function is y -> {}'.format(abs(self.myBopt.fx_opt)))

        # Plotting of the acquisition function and the convergence of the iterations
        self.myBopt.plot_acquisition()
        self.myBopt.plot_convergence()
    else:
        # Final optimal position of the poppy after EXPLOITATION and EXPLORATION
        position_x, position_y = self.myBopt.x_opt
        print('The best x value is {}'.format(position_x))
        print('The best y value is {}'.format(position_y))
        position_x = position_x * 180 - 90
        position_y = -(position_y * 70) - 110
        di_temp = self.x_dict_values.get(self.num_iter + 4)
        prev_position_x, prev_position_y = di_temp[0][0], di_temp[0][1]
        print('The last x value is {}'.format(prev_position_x))
        print('The last y value is {}'.format(prev_position_y))
        prev_position_x = prev_position_x * 180 - 90
        prev_position_y = -(prev_position_y * 70) - 110
        difference_x = abs(position_x - prev_position_x)
        difference_y = abs(position_y - prev_position_y)
        flag = True
        poppy_mov = poppy.PoppyBO(next_position_x=position_x,
                                  next_position_y=position_y,
                                  position_difference_x=difference_x,
                                  position_difference_y=difference_y,
                                  final_movement=flag)
        poppy_mov.multi_dimensional_poppy_robot()

        # Display of the optimal point and its reward value
        print('The optimal point of function is x -> {}'.format(self.myBopt.x_opt))
        print('The optimal value of function is y -> {}'.format(abs(self.myBopt.fx_opt)))

        # Plotting of the acquisition function and the convergence of the iterations
        self.myBopt.plot_acquisition()
        self.myBopt.plot_convergence()
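# The myBopt attributes used above (x_opt, fx_opt, plot_acquisition,
# plot_convergence) appear to match GPyOpt's BayesianOptimization interface.
# A minimal sketch, assuming GPyOpt is installed and using a stand-in
# objective in place of the robot-coupled functions:
import numpy as np
import GPyOpt

def objective(x):
    # GPyOpt passes candidate points as a 2D array of shape (n_points, n_dims),
    # consistent with the x[0, i] indexing used in the objectives above
    return -np.sinc(x[:, 0]).reshape(-1, 1)

domain = [{'name': 'x', 'type': 'continuous', 'domain': (0, 1)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=objective, domain=domain)
myBopt.run_optimization(max_iter=10)
print(myBopt.x_opt, myBopt.fx_opt)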