def add_noise(x, y, noise_type, snr=None, mean=0, std=1, probability_threshold=0.5, seed=42):
    """Build an ``x -> y`` lookup table with optional noise added to ``y``.

    :param x: sequence of input points (each convertible via get_numpy_array)
    :param y: numpy array of target values, shape ``(n,)``
    :param noise_type: one of ``"gaussian_noise"``, ``"white_noise"``,
        ``"salt_and_papper_noise"``, or a falsy value for no noise
    :param snr: desired signal-to-noise ratio; when given, the generated
        noise is rescaled so that ``mean(y**2) / mean(noise**2) == snr``
    :param mean: mean of the gaussian noise
    :param std: standard deviation of the gaussian noise
    :param probability_threshold: threshold for salt-and-pepper noise
    :param seed: RNG seed forwarded to the noise generators
    :return: dict mapping ``str(get_numpy_array(xx))`` to the noisy y value
    :rtype: dict
    :raises ValueError: if ``noise_type`` is not recognized
    """
    if noise_type == "gaussian_noise":
        noise = generate_gaussian_noise(y.shape[0], mean, std, seed)
    elif noise_type == "white_noise":
        noise = generate_white_noise(y.shape[0], seed)
    elif noise_type == "salt_and_papper_noise":
        noise = generate_salt_and_papper_noise(y, probability_threshold, seed)
    elif not noise_type:
        # Falsy noise_type means "no noise": keep y unchanged.
        noise = np.zeros(y.shape[0])
    else:
        # ValueError is a subclass of Exception, so existing callers
        # catching Exception still work.
        raise ValueError(f"Invalid noise type: {noise_type}")

    if snr is not None:
        # Rescale the noise so the resulting signal-to-noise ratio
        # (signal power / noise power) equals `snr`.  The original code
        # computed y_power but never used it and scaled the noise by
        # noise_power / snr, which does not produce the requested SNR.
        y_power = np.mean(y * y)
        noise_power = np.mean(noise * noise)
        if noise_power > 0:
            noise = noise * np.sqrt(y_power / (noise_power * snr))

    # Both branches of the original built this dict with identical loops;
    # a single comprehension replaces them.  str(...) of the numpy
    # representation serves as a hashable lookup key.
    new_y = y + noise
    return {str(get_numpy_array(xx)): yy for xx, yy in zip(x, new_y)}
def __call__(self):
    """Calculate the function value at the current point ``self.x``."""
    # A populated lookup table (e.g. pre-computed noisy targets) takes
    # precedence over the analytic formula.
    if self.x2y:
        key = str(get_numpy_array(self.x))
        return self.x2y[key]
    x0, x1, x2 = self.x[0], self.x[1], self.x[2]
    return tf.math.exp((x0 ** 4 + x1 ** 2) / 100.0) + x2
def __call__(self):
    """Calculate the function value at the current point ``self.x``."""
    # A populated lookup table (e.g. pre-computed noisy targets) takes
    # precedence over the analytic formula.
    if self.x2y:
        key = str(get_numpy_array(self.x))
        return self.x2y[key]
    deg_to_rad = math.pi / 180
    cubic_term = self.x[0] ** 3
    sine_term = tf.sin((self.x[0] + self.x[1]) * deg_to_rad)
    decay_term = math.e ** (-self.x[2])
    rational_term = 1 / (self.x[3] ** 2 + 4)
    return cubic_term + sine_term + decay_term + rational_term
def __call__(self):
    """Calculate the function value at the current point ``self.x``."""
    # A populated lookup table (e.g. pre-computed noisy targets) takes
    # precedence over the analytic formula.
    if self.x2y:
        key = str(get_numpy_array(self.x))
        return self.x2y[key]
    product_term = -self.x[0] * self.x[1]
    sine_term = tf.sin(self.x[2] * math.pi / 180)
    quartic_term = self.x[2] ** 4
    return product_term + sine_term + quartic_term
def optimize(f, optimize_option):
    """Minimize target function with specific option

    :param f: optimize function
    :type f: heir from BaseFunction
    :param optimize_option: specific optimize option
        (keys: "x" — starting point (list of tf.Variable; TODO confirm),
        "eps" — convergence threshold on the step size,
        "opt" — a tf/keras optimizer, "max_steps" — optional step limit)
    :type optimize_option: dict
    :return: result of optimization
    :rtype: dict
    """
    history = {}
    # Deep copies so the caller's initial point is never mutated in place.
    x_cur = copy.deepcopy(optimize_option["x"])
    x_old = copy.deepcopy(optimize_option["x"])
    steps_num = 0
    # Best point seen so far and its loss value.
    x_min = copy.deepcopy(optimize_option["x"])
    loss_min = f(x_min)
    # Effectively unbounded unless the caller sets "max_steps".
    max_steps = optimize_option.get("max_steps", 10**10)
    eps = optimize_option["eps"]
    # Known analytic minima of f (may be None); used only for reporting
    # how close the found minimum is to each of them.
    result_vals = f.get_minimum()
    opt = optimize_option["opt"]
    while True:  # Minimize cycle
        if steps_num >= max_steps:
            break
        # Record the forward pass on the tape so gradients w.r.t. x_cur
        # can be taken.
        with tf.GradientTape() as t:
            loss_val = f(x_cur)
        gradients = t.gradient(loss_val, x_cur)
        # Updates x_cur in place via the optimizer.
        opt.apply_gradients(zip(gradients, x_cur))
        # L2 distance between the previous and current iterate.
        x_delta = get_delta(x_old, x_cur)
        history[steps_num] = {
            "x": get_numpy_array(x_cur),
            "x_old": get_numpy_array(x_old),
            "loss": loss_val.numpy(),
            "x_delta (L2 Norm)": x_delta,
        }
        x_old = copy.deepcopy(x_cur)
        # Track the best (lowest-loss) iterate separately from the final one.
        if loss_val < loss_min:
            x_min = copy.deepcopy(x_cur)
            loss_min = loss_val
        # Converged: the step was smaller than eps.
        # NOTE(review): steps_num is not incremented on this exit path, so
        # the reported count is one less than the iterations performed.
        if x_delta < eps:
            break
        steps_num += 1
    # Distance from the found minimum to each known analytic minimum.
    min_delta = []
    min_delta_result = float('inf')
    if result_vals is not None:
        for result_val in result_vals:
            min_delta.append({
                "min": get_numpy_array(result_val),
                "delta": get_delta(result_val, x_min),
            })
            min_delta_result = min(min_delta_result, min_delta[-1]["delta"])
    result = {
        "x_final": get_numpy_array(x_cur),
        "min_delta_list (L2 Norm)": min_delta,
        "min_delta_result": min_delta_result,
        "steps_num": steps_num,
        "history": history,
        "loss_min": loss_min.numpy(),
        "x_min": get_numpy_array(x_min),
    }
    return result
def approximate(f, f_target, approximate_option):
    """Approximate target function with specific option

    :param f: approximate function
    :type f: heir from BaseApproximateFunction
    :param f_target: target function
    :type f_target: heir from BaseTargetFunction
    :param approximate_option: specific approximate option
        (keys: "params" — trainable parameters (list of tf.Variable;
        TODO confirm), "x" — training points, "x_validate" — validation
        points, "loss_function", "opt" — tf/keras optimizer,
        "eps" — convergence threshold, "max_steps" — optional step limit)
    :type approximate_option: dict
    :return: result of approximation
    :rtype: dict
    """

    def _evaluate(points, params):
        # Evaluate the approximation f(params) and the target f_target()
        # at every point.  Both functions receive the point through the
        # stateful set_var_list API; the per-point call order (f before
        # f_target) matches the original so tape recording is unchanged.
        f_vals = []
        target_vals = []
        for x in points:
            f.set_var_list(x)
            f_target.set_var_list(x)
            f_vals.append(f(params))
            target_vals.append(f_target())
        return target_vals, f_vals

    history = {}
    # Deep copies so the caller's initial parameters are never mutated.
    params_cur = copy.deepcopy(approximate_option["params"])
    params_old = copy.deepcopy(approximate_option["params"])
    steps_num = 0
    # Best parameters seen so far (lowest training loss).
    params_min = copy.deepcopy(approximate_option["params"])
    var_list = approximate_option["x"]
    var_list_validate = approximate_option["x_validate"]
    loss_function = approximate_option["loss_function"]
    opt = approximate_option["opt"]
    eps = approximate_option["eps"]
    # Effectively unbounded unless the caller sets "max_steps".
    max_steps = approximate_option.get("max_steps", 10**10)
    # NOTE(review): a disabled noise-injection branch (an inert triple-quoted
    # string literal reading "val"/"snr"/"noise_type" options) was removed
    # here as dead code; restore from VCS history if re-enabling it.

    # Initial nearness of the target and approximate functions.
    f_target_val, f_val = _evaluate(var_list, params_cur)
    loss_min = loss_function(f_target_val, f_val)

    while True:  # Approximate cycle
        if steps_num >= max_steps:
            break
        with tf.GradientTape() as t:
            # The forward pass must run under the tape so gradients
            # w.r.t. params_cur are recorded.
            f_target_val, f_val = _evaluate(var_list, params_cur)
            # Calculate nearness of target and approximate function
            loss_val = loss_function(f_target_val, f_val)
        gradients = t.gradient(loss_val, params_cur)
        # Updates params_cur in place via the optimizer.
        opt.apply_gradients(zip(gradients, params_cur))
        params_delta = get_delta(params_old, params_cur)
        history[steps_num] = {
            "params": get_numpy_array(params_cur),
            "params_old": get_numpy_array(params_old),
            "loss": loss_val.numpy(),
            "params_delta (L2 Norm)": params_delta,
        }
        params_old = copy.deepcopy(params_cur)
        if loss_val < loss_min:
            params_min = copy.deepcopy(params_cur)
            loss_min = loss_val
        # Converged: the parameter step was smaller than eps.
        if params_delta < eps:
            break
        steps_num += 1

    # Validation loss at the best parameters found during training.
    f_target_val, f_val = _evaluate(var_list_validate, params_min)
    loss_validate = loss_function(f_target_val, f_val)

    return {
        "steps_num": steps_num,
        "history": history,
        "loss_min": loss_min.numpy(),
        "loss_validate": loss_validate.numpy(),
        "params_min": get_numpy_array(params_min),
    }