def __init__(self, optimizer, X, Y, args):
    '''
    Initialize data exporter from initial data (X, Y).
    '''
    self.optimizer = optimizer
    self.problem = optimizer.real_problem
    self.n_var, self.n_obj = self.problem.n_var, self.problem.n_obj
    self.batch_size = self.optimizer.selection.batch_size
    self.iter = 0
    self.transformation = optimizer.transformation

    # saving path related
    self.result_dir = get_result_dir(args)

    n_samples = X.shape[0]

    # compute initial hypervolume
    pfront, pidx = find_pareto_front(Y, return_index=True)
    pset = X[pidx]
    if args.ref_point is None:
        args.ref_point = np.max(Y, axis=0)
    hv_value = calc_hypervolume(pfront, ref_point=args.ref_point)

    # init data frame
    column_names = ['iterID']
    d1 = {'iterID': np.zeros(n_samples, dtype=int)}
    d2 = {'iterID': np.zeros(len(pset), dtype=int)}

    # design variables
    for i in range(self.n_var):
        var_name = f'x{i + 1}'
        d1[var_name] = X[:, i]
        d2[var_name] = pset[:, i]
        column_names.append(var_name)

    # performance
    for i in range(self.n_obj):
        obj_name = f'f{i + 1}'
        d1[obj_name] = Y[:, i]
        obj_name = f'Pareto_f{i + 1}'
        d2[obj_name] = pfront[:, i]

    # predicted performance
    for i in range(self.n_obj):
        obj_pred_name = f'Expected_f{i + 1}'
        d1[obj_pred_name] = np.zeros(n_samples)
        obj_pred_name = f'Uncertainty_f{i + 1}'
        d1[obj_pred_name] = np.zeros(n_samples)
        obj_pred_name = f'Acquisition_f{i + 1}'
        d1[obj_pred_name] = np.zeros(n_samples)

    d1['Hypervolume_indicator'] = np.full(n_samples, hv_value)

    self.export_data = pd.DataFrame(data=d1)  # export all data
    self.export_pareto = pd.DataFrame(data=d2)  # export pareto data
    column_names.append('ParetoFamily')
    self.export_approx_pareto = pd.DataFrame(columns=column_names)  # export pareto approximation data

    self.has_family = hasattr(self.optimizer.selection, 'has_family') and self.optimizer.selection.has_family
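# A minimal, self-contained sketch of what `find_pareto_front` and `calc_hypervolume`
# presumably compute, judging by how they are used above (the helper names below, the
# minimization convention, and the restriction to two objectives are assumptions):
# the non-dominated subset of Y, and the area it dominates w.r.t. a reference point.
import numpy as np

def pareto_front_2d(Y):
    '''Return (front, indices) of the non-dominated rows of Y (minimization).'''
    idx = np.array([i for i, y in enumerate(Y)
                    if not np.any(np.all(Y <= y, axis=1) & np.any(Y < y, axis=1))], dtype=int)
    return Y[idx], idx

def hypervolume_2d(front, ref_point):
    '''Area dominated by a 2-D front relative to ref_point (minimization).'''
    front = front[np.argsort(front[:, 0])]  # sort by first objective
    hv, prev_f2 = 0.0, ref_point[1]
    for f1, f2 in front:
        hv += max(ref_point[0] - f1, 0.0) * max(prev_f2 - f2, 0.0)
        prev_f2 = min(prev_f2, f2)
    return hv

Y_demo = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0], [3.0, 3.0]])
front, idx = pareto_front_2d(Y_demo)  # drops the dominated point [3, 3]
print(hypervolume_2d(front, ref_point=np.max(Y_demo, axis=0)))  # 4.0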
def save_args(args):
    '''
    Save arguments to yaml file
    '''
    result_dir = get_result_dir(args)
    args_path = os.path.join(result_dir, 'args.yml')

    os.makedirs(os.path.dirname(args_path), exist_ok=True)

    with open(args_path, 'w') as f:
        yaml.dump(args, f, default_flow_style=False, sort_keys=False)
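# Hypothetical round-trip sketch: reading the saved arguments back. If `args` is an
# argparse.Namespace, yaml.dump writes Python object tags, so the unsafe loader is
# needed; if `args` is a plain dict, yaml.safe_load suffices. The path is illustrative only.
import yaml

with open('result/example_run/args.yml') as f:
    loaded_args = yaml.unsafe_load(f)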
def experiment_dir(self):
    '''
    Resolve the experiment directory: reuse the resume or eval directory when set,
    otherwise use a new experiment name under base_expdir.
    '''
    if self.args.resume:
        self.expname = os.path.split(self.args.resumedir)[-1]
        return self.args.resumedir
    elif self.args.eval:
        self.expname = os.path.split(self.args.evaldir)[-1]
        return self.args.evaldir
    expname = utils.get_result_dir(self.base_expdir, self.args.suffix)
    self.expname = expname
    return os.path.join(self.base_expdir, expname)
def __init__(self, X, Y, args):
    '''
    Initialize data exporter from initial data (X, Y).
    '''
    self.n_var, self.n_obj = args.n_var, args.n_obj
    self.batch_size = args.batch_size
    self.iter = 0
    self.X, self.Y = X, Y
    self.ref_point = np.max(Y, axis=0) if args.ref_point is None else args.ref_point

    # saving path related
    self.result_dir = get_result_dir(args)

    n_samples = X.shape[0]

    # compute hypervolume
    pfront, pidx = find_pareto_front(Y, return_index=True)
    pset = X[pidx]
    hv_value = calc_hypervolume(pfront, ref_point=self.ref_point)

    # init data frame
    column_names = ['iterID']
    d1 = {'iterID': np.zeros(n_samples, dtype=int)}
    d2 = {'iterID': np.zeros(len(pset), dtype=int)}

    # design variables
    for i in range(self.n_var):
        var_name = f'x{i + 1}'
        d1[var_name] = X[:, i]
        d2[var_name] = pset[:, i]
        column_names.append(var_name)

    # performance
    for i in range(self.n_obj):
        obj_name = f'f{i + 1}'
        d1[obj_name] = Y[:, i]
        obj_name = f'Pareto_f{i + 1}'
        d2[obj_name] = pfront[:, i]

    d1['Hypervolume_indicator'] = np.full(n_samples, hv_value)

    self.export_data = pd.DataFrame(data=d1)  # export all data
    self.export_pareto = pd.DataFrame(data=d2)  # export pareto data
    column_names.append('ParetoFamily')
    self.export_approx_pareto = pd.DataFrame(columns=column_names)  # export pareto approximation data
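# Illustrative only: the flat column layout that the exporters above build, reproduced
# on a tiny random dataset. Column names mirror the code above; the hypervolume value
# is a placeholder rather than a real indicator computation.
import numpy as np
import pandas as pd

n_samples, n_var, n_obj = 5, 3, 2
X_demo = np.random.rand(n_samples, n_var)
Y_demo = np.random.rand(n_samples, n_obj)

d1 = {'iterID': np.zeros(n_samples, dtype=int)}
d1.update({f'x{i + 1}': X_demo[:, i] for i in range(n_var)})
d1.update({f'f{i + 1}': Y_demo[:, i] for i in range(n_obj)})
d1['Hypervolume_indicator'] = np.full(n_samples, 0.0)  # placeholder value

print(pd.DataFrame(data=d1).head())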