def test_lhd():
    lhd = LatinHypercube(dim=4, num_pts=10)
    X = lhd.generate_points()
    assert isinstance(lhd, ExperimentalDesign)
    assert np.all(X.shape == (10, 4))
    assert lhd.num_pts == 10
    assert lhd.dim == 4
def test_lhd_criterion():
    lhd = LatinHypercube(dim=4, num_pts=10, criterion='c')
    X = lhd.generate_points()
    assert isinstance(lhd, ExperimentalDesign)
    assert np.all(X.shape == (10, 4))
    assert lhd.num_pts == 10
    assert lhd.dim == 4
def test_lhd_round():
    num_pts = 10
    dim = 3
    lb = np.array([1, 2, 3])
    ub = np.array([3, 4, 5])
    int_var = np.array([1])

    np.random.seed(0)
    lhd = LatinHypercube(dim=dim, num_pts=num_pts)
    X = lhd.generate_points(lb=lb, ub=ub, int_var=int_var)
    assert np.all(np.round(X[:, 1]) == X[:, 1])  # Should be integers
    assert np.all(np.max(X, axis=0) <= ub)
    assert np.all(np.min(X, axis=0) >= lb)
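# --- Usage sketch (illustrative, not part of the test suite) ---
# A minimal example of the API exercised by the tests above, assuming the
# same imports (pySOT's LatinHypercube and numpy as np): points come back
# in the unit cube unless bounds are supplied.
example_design = LatinHypercube(dim=3, num_pts=10)
X_unit = example_design.generate_points()  # shape (10, 3), values in [0, 1]
X_box = example_design.generate_points(
    lb=np.array([1, 2, 3]),  # per-dimension lower bounds
    ub=np.array([3, 4, 5]),  # per-dimension upper bounds
    int_var=np.array([1]),   # dimension 1 is rounded to integer values
)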
def __init__(self, worker_id, data, response_surface, maxeval, nsamples,
             exp_design=None, sampling_method=None, extra=None,
             extra_vals=None):

    # Check stopping criterion
    self.start_time = time.time()
    if maxeval < 0:  # Time budget
        self.maxeval = np.inf
        self.time_budget = np.abs(maxeval)
    else:
        self.maxeval = maxeval
        self.time_budget = np.inf

    # Import problem information
    self.worker_id = worker_id
    self.data = data
    self.fhat = response_surface
    if self.fhat is None:
        self.fhat = RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                   maxp=maxeval)
    self.fhat.reset()  # Just to be sure!

    self.nsamples = nsamples
    self.extra = extra
    self.extra_vals = extra_vals

    # Default to generating sampling points using a Symmetric Latin Hypercube
    self.design = exp_design
    if self.design is None:
        if self.data.dim > 50:
            self.design = LatinHypercube(data.dim, data.dim + 1)
        else:
            self.design = SymmetricLatinHypercube(data.dim, 2 * (data.dim + 1))

    self.xrange = np.asarray(data.xup - data.xlow)

    # Algorithm parameters
    self.sigma_min = 0.005
    self.sigma_max = 0.2
    self.sigma_init = 0.2
    self.failtol = max(5, data.dim)
    self.succtol = 3
    self.numeval = 0
    self.status = 0
    self.sigma = 0
    self.resubmitter = RetryStrategy()
    self.xbest = None
    self.fbest = np.inf
    self.fbest_old = None

    # Set up search procedures and initialize
    self.sampling = sampling_method
    if self.sampling is None:
        self.sampling = CandidateDYCORS(data)

    self.check_input()

    # Start with first experimental design
    self.sample_initial()
def optimize(self):
    """Method used to run the Genetic algorithm

    :return: Returns the best individual and its function value
    :rtype: numpy.array, float
    """
    # Initialize population
    if isinstance(self.start, np.ndarray):
        if self.start.shape[0] != self.nindividuals or \
                self.start.shape[1] != self.nvariables:
            raise ValueError("Initial population has incorrect size")
        if any(np.min(self.start, axis=0) < self.lower_boundary) or \
                any(np.max(self.start, axis=0) > self.upper_boundary):
            raise ValueError("Initial population is outside the domain")
        population = self.start
    elif self.start == "SLHD":
        exp_des = SymmetricLatinHypercube(self.nvariables, self.nindividuals)
        population = self.lower_boundary + exp_des.generate_points() * \
            (self.upper_boundary - self.lower_boundary)
    elif self.start == "LHD":
        exp_des = LatinHypercube(self.nvariables, self.nindividuals)
        population = self.lower_boundary + exp_des.generate_points() * \
            (self.upper_boundary - self.lower_boundary)
    elif self.start == "Random":
        population = self.lower_boundary + np.random.rand(
            self.nindividuals, self.nvariables) * \
            (self.upper_boundary - self.lower_boundary)
    else:
        raise ValueError("Unknown argument for initial population")

    new_population = []
    # Round positions
    if len(self.integer_variables) > 0:
        new_population = np.copy(population)
        population[:, self.integer_variables] = np.round(
            population[:, self.integer_variables])
        for i in self.integer_variables:
            ind = np.where(population[:, i] < self.lower_boundary[i])
            population[ind, i] += 1
            ind = np.where(population[:, i] > self.upper_boundary[i])
            population[ind, i] -= 1

    # Evaluate all individuals
    function_values = self.function(population)
    if len(function_values.shape) == 2:
        function_values = np.squeeze(np.asarray(function_values))

    # Save the best individual
    ind = np.argmin(function_values)
    best_individual = np.copy(population[ind, :])
    best_value = function_values[ind]

    if len(self.integer_variables) > 0:
        population = new_population

    # Main loop
    for _ in range(self.ngenerations):
        # Do tournament selection to select the parents
        competitors = np.random.randint(
            0, self.nindividuals,
            (self.nindividuals, self.tournament_size))
        ind = np.argmin(function_values[competitors], axis=1)
        winner_indices = np.zeros(self.nindividuals, dtype=int)
        for i in range(self.tournament_size):  # This loop is short
            winner_indices[np.where(ind == i)] = \
                competitors[np.where(ind == i), i]

        parent1 = population[winner_indices[0:self.nindividuals//2], :]
        parent2 = population[
            winner_indices[self.nindividuals//2:self.nindividuals], :]

        # Averaging crossover
        cross = np.where(np.random.rand(
            self.nindividuals//2) < self.p_cross)[0]
        nn = len(cross)  # Number of crossovers
        alpha = np.random.rand(nn, 1)

        # Create the new chromosomes
        parent1_new = np.multiply(alpha, parent1[cross, :]) + \
            np.multiply(1 - alpha, parent2[cross, :])
        parent2_new = np.multiply(alpha, parent2[cross, :]) + \
            np.multiply(1 - alpha, parent1[cross, :])
        parent1[cross, :] = parent1_new
        parent2[cross, :] = parent2_new
        population = np.concatenate((parent1, parent2))

        # Apply mutation
        scale_factors = self.sigma * (
            self.upper_boundary - self.lower_boundary)  # Scale
        perturbation = np.random.randn(
            self.nindividuals, self.nvariables)  # Generate perturbations
        perturbation = np.multiply(
            perturbation, scale_factors)  # Scale accordingly
        perturbation = np.multiply(perturbation, (
            np.random.rand(self.nindividuals,
                           self.nvariables) < self.p_mutation))

        population += perturbation  # Add perturbation
        population = np.maximum(np.reshape(
            self.lower_boundary, (1, self.nvariables)), population)
        population = np.minimum(np.reshape(
            self.upper_boundary, (1, self.nvariables)), population)

        # Round chromosomes
        new_population = []
        if len(self.integer_variables) > 0:
            new_population = np.copy(population)
            population = round_vars(population, self.integer_variables,
                                    self.lower_boundary, self.upper_boundary)

        # Keep the best individual
        population[0, :] = best_individual

        # Evaluate all individuals
        function_values = self.function(population)
        if len(function_values.shape) == 2:
            function_values = np.squeeze(np.asarray(function_values))

        # Save the best individual
        ind = np.argmin(function_values)
        best_individual = np.copy(population[ind, :])
        best_value = function_values[ind]

        # Use the positions that are not rounded
        if len(self.integer_variables) > 0:
            population = new_population

    return best_individual, best_value
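# --- Illustrative sketch (not from the original source) ---
# Standalone numpy demo of the tournament-selection step in optimize() above:
# each of n tournaments draws `tournament_size` random competitors, and the
# competitor with the lowest function value wins. The variable names mirror
# the method; the vectorized gather is equivalent to the short loop used there.
import numpy as np

np.random.seed(0)
n, tournament_size = 8, 3
function_values = np.random.rand(n)  # fitness of the current population
competitors = np.random.randint(0, n, (n, tournament_size))
ind = np.argmin(function_values[competitors], axis=1)  # winning column per row
winner_indices = competitors[np.arange(n), ind]  # index of each winner
print(winner_indices)  # indices of the selected parents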
def pysot_cube(objective, n_trials, n_dim, with_count=False, method=None,
               design=None):
    """Minimize an objective on the unit cube [0, 1]^n_dim with pySOT.

    :param objective: function mapping a point in the unit cube to a scalar
    :param n_trials: maximum number of function evaluations
    :param n_dim: problem dimension
    :param with_count: if True, also return the evaluation count
    :param method: one of 'srbf', 'ei', 'dycors', 'lcb' or 'random'
    :param design: one of 'latin', 'symmetric' or 'factorial'
    :return: best value and best point (and the evaluation count
        if with_count is True)
    """
    logging.getLogger('pySOT').setLevel(logging.ERROR)

    num_threads = 1
    asynchronous = True
    max_evals = n_trials
    gp = GenericProblem(dim=n_dim, objective=objective)

    if design == 'latin':
        exp_design = LatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
    elif design == 'symmetric':
        exp_design = SymmetricLatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
    elif design == 'factorial':
        exp_design = TwoFactorial(dim=n_dim)
    else:
        raise ValueError('design should be latin, symmetric or factorial')

    # Create a strategy and a controller
    # (SRBFStrategy, EIStrategy, DYCORSStrategy, RandomStrategy, LCBStrategy)
    controller = ThreadController()
    if method is None:
        raise ValueError("method should be srbf, ei, dycors, lcb or random")
    if method.lower() == 'srbf':
        surrogate = RBFInterpolant(dim=n_dim,
                                   lb=np.array([0.0] * n_dim),
                                   ub=np.array([1.0] * n_dim),
                                   kernel=CubicKernel(),
                                   tail=LinearTail(n_dim))
        controller.strategy = SRBFStrategy(max_evals=max_evals, opt_prob=gp,
                                           exp_design=exp_design,
                                           surrogate=surrogate,
                                           asynchronous=asynchronous)
    elif method.lower() == 'ei':
        surrogate = GPRegressor(dim=n_dim,
                                lb=np.array([0.0] * n_dim),
                                ub=np.array([1.0] * n_dim))
        controller.strategy = EIStrategy(max_evals=max_evals, opt_prob=gp,
                                         exp_design=exp_design,
                                         surrogate=surrogate,
                                         asynchronous=asynchronous)
    elif method.lower() == 'dycors':
        surrogate = RBFInterpolant(dim=n_dim,
                                   lb=np.array([0.0] * n_dim),
                                   ub=np.array([1.0] * n_dim),
                                   kernel=CubicKernel(),
                                   tail=LinearTail(n_dim))
        controller.strategy = DYCORSStrategy(max_evals=max_evals, opt_prob=gp,
                                             exp_design=exp_design,
                                             surrogate=surrogate,
                                             asynchronous=asynchronous)
    elif method.lower() == 'lcb':
        surrogate = GPRegressor(dim=n_dim,
                                lb=np.array([0.0] * n_dim),
                                ub=np.array([1.0] * n_dim))
        controller.strategy = LCBStrategy(max_evals=max_evals, opt_prob=gp,
                                          exp_design=exp_design,
                                          surrogate=surrogate,
                                          asynchronous=asynchronous)
    elif method.lower() == 'random':
        controller.strategy = RandomStrategy(max_evals=max_evals, opt_prob=gp)
    else:
        raise ValueError("Didn't recognize method passed to pysot")

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, gp.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
    best_x = result.params[0].tolist()
    return (result.value, best_x, gp.feval_count) if with_count \
        else (result.value, best_x)
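# --- Usage sketch (illustrative, not from the original source) ---
# Minimizing a toy quadratic on the unit cube with the helper above, assuming
# the same imports as pysot_cube; the objective and settings here are made up
# for illustration.
def example_objective(u):
    return float(np.sum((np.asarray(u) - 0.25) ** 2))

best_val, best_x, n_evals = pysot_cube(example_objective, n_trials=40,
                                       n_dim=3, with_count=True,
                                       method='dycors', design='symmetric')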
def __init__(self, worker_id, data, response_surface, maxeval, nsamples,
             exp_design=None, sampling_method=None, archiving_method=None,
             extra=None, extra_vals=None, store_sim=False):

    # Check stopping criterion
    self.start_time = time.time()
    if maxeval < 0:  # Time budget
        self.maxeval = np.inf
        self.time_budget = np.abs(maxeval)
    else:
        self.maxeval = maxeval
        self.time_budget = np.inf

    # Import problem information
    self.worker_id = worker_id
    self.data = data
    self.fhat = []
    if response_surface is None:
        for _ in range(self.data.nobj):
            self.fhat.append(
                RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                               maxp=maxeval))  # MOPLS only
    else:
        for _ in range(self.data.nobj):
            response_surface.reset()  # Just to be sure!
            self.fhat.append(deepcopy(response_surface))  # MOPLS only

    self.ncenters = nsamples
    self.nsamples = 1
    self.numinit = None
    self.extra = extra
    self.extra_vals = extra_vals
    self.store_sim = store_sim

    # Default to generating sampling points using a Symmetric Latin Hypercube
    self.design = exp_design
    if self.design is None:
        if self.data.dim > 50:
            self.design = LatinHypercube(data.dim, data.dim + 1)
        else:
            self.design = SymmetricLatinHypercube(data.dim, 2 * (data.dim + 1))

    self.xrange = np.asarray(data.xup - data.xlow)

    # Algorithm parameters
    self.sigma_min = 0.005
    self.sigma_max = 0.2
    self.sigma_init = 0.2
    self.failtol = max(5, data.dim)
    self.failcount = 0
    self.contol = 5
    self.numeval = 0
    self.status = 0
    self.sigma = 0
    self.resubmitter = RetryStrategy()
    self.xbest = None
    self.fbest = None
    self.fbest_old = None
    self.improvement_prev = 1

    # Population of centers and long-term archive
    self.nd_archives = []
    self.new_pop = []
    self.sim_res = []
    if archiving_method is None:
        self.memory_archive = NonDominatedArchive(200)
    else:
        self.memory_archive = archiving_method
    self.evals = []
    self.maxfit = min(200, 20 * self.data.dim)
    self.d_thresh = 1.0

    # Set up search procedures and initialize
    self.sampling = sampling_method
    if self.sampling is None:
        self.sampling = EvolutionaryAlgorithm(data)

    self.check_input()

    # Start with first experimental design
    self.sample_initial()
def setup_backend(self, params, strategy="SRBF", surrogate="RBF", design=None):
    self.opt_problem = BBoptOptimizationProblem(params)

    # Select the experimental design
    design_kwargs = dict(dim=self.opt_problem.dim)
    if design is None:
        self.exp_design = EmptyExperimentalDesign(**design_kwargs)
    elif design == "latin_hypercube":
        self.exp_design = LatinHypercube(
            num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
    elif design == "symmetric_latin_hypercube":
        self.exp_design = SymmetricLatinHypercube(
            num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
    elif design == "two_factorial":
        self.exp_design = TwoFactorial(**design_kwargs)
    elif callable(design):
        self.exp_design = design(**design_kwargs)
    else:
        raise TypeError(
            "unknown experimental design {!r}".format(design))

    # Select the surrogate model
    surrogate_kwargs = dict(dim=self.opt_problem.dim,
                            lb=self.opt_problem.lb,
                            ub=self.opt_problem.ub)
    if surrogate == "RBF":
        self.surrogate = RBFInterpolant(
            kernel=LinearKernel() if design is None else CubicKernel(),
            tail=ConstantTail(self.opt_problem.dim) if design is None
            else LinearTail(self.opt_problem.dim),
            **surrogate_kwargs)
    elif surrogate == "GP":
        self.surrogate = GPRegressor(**surrogate_kwargs)
    elif callable(surrogate):
        self.surrogate = surrogate(**surrogate_kwargs)
    else:
        raise TypeError("unknown surrogate {!r}".format(surrogate))

    # Select the optimization strategy
    strategy_kwargs = dict(max_evals=sys.maxsize,
                           opt_prob=self.opt_problem,
                           exp_design=self.exp_design,
                           surrogate=self.surrogate,
                           asynchronous=True,
                           batch_size=1)
    if strategy == "SRBF":
        self.strategy = SRBFStrategy(**strategy_kwargs)
    elif strategy == "EI":
        self.strategy = EIStrategy(**strategy_kwargs)
    elif strategy == "DYCORS":
        self.strategy = DYCORSStrategy(**strategy_kwargs)
    elif strategy == "LCB":
        self.strategy = LCBStrategy(**strategy_kwargs)
    elif callable(strategy):
        self.strategy = strategy(**strategy_kwargs)
    else:
        raise TypeError("unknown strategy {!r}".format(strategy))