Example 1
 def generate_raw_samples(self, batch_shape: torch.Size) -> Tensor:
     """
     Generates raw_samples according to the settings specified in init.
     :param batch_shape: batch_shape of solutions to generate
     :return: raw samples
     """
     # no previous solutions to reuse: draw all raw samples at random
     if self.previous_solutions is None:
         samples = (constrained_rand(
             (self.raw_samples, *batch_shape, 1, self.dim_x),
             inequality_constraints=self.inequality_constraints,
             dtype=self.dtype,
             device=self.device,
         ) * (self.bounds[1] - self.bounds[0]) + self.bounds[0])
         return samples
     else:
         # reuse a fraction of the previous solutions (up to 1 - random_frac of
         # raw_samples) and fill the remainder with fresh random samples
         if (self.previous_solutions.size(0) <
             (1 - self.random_frac) * self.raw_samples):
             num_reused = self.previous_solutions.size(0)
             num_random = self.raw_samples - num_reused
         else:
             num_reused = self.raw_samples - int(
                 self.raw_samples * self.random_frac)
             num_random = int(self.raw_samples * self.random_frac)
         # randomly pick which of the previous solutions to reuse
         idx = torch.randint(self.previous_solutions.size(0),
                             (num_reused, *batch_shape))
         reused = self.previous_solutions[idx, :, :]
         random_samples = (constrained_rand(
             (num_random, *batch_shape, 1, self.dim_x),
             inequality_constraints=self.inequality_constraints,
             dtype=self.dtype,
             device=self.device,
         ) * (self.bounds[1] - self.bounds[0]) + self.bounds[0])
         samples = torch.cat((reused, random_samples), dim=0)
         return samples
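
The helper constrained_rand is called throughout these examples but is not shown. A minimal rejection-sampling sketch that is consistent with the call sites above could look like the following; the constraint format (BoTorch-style (indices, coefficients, rhs) tuples read as sum_i coefficients[i] * X[..., indices[i]] >= rhs) and the retry cap are assumptions, not taken from the source.

    import torch
    from torch import Tensor
    from typing import List, Optional, Tuple


    def constrained_rand(
        size: Tuple[int, ...],
        inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
        **kwargs,
    ) -> Tensor:
        """Uniform samples in [0, 1]^d, re-drawing any sample that violates the
        constraints. Each (indices, coefficients, rhs) constraint is read as
        sum_i coefficients[i] * X[..., indices[i]] >= rhs."""
        samples = torch.rand(size, **kwargs)
        if not inequality_constraints:
            return samples
        for _ in range(1000):  # cap on rejection-sampling rounds
            violated = torch.zeros(size[:-1], dtype=torch.bool, device=samples.device)
            for indices, coefficients, rhs in inequality_constraints:
                lhs = (samples[..., indices] * coefficients).sum(dim=-1)
                violated = violated | (lhs < rhs)
            if not violated.any():
                break
            # re-draw only the violating samples
            samples[violated] = torch.rand(int(violated.sum()), size[-1], **kwargs)
        return samples
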
Example 2
 def initialize_gp(
     self, init_samples: Optional[Tensor] = None, n: Optional[int] = None
 ) -> None:
     """
     Initialize the GP with the given set of samples or number of samples.
     If neither is given, defaults to n = 2 * dim + 2 random samples.
     :param init_samples: Tensor of samples to initialize with. Overrides n.
     :param n: number of samples to initialize with
     """
     if init_samples is not None:
         self.X = init_samples.reshape(-1, self.dim).to(
             dtype=self.dtype, device=self.device
         )
     else:
         # draw the initial points uniformly at random, subject to the
         # problem's inequality constraints
         self.X = constrained_rand(
             (n or 2 * self.dim + 2, self.dim),
             self.function.inequality_constraints,
             dtype=self.dtype,
             device=self.device,
         )
     # evaluate the function at the initial points, then fit the GP
     self.Y = self.function(self.X)
     self.fit_gp()
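
A possible usage pattern, assuming an experiment object exp that exposes initialize_gp, dim, dtype, and device (these names are illustrative, not from the source):

    import torch

    # `exp` is a hypothetical experiment instance defined elsewhere.
    # Use the default of 2 * dim + 2 constrained random initial points:
    exp.initialize_gp()

    # Or pass explicit initial samples, which override n:
    init = torch.rand(10, exp.dim, dtype=exp.dtype, device=exp.device)
    exp.initialize_gp(init_samples=init)
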
Example 3
    def one_iteration(
        self, acqf: AcquisitionFunction
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """
        Do a single iteration of the algorithm.
        :param acqf: The acquisition function to use. Pass the class itself,
            not an instance.
        :return: current best solution & value, the acqf value, and the candidate
            (next point to sample)
        """
        iteration_start = time()
        # for these acquisition functions, restrict the current best to
        # previously evaluated points
        past_only = acqf in [
            ExpectedImprovement,
            ProbabilityOfImprovement,
            NoisyExpectedImprovement,
        ]
        current_best_sol, current_best_value = self.current_best(past_only=past_only)

        if self.random_sampling:
            # random sampling baseline: skip the acquisition function and draw
            # the candidate uniformly at random within the constraints
            candidate = constrained_rand(
                (self.q, self.dim),
                self.function.inequality_constraints,
                dtype=self.dtype,
                device=self.device,
            )
            value = torch.tensor([0], dtype=self.dtype, device=self.device)
        else:
            args = {"model": self.model}
            if acqf in [ExpectedImprovement, ProbabilityOfImprovement]:
                # TODO: PoI gets stuck sometimes - seems like it cannot find enough
                # strictly positive entries
                args["best_f"] = current_best_value
            elif acqf == NoisyExpectedImprovement:
                # TODO: not supported with SingleTaskGP model
                args["X_observed"] = self.X
            elif acqf == UpperConfidenceBound:
                # TODO: gets negative weight while picking restart points - only
                # sometimes
                args["beta"] = getattr(self, "beta", 0.2)
            elif acqf == qMaxValueEntropy:
                args["candidate_set"] = constrained_rand(
                    (self.num_restarts * self.raw_multiplier, self.dim),
                    self.function.inequality_constraints,
                )
            elif acqf == qKnowledgeGradient:
                args["current_value"] = -current_best_value
            else:
                raise ValueError(
                    "Unexpected type / value for acqf. acqf must be a class "
                    "reference to one of the specified acquisition functions."
                )
            # construct the acquisition function and optimize it to obtain the
            # next candidate and its acquisition value
            acqf_obj = acqf(**args)
            candidate, value = optimize_acqf(
                acq_function=acqf_obj,
                bounds=self.bounds,
                q=self.q,
                num_restarts=self.num_restarts,
                raw_samples=self.num_restarts * self.raw_multiplier,
            )
        candidate = candidate.detach()
        value = value.detach()

        if self.verbose:
            print("Candidate: ", candidate, " acqf value: ", value)

        iteration_end = time()
        print("Iteration completed in %s" % (iteration_end - iteration_start))

        candidate_point = candidate.reshape(self.q, self.dim)

        observation = self.get_obj(candidate_point)
        # update the model input data for refitting
        self.X = torch.cat((self.X, candidate_point), dim=0)
        self.Y = torch.cat((self.Y, observation), dim=0)

        # noting that X and Y are updated
        self.passed = True
        # construct and fit the GP
        self.fit_gp()
        # noting that gp fit successfully updated
        self.passed = False

        return current_best_sol, current_best_value, value, candidate_point
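
A sketch of how one_iteration might be driven, assuming an experiment object exp that defines it; ExpectedImprovement comes from BoTorch, and the 20-iteration budget is arbitrary:

    from botorch.acquisition import ExpectedImprovement

    # `exp` is a hypothetical experiment instance defined elsewhere.
    for i in range(20):
        best_sol, best_val, acqf_val, next_point = exp.one_iteration(
            ExpectedImprovement
        )
        print(f"iteration {i}: best value so far {best_val}")
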
Example 4
    def one_iteration(self, **kwargs) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """
        Do a single iteration of the algorithm.
        :param kwargs: ignored
        :return: current best solution & value, the KG value, and the candidate
            (next point to sample)
        """
        iteration_start = time()
        inner_seed = int(torch.randint(100000, (1,)))
        self.optimizer.new_iteration()
        self.inner_optimizer.new_iteration()
        current_best_sol, current_best_value = self.current_best(
            past_only=self.apx, inner_seed=inner_seed
        )

        if self.random_sampling:
            candidate = constrained_rand(
                (self.q, self.dim),
                self.function.inequality_constraints,
                dtype=self.dtype,
                device=self.device,
            )
            value = torch.tensor([0]).to(candidate)
        else:
            if self.apx_cvar:
                acqf = ApxCVaRKG(current_best_rho=current_best_value, **vars(self))
            elif self.tts_apx_cvar:
                acqf = TTSApxCVaRKG(
                    current_best_rho=current_best_value,
                    inner_optimizer=self.inner_optimizer.optimize,
                    **{k: v for k, v in vars(self).items() if k != "inner_optimizer"}
                )
            elif self.one_shot:
                acqf = OneShotrhoKG(
                    current_best_rho=current_best_value,
                    inner_seed=inner_seed,
                    **vars(self)
                )
            elif self.apx:
                acqf = rhoKGapx(
                    current_best_rho=current_best_value,
                    past_x=self.X[:, : self.dim_x],
                    inner_seed=inner_seed,
                    **vars(self)
                )
            else:
                acqf = rhoKG(
                    inner_optimizer=self.inner_optimizer.optimize,
                    current_best_rho=current_best_value,
                    inner_seed=inner_seed,
                    **{k: v for k, v in vars(self).items() if k != "inner_optimizer"}
                )
            if self.disc:
                candidate, value = self.optimizer.optimize_outer(
                    acqf, self.w_samples, random_w=self.random_w
                )
            else:
                candidate, value = self.optimizer.optimize_outer(
                    acqf, random_w=self.random_w
                )
        candidate = candidate.detach()
        value = value.detach()

        if self.verbose:
            print("Candidate: ", candidate, " KG value: ", value)

        iteration_end = time()
        print("Iteration completed in %s" % (iteration_end - iteration_start))

        if self.one_shot or self.apx_cvar:
            # one-shot style solutions carry extra solver variables beyond the
            # candidate itself; only the leading q * dim entries are the candidate
            candidate_point = candidate[..., : self.q * self.dim].reshape(
                self.q, self.dim
            )
        else:
            candidate_point = candidate.reshape(self.q, self.dim)

        observation = self.function(candidate_point)
        # update the model input data for refitting
        self.X = torch.cat((self.X, candidate_point), dim=0)
        self.Y = torch.cat((self.Y, observation), dim=0)

        # noting that X and Y are updated
        self.passed = True
        # construct and fit the GP
        self.fit_gp()
        # noting that gp fit successfully updated
        self.passed = False

        return current_best_sol, current_best_value, value, candidate_point
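
The same kind of driver loop applies to this variant; since keyword arguments are ignored, one_iteration is simply called without an acquisition class. The object name exp and the budget are again assumptions:

    # `exp` is a hypothetical experiment instance defined elsewhere.
    for i in range(50):
        best_sol, best_val, kg_val, next_point = exp.one_iteration()
        print(f"iteration {i}: best value {best_val}, KG value {kg_val}")
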