Example #1
 def step(self):
     super().step()
     if self.dynamic_ref:
         # update ref vector to max observed if it was initially unspecified
         ref_vector = self.apply_weighting(self.p).max(axis=0) + 1
         if np.any(ref_vector != self.ref_vector):
             # update hypervolume calc to include new ref and update ref
             self.hpv = FonsecaHyperVolume(reference_point=ref_vector)
             self.ref_vector = ref_vector
             self.chv = self._compute_hypervolume()
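The update above only fires when the reference vector was left unspecified: it keeps the reference at the observed objective-wise maximum plus one, so every observed point keeps strictly dominating it. A minimal standalone sketch of that invariant, assuming two minimised objectives and plain numpy in place of the optimiser class:

import numpy as np

# toy observed objective vectors (rows = solutions, columns = minimised objectives)
p = np.array([[1.0, 4.0],
              [2.0, 2.5],
              [3.0, 1.0]])
ref_vector = p.max(axis=0) + 1              # initial dynamic reference: [4.0, 5.0]

# a new, worse observation arrives
p = np.vstack([p, [4.5, 0.5]])
new_ref = p.max(axis=0) + 1                 # [5.5, 5.0]

if np.any(new_ref != ref_vector):
    ref_vector = new_ref                    # the hypervolume object would be rebuilt here

assert np.all(p < ref_vector)               # every point still strictly dominates the reference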
Example #2
 def scalarise(self, x, kwargs={}):
     '''
     Hypervolume improvement (HypI) computation for a given set of solutions.
     See the paper for a full description.

     Parameters
     ----------
     x (np.array): decision vectors.
     kwargs (dict): a dictionary of options:
         'ref_vector' (np.array): reference vector.
         'approximate_ref' (bool): whether to approximate the reference vector
                                   using the minimum and maximum of the function
                                   responses.

     Returns an array of hypervolume improvements.
     '''
     start = time.time()
     ref_vector = kwargs.get('ref_vector', None)
     approximate_ref = kwargs.get('approximate_ref', False)
     y = self.m_obj_eval(x)
     self.X = x
     n_data = x.shape[0]
     h = np.zeros(n_data)
     if approximate_ref:
         ref_vector = np.max(
             y, axis=0) + 0.1 * (np.max(y, axis=0) - np.min(y, axis=0))
         print("New Reference vector: ", ref_vector)
     y, comp_mat = self.get_dom_matrix(y, ref_vector)
     shells = []
     h_shells = []
     loc_comp_mat = comp_mat.copy()
     hpv = FH(ref_vector)
     del_inds = []
     # shell ranking
     while True:
         fr_inds = self.get_front(y, loc_comp_mat, del_inds)
         if fr_inds.shape[0] == 0:
             break
         shells.append(fr_inds)
         h_shells.append(hpv.assess_non_dom_front(y[fr_inds]))
         del_inds = np.concatenate([fr_inds, del_inds], axis=0)
         loc_comp_mat[:, fr_inds] = loc_comp_mat[fr_inds, :] = -1
     n_shells = len(shells)
     # HypI computation
     for i in range(n_shells - 1):
         for j in shells[i]:
             comp_row = comp_mat[j]
             # find dominated next shell indices
             nondominated = np.where(comp_row[shells[i + 1]] == 3)[0]
             nfr = np.concatenate([[j], shells[i + 1][nondominated]])
             h[j] = hpv.assess_non_dom_front(y[nfr])
     print('Total time (minutes): ', (time.time() - start) / 60.0)
     return np.reshape(h, (-1, 1))
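Before any HypI values are assigned, the loop above peels the solutions into successive non-dominated shells. A minimal standalone sketch of that peeling step, assuming minimisation and a plain O(n^2) dominance check in place of the example's precomputed comparison matrix:

import numpy as np

def pareto_shells(y):
    """Return a list of index arrays, one per non-dominated shell (minimisation)."""
    remaining = np.arange(y.shape[0])
    shells = []
    while remaining.size:
        front = []
        for i in remaining:
            others = y[remaining]
            # i is dominated if some other point is <= in every objective
            # and strictly < in at least one
            dominated = np.any(np.all(others <= y[i], axis=1) &
                               np.any(others < y[i], axis=1))
            if not dominated:
                front.append(i)
        front = np.array(front)
        shells.append(front)
        remaining = np.setdiff1d(remaining, front)
    return shells

y = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0], [3.0, 3.0], [4.0, 4.0]])
print(pareto_shells(y))    # first shell [0, 1, 2], then [3], then [4]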
Example #3
 def current_hpv(self):
     """
     Calculate the current hypervolume.
     """
     y = self.Y
     if self.n_obj > 1:
         n_data = self.X.shape[0]
         y, comp_mat = self.get_dom_matrix(y, self.ref_vector)
         front_inds = self.get_front(y, comp_mat)
         hpv = FH(self.ref_vector)
         return hpv.assess_non_dom_front(y[front_inds])
     else:
         return np.min(y)
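assess_non_dom_front returns the hypervolume dominated by a non-dominated front with respect to the reference vector. For two minimised objectives that quantity can be checked by hand with the usual sort-and-sum-rectangles construction; a minimal sketch in plain numpy, not the FonsecaHyperVolume implementation itself:

import numpy as np

def hypervolume_2d(front, ref):
    # sort by the first objective ascending; on a non-dominated front the
    # second objective is then descending
    front = front[np.argsort(front[:, 0])]
    hv, prev_f2 = 0.0, ref[1]
    for f1, f2 in front:
        hv += (ref[0] - f1) * (prev_f2 - f2)   # rectangle between this point and the previous one
        prev_f2 = f2
    return hv

front = np.array([[1.0, 3.0], [2.0, 1.0]])
print(hypervolume_2d(front, ref=np.array([4.0, 4.0])))   # 7.0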
Example #4
    def __init__(self, *args, ref_vector=None, **kwargs):

        super().__init__(*args, **kwargs)
        self.gain = -norm.ppf(0.5 * (0.5 ** (1 / self.n_objectives)))

        # set dynamic reference vector if not specified
        if ref_vector is None:
            self.dynamic_ref = True
            self.ref_vector = self.apply_weighting(self.p).max(axis=0) + 1
        else:
            self.dynamic_ref = False
            try:
                self.ref_vector = np.array(self.apply_weighting(ref_vector)).reshape(-1)
                assert self.ref_vector.shape == (self.n_objectives,)
            except AssertionError:
                raise AssertionError("Supplied reference vector is not"
                                     "formatted correctly. should be 1d array "
                                     "of size {}.".format(self.n_objectives))

        self.hpv = FonsecaHyperVolume(reference_point=self.ref_vector)
        self.chv = self._compute_hypervolume()
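The gain set in the constructor is the factor applied to the predictive standard deviation when forming the lower confidence bound (see Example #5). A quick sketch evaluating the same expression for a few objective counts, assuming scipy is available:

from scipy.stats import norm

for n_objectives in (1, 2, 3):
    gain = -norm.ppf(0.5 * (0.5 ** (1 / n_objectives)))
    print(n_objectives, round(gain, 3))   # the gain shrinks as the number of objectives grows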
Example #5
class SmsEgo(BayesianOptimiser):

    def __init__(self, *args, ref_vector=None, **kwargs):

        super().__init__(*args, **kwargs)
        self.gain = -norm.ppf(0.5 * (0.5 ** (1 / self.n_objectives)))

        # set dynamic reference vector if not specified
        if ref_vector is None:
            self.dynamic_ref = True
            self.ref_vector = self.apply_weighting(self.p).max(axis=0) + 1
        else:
            self.dynamic_ref = False
            try:
                self.ref_vector = np.array(self.apply_weighting(ref_vector)).reshape(-1)
                assert self.ref_vector.shape == (self.n_objectives,)
            except AssertionError:
                raise AssertionError("Supplied reference vector is not"
                                     "formatted correctly. should be 1d array "
                                     "of size {}.".format(self.n_objectives))

        self.hpv = FonsecaHyperVolume(reference_point=self.ref_vector)
        self.chv = self._compute_hypervolume()
        # self.current_hv = self._compute_hypervolume()

    def _generate_filename(self):
        return super()._generate_filename()

    def _compute_hypervolume(self, p=None):
        """
        Calculate the current hypervolume, or that of the provided points p.
        """
        if p is None:
            p = self.apply_weighting(self.p)

        if self.n_objectives > 1:
            assert p.ndim <= 2, "error in attainment front shape."
            if p.ndim == 1:
                p = p.reshape(1, self.n_objectives)
            assert p.shape[1] == self.n_objectives
            volume = self.hpv.assess_non_dom_front(p)
            return volume
        else:
            return np.min(p)

    def step(self):
        super().step()
        if self.dynamic_ref:
            # update ref vector to max observed if it was initially unspecified
            ref_vector = self.apply_weighting(self.p).max(axis=0) + 1
            if np.any(ref_vector != self.ref_vector):
                # update hypervolume calc to include new ref and update ref
                self.hpv = FonsecaHyperVolume(reference_point=ref_vector)
                self.ref_vector = ref_vector
                self.chv = self._compute_hypervolume()
        # update hypervolume
        # self.current_hv = self._compute_hypervolume()

    def _compute_epsilon(self, p_scaled):
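        # additive epsilon: per-objective spread of the current front divided by
        # the front size plus a fraction (c) of b_count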
        n_pfr = len(p_scaled)
        c = 1 - (1 / 2 ** self.n_objectives)

        # TODO is b_count supposed to be the remaining budget?
        b_count = self.budget - self.n_evaluations - 1
        epsilon = (p_scaled.max(axis=0) - p_scaled.min(axis=0)) \
                  / (n_pfr + (c*b_count))
        return epsilon

    def _compute_penalty(self, lcb, p):
        pt = p + self._compute_epsilon(p)
        # pt = p
        # yt = lcb + self._compute_epsilon(p)
        yt = lcb
        if np.all(Pareto_split(np.vstack((yt, pt)))[0] == pt):
            assert lcb.ndim == 2
            return np.max([-1 + np.prod(1 + lcb - pi) for pi in p])
        else:
            return 0

    @optional_inversion
    def _scalarise_y(self, y_put, std_put):
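        # infill value for a single putative solution: a negative penalty when its
        # lower confidence bound is epsilon-dominated by the current front,
        # otherwise the hypervolume gained by adding the prediction to the front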
        p = self.apply_weighting(self.p)

        if y_put.ndim < 2:
            y_put = y_put.reshape(1, -1)
            std_put = std_put.reshape(1, -1)

        assert y_put.shape[0] == 1
        assert std_put.shape[0] == 1

        # lower confidence bounds
        lcb = y_put + (self.gain * std_put)

        yt = lcb + (self._compute_epsilon(p))
        l = [-1 + np.prod(1 + lcb - p_i)
             if cs.compare_solutions(p_i, yt, [-1, -1]) == 0
             else 0 for p_i in p]

        penalty = max([0, max(l)])

        # penalty = self._compute_penalty(lcb, p)

        if penalty > 0:
            return -penalty
        else:
            # compute and update hypervolumes
            current_hv = self.chv
            # we use vstack(self.p, lcb) here without Pareto_split because
            # it is more efficient and gives the same answer. Verified
            # TODO create temporary class variable to store best hv so that
            #  it does not have to be recomputed in self.step
            put_hv = self._compute_hypervolume(np.vstack((p, y_put)))
            return put_hv - current_hv

    def alpha(self, x_put):
        y_put, var_put = self.surrogate.predict(x_put)
        return float(self._scalarise_y(y_put, var_put**0.5, invert=True))
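The penalty branch in _scalarise_y follows the list comprehension shown above: for each front member flagged by the compare_solutions test against the epsilon-shifted lower confidence bound, it evaluates -1 + prod(1 + lcb - p_i) and keeps the largest value. A toy 2-objective evaluation of that arithmetic with plain numpy, using made-up values and a simple dominance check in place of the compare_solutions call:

import numpy as np

p = np.array([[1.0, 3.0], [2.0, 1.0]])     # toy front (minimisation)
lcb = np.array([2.5, 3.5])                 # hypothetical lower confidence bound of a candidate
epsilon = np.array([0.1, 0.1])             # hypothetical additive epsilon
yt = lcb + epsilon

penalties = [-1 + np.prod(1 + lcb - p_i)
             if np.all(p_i <= yt) and np.any(p_i < yt)   # p_i dominates the shifted candidate
             else 0
             for p_i in p]
penalty = max([0, max(penalties)])
print(penalty)   # 4.25 here, so the acquisition would return -penalty for this candidate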