def naive_non_dominated_sort(F, **kwargs):
    M = Dominator.calc_domination_matrix(F)

    fronts = []
    remaining = set(range(M.shape[0]))

    while len(remaining) > 0:

        front = []

        for i in remaining:

            is_dominated = False
            dominating = set()

            for j in front:
                rel = M[i, j]
                if rel == 1:
                    dominating.add(j)
                elif rel == -1:
                    is_dominated = True
                    break

            if is_dominated:
                continue
            else:
                front = [x for x in front if x not in dominating]
                front.append(i)

        remaining.difference_update(front)
        fronts.append(front)

    return fronts
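
A minimal usage sketch with made-up data, assuming the Dominator helper used above is available (for instance the pymoo-style Dominator from pymoo.util.dominator) and that F holds one row of objective values per solution, all to be minimized:

import numpy as np

# toy 2-objective population; [4.0, 4.0] is dominated by [2.0, 3.0]
F = np.array([[1.0, 5.0],
              [2.0, 3.0],
              [3.0, 1.0],
              [4.0, 4.0]])

fronts = naive_non_dominated_sort(F)
print(fronts)  # e.g. [[0, 1, 2], [3]]
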
def find_non_dominated(F, _F=None):
    M = Dominator.calc_domination_matrix(F, _F)
    # row i contains no -1 iff solution i is not dominated by any other solution
    I = np.where(np.all(M >= 0, axis=1))[0]
    return I
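
find_non_dominated only extracts the indices of the first front. A quick check on the toy F defined above (illustrative only, same assumptions):

I = find_non_dominated(F)
print(I)  # e.g. [0 1 2] -- the indices of the non-dominated solutions
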
def fast_non_dominated_sort(F, **kwargs):
    M = Dominator.calc_domination_matrix(F)

    # number of solutions in the population
    n = M.shape[0]

    fronts = []

    if n == 0:
        return fronts

    # bookkeeping: how many solutions have been assigned to a front so far
    n_ranked = 0
    ranked = np.zeros(n, dtype=int)

    # for each individual a list of all individuals that are dominated by this one
    is_dominating = [[] for _ in range(n)]

    # for each individual, the number of solutions that dominate it
    n_dominated = np.zeros(n)

    current_front = []

    for i in range(n):

        for j in range(i + 1, n):
            rel = M[i, j]
            if rel == 1:
                is_dominating[i].append(j)
                n_dominated[j] += 1
            elif rel == -1:
                is_dominating[j].append(i)
                n_dominated[i] += 1

        if n_dominated[i] == 0:
            current_front.append(i)
            ranked[i] = 1
            n_ranked += 1

    # append the first front to the list of fronts
    fronts.append(current_front)

    # while not all solutions are assigned to a pareto front
    while n_ranked < n:

        next_front = []

        # for each individual in the current front
        for i in current_front:

            # all solutions that are dominated by this individual
            for j in is_dominating[i]:
                n_dominated[j] -= 1
                if n_dominated[j] == 0:
                    next_front.append(j)
                    ranked[j] = 1
                    n_ranked += 1

        fronts.append(next_front)
        current_front = next_front

    return fronts
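
Both sorting routines should produce the same partition into fronts; the fast variant records, for every solution, which solutions it dominates and by how many it is dominated, so each pair is compared only once. A small consistency check on the toy F from above (illustrative only):

fronts_naive = naive_non_dominated_sort(F)
fronts_fast = fast_non_dominated_sort(F)

# compare as sets, since the ordering inside a front is not significant
assert [set(f) for f in fronts_naive] == [set(f) for f in fronts_fast]
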
Example #4
    def train(self, x, f, cross_val=False, *args, **kwargs):

        # f = (f - np.min(f))/(np.max(f)-np.min(f))
        f_normalized = self.normalize.normalize(f, self.dataset_func)
        kf = KFold(n_splits=self.n_splits)
        best_acc = 0
        best_loss = np.inf

        if self.dataset_func is False:
            # aggregate constraint violation: sum over the positive (violated) entries only
            cv = np.copy(f_normalized)
            index = np.any(f_normalized > 0, axis=1)
            cv[f_normalized <= 0] = 0
            cv = np.sum(cv, axis=1)
            acv = np.sum(f_normalized, axis=1)
            acv[index] = np.copy(cv[index])
            # feasibility labels: +1 if any constraint is violated (cv > 0), otherwise -1
            # (cast to int before assigning -1, since a boolean array would clip it to True)
            g_label = (cv > 0).astype(int)
            g_label[cv <= 0] = -1
            g_label = np.vstack(g_label)

        # cross-validation
        for train_index, test_index in kf.split(x):
            train_data_x, test_data_x, train_data_f, test_data_f \
                = x[train_index], x[test_index], f_normalized[train_index], f_normalized[test_index]

            if self.dataset_func:
                self.train_dominance_matrix = Dominator.calc_domination_matrix(
                    f_normalized[train_index])
                self.test_dominance_matrix = Dominator.calc_domination_matrix(
                    f_normalized[test_index])
            else:
                train_data_cv, test_data_cv, train_g_label, test_g_label = \
                    torch.from_numpy(cv[train_index]), torch.from_numpy(cv[test_index]), \
                    torch.from_numpy(g_label[train_index]), torch.from_numpy(g_label[test_index])

            train_data_x, test_data_x, train_data_f, test_data_f \
                = torch.from_numpy(train_data_x), torch.from_numpy(test_data_x), torch.from_numpy(train_data_f), \
                  torch.from_numpy(test_data_f)

            train_indices = torch.from_numpy(np.arange(train_data_x.shape[0]))
            test_indices = torch.from_numpy(np.arange(test_data_x.shape[0]))

            if self.dataset_func:
                self.trainset = DatasetFunction(train_indices, train_data_x,
                                                train_data_f)
                self.testset = DatasetFunction(test_indices, test_data_x,
                                               test_data_f)
            else:
                self.trainset = DatasetConstraint(train_indices, train_data_x,
                                                  train_data_f, train_g_label)
                self.testset = DatasetConstraint(test_indices, test_data_x,
                                                 test_data_f, test_g_label)

            self.trainloader = torch.utils.data.DataLoader(
                self.trainset, batch_size=self.batch_size, shuffle=True)
            self.testloader = torch.utils.data.DataLoader(
                self.testset, batch_size=self.batch_size, shuffle=True)

            self.net = copy.deepcopy(self.neuralnet)
            self.optimizer = optim.Adam(self.net.parameters(),
                                        lr=0.01,
                                        weight_decay=5e-1,
                                        betas=(0.9, 0.999))
            self.scheduler = CosineAnnealingLR(self.optimizer,
                                               T_max=self.total_epoch,
                                               eta_min=1e-7)

            model, acc, loss = self.train_partition()
            # keep the model from the best-performing fold
            if self.best_accuracy_model:
                if acc > best_acc:
                    best_acc = acc
                    self.model = model
            else:
                if loss < best_loss:
                    best_loss = loss
                    self.model = model

            if not self.cross_val:
                break

        return self.model
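
In the constraint branch above (dataset_func is False), the normalized constraint values are reduced to an aggregate violation cv and a +1/-1 feasibility label. The following standalone sketch reproduces just that label construction on made-up data (the array g and the printed values are illustrative, not taken from the original class):

import numpy as np

# two constraints per solution; positive entries mean the constraint is violated
g = np.array([[ 0.2, -0.1],   # violated  -> label +1
              [-0.3, -0.5],   # feasible  -> label -1
              [ 0.0,  0.4]])  # violated  -> label +1

cv = np.copy(g)
cv[g <= 0] = 0            # keep only the violated part
cv = np.sum(cv, axis=1)   # aggregate violation per solution

g_label = (cv > 0).astype(int)
g_label[cv <= 0] = -1     # -1 feasible, +1 infeasible
print(cv)       # [0.2 0.  0.4]
print(g_label)  # [ 1 -1  1]
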
Example #5
    def calc_as_fronts(F, G=None, only_pareto_front=False):

        """
        try:
            import pygmo as pg
            ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(F)
        except ImportError:
            pass
        """

        # calculate the dominance matrix
        n = F.shape[0]
        M = Dominator.calc_domination_matrix(F, G)

        fronts = []

        # bookkeeping: how many solutions have been assigned to a front so far
        n_ranked = 0
        ranked = np.zeros(n, dtype=int)

        # for each individual a list of all individuals that are dominated by this one
        is_dominating = [[] for _ in range(n)]

        # for each individual, the number of solutions that dominate it
        n_dominated = np.zeros(n)

        current_front = []

        for i in range(n):

            for j in range(i + 1, n):
                rel = M[i, j]
                if rel == 1:
                    is_dominating[i].append(j)
                    n_dominated[j] += 1
                elif rel == -1:
                    is_dominating[j].append(i)
                    n_dominated[i] += 1

            if n_dominated[i] == 0:
                current_front.append(i)
                ranked[i] = 1
                n_ranked += 1

        if only_pareto_front:
            return current_front

        # append the first front to the list of fronts
        fronts.append(current_front)

        # while not all solutions are assigned to a pareto front
        while n_ranked < n:

            next_front = []

            # for each individual in the current front
            for i in current_front:

                # all solutions that are dominated by this individual
                for j in is_dominating[i]:
                    n_dominated[j] -= 1
                    if n_dominated[j] == 0:
                        next_front.append(j)
                        ranked[j] = 1
                        n_ranked += 1

            fronts.append(next_front)
            current_front = next_front

        return fronts
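
A hedged usage sketch for calc_as_fronts: in the original code it is a static method of a sorting class, so it is called here as a plain function purely for illustration, on toy data.

import numpy as np

# toy minimization problem: [4.0, 4.0] is dominated by [2.0, 3.0]
F = np.array([[1.0, 5.0], [2.0, 3.0], [3.0, 1.0], [4.0, 4.0]])

all_fronts = calc_as_fronts(F)                          # e.g. [[0, 1, 2], [3]]
pareto_idx = calc_as_fronts(F, only_pareto_front=True)  # e.g. [0, 1, 2]
assert set(pareto_idx) == set(all_fronts[0])
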