Example #1
    def _sampling(self, n_samples):
        pop = Population(n_samples)
        pop_X, pop_hashX = [], []

        if self.benchmark_name in ('cifar10', 'cifar100'):
            allowed_choices = ['I', '1', '2']

            i = 0
            while i < n_samples:
                new_X = np.random.choice(allowed_choices, 14)
                new_hashX = convert_X_to_hashX(new_X)
                if new_hashX not in pop_hashX:
                    pop_hashX.append(new_hashX)  # remember the hash so duplicates are skipped
                    pop[i].set('X', new_X)
                    pop[i].set('hashX', new_hashX)
                    i += 1

        else:
            while len(pop_X) < n_samples:
                matrix_2D, ops_STRING = create_model()
                modelspec = api.ModelSpec(matrix=matrix_2D, ops=ops_STRING)

                if self.benchmark_api.is_valid(modelspec):
                    hashX = self.benchmark_api.get_module_hash(modelspec)

                    if hashX not in pop_hashX:
                        matrix_1D = decoding_matrix(matrix_2D)
                        ops_INT = decoding_ops(ops_STRING)

                        X = combine_matrix1D_and_opsINT(matrix=matrix_1D, ops=ops_INT)
                        # store the architecture on the individual as well as
                        # in the bookkeeping lists, so `pop` is actually filled
                        pop[len(pop_X)].set('X', X)
                        pop[len(pop_X)].set('hashX', hashX)
                        pop_X.append(X)
                        pop_hashX.append(hashX)

        return pop
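The duplicate check in Example #1 relies on a `convert_X_to_hashX` helper that is not shown. A minimal sketch, assuming the hash key is simply the joined encoding string (the real helper may differ):

def convert_X_to_hashX(X):
    # hypothetical: for a string encoding such as ['I', '2', '1', ...],
    # the concatenation is already a unique, hashable key
    return ''.join(X)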
Example #2
    def _do(self, problem, P, O, **kwargs):
        P_hash_key = P.get('hash_key')
        O_old_X = O.get('X')

        len_P = len(P)
        len_X = len(O_old_X[-1])

        n_mutations = 0
        max_n_mutations = len_P * 5

        self.prob = 1 / len_X  # per-gene mutation rate: one expected change per offspring

        O_new = Population(len_P)
        O_new_hash_key = []

        n = 0
        while True:
            for X_old in O_old_X:
                X = X_old.copy()

                for m, prob in enumerate(np.random.rand(len_X)):
                    if prob <= self.prob:
                        if problem.problem_name == 'SpeechRecognition':
                            if m in problem.IDX_MAIN_OPS:
                                valid_ops = problem.MAIN_OPS.copy()
                            else:
                                valid_ops = problem.SKIP_OPS.copy()
                        elif problem.problem_name == '101':
                            if m == 0 or m == 21:
                                continue
                            elif m in problem.IDX_OPS:
                                valid_ops = problem.OPS.copy()
                            else:
                                valid_ops = problem.EDGES.copy()
                        else:
                            valid_ops = problem.valid_ops.copy()
                        valid_ops.remove(X[m])
                        X[m] = np.random.choice(valid_ops)

                if problem.checking_valid(X):
                    hash_key = problem.get_hash_key(X)
                    if checking_valid(hash_key, O=O_new_hash_key, P=P_hash_key, DS=kwargs['algorithm'].E_Archive.DS) \
                            or n_mutations > max_n_mutations:
                        O_new_hash_key.append(hash_key)

                        F = kwargs['algorithm'].evaluate(X, kwargs['algorithm'].using_surrogate_model)
                        O_new[n].set('X', X)
                        O_new[n].set('hash_key', hash_key)
                        O_new[n].set('F', F)

                        if not kwargs['algorithm'].using_surrogate_model:
                            kwargs['algorithm'].E_Archive.update(O_new[n])
                        else:
                            kwargs['algorithm'].E_Archive_1.update(O_new[n])

                        n += 1
                        if n == len_P:
                            return O_new
            n_mutations += 1
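Most examples here also call a free-standing `checking_valid` with varying keyword arguments. A plausible sketch, assuming every keyword names a collection of already-seen hash keys:

def checking_valid(hash_key, **collections):
    # valid only if the hash appears in none of the given collections
    # (offspring O, parents P, dominated set DS, elitist archive EA, ...)
    return all(hash_key not in collection for collection in collections.values())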
Example #3
    def mutation(self, P, O):
        P_hash_ = P.get('hash_')

        n_mutations = 0
        max_n_mutations = self.pop_size * 5
        prob_M = 1 / len(O[-1].X)

        O_old_X = O.get('X')

        O_new = Population(self.pop_size)
        O_hash_ = []
        n = 0

        n_columns = O_old_X.shape[1]

        full = False
        while not full:
            for X_old in O_old_X:
                X = X_old.copy()

                prob = np.random.rand(n_columns)
                for m, prob_ in enumerate(prob):
                    if prob_ <= prob_M:
                        if m in IDX_MAIN_OPS:
                            valid_ops = MAIN_OPS.copy()
                        else:
                            valid_ops = SKIP_OPS.copy()
                        valid_ops.remove(X[m])
                        X[m] = np.random.choice(valid_ops)

                hash_ = get_hash_(X)
                if checking_valid(hash_,
                                  O_hash_=O_hash_,
                                  P_hash_=P_hash_,
                                  DS=self.E_Archive.DS
                                  ) or n_mutations > max_n_mutations:
                    O_hash_.append(hash_)

                    F, doEvaluateTwice = self.evaluate(
                        X, self.using_surrogate_model)
                    O_new[n].set('X', X)
                    O_new[n].set('hash_', hash_)
                    O_new[n].set('F', F)

                    if not self.using_surrogate_model:
                        self.E_Archive.update(O_new[n])
                    else:
                        if doEvaluateTwice:
                            self.E_Archive.update(O_new[n])
                        else:
                            self.E_Archive_1.update(O_new[n])
                            self.training_set.append(O_new[n])
                    n += 1
                    if n == self.pop_size:
                        full = True
                        break
            n_mutations += 1  # one full pass over the old offspring completed
        return O_new
Example #4
    def _sampling(self, n_samples):
        P = Population(n_samples)
        P_hashX = []
        i = 0

        if BENCHMARK_NAME == 'NAS-Bench-101':
            while i < n_samples:
                matrix_2D, ops_STRING = create_model()
                MS = api.ModelSpec(matrix=matrix_2D, ops=ops_STRING)

                if BENCHMARK_API.is_valid(MS):
                    hashX = BENCHMARK_API.get_module_hash(MS)

                    if hashX not in P_hashX:
                        P_hashX.append(hashX)

                        matrix_1D = decoding_matrix(matrix_2D)
                        ops_INT = decoding_ops(ops_STRING)

                        X = combine_matrix1D_and_opsINT(matrix=matrix_1D,
                                                        ops=ops_INT)
                        F, _ = self.evaluate(X=X, using_surrogate_model=False)

                        P[i].set('X', X)
                        P[i].set('hashX', hashX)
                        P[i].set('F', F)
                        self.update_A(P[i])

                        i += 1

        else:
            if BENCHMARK_NAME in ('NAS-Bench-201-CIFAR-10',
                                  'NAS-Bench-201-CIFAR-100',
                                  'NAS-Bench-201-ImageNet16-120'):
                length = 6
                opt = ['0', '1', '2', '3', '4']
            else:
                length = 14
                opt = ['I', '1', '2']

            while i < n_samples:
                X = np.random.choice(opt, length)
                hashX = convert_to_hashX(X, BENCHMARK_NAME)
                if hashX not in P_hashX:
                    P_hashX.append(hashX)

                    F, _ = self.evaluate(X=X)

                    P[i].set('X', X)
                    P[i].set('hashX', hashX)
                    P[i].set('F', F)
                    self.update_A(P[i])

                    i += 1

        return P
Example #5
    def _do(self, problem, P, **kwargs):
        len_P = len(P)
        O = Population(len_P)
        O_hash_key = []

        n = 0
        n_crossovers = 0
        max_n_crossovers = len_P * 5
        while True:
            I = np.random.choice(len_P,
                                 size=(len_P // 2, self.n_parents),
                                 replace=False)
            _P = P[I]
            for i in range(len(_P)):
                if np.random.random() < self.prob:
                    o1_X, o2_X = crossover(_P[i][0].X, _P[i][1].X, self.method)
                    o_X = [o1_X, o2_X]
                    for X in o_X:
                        if problem.checking_valid(X):
                            hash_key = problem.get_hash_key(X)
                            if checking_valid(hash_key, O=O_hash_key, DS=kwargs['algorithm'].E_Archive.DS) \
                                    or n_crossovers > max_n_crossovers:
                                O_hash_key.append(hash_key)

                                F = kwargs['algorithm'].evaluate(
                                    X,
                                    kwargs['algorithm'].using_surrogate_model)
                                O[n].set('X', X)
                                O[n].set('hash_key', hash_key)
                                O[n].set('F', F)

                                if not kwargs['algorithm'].using_surrogate_model:
                                    kwargs['algorithm'].E_Archive.update(O[n])
                                else:
                                    kwargs['algorithm'].E_Archive_1.update(
                                        O[n])
                                n += 1
                                if n == len_P:
                                    return O
                else:
                    for o in [_P[i][0], _P[i][1]]:
                        O[n].set('X', o.X)
                        O[n].set('hash_key', o.hash_key)
                        O[n].set('F', o.F)
                        n += 1
                        if n == len_P:
                            return O
            n_crossovers += 1
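The `crossover` helper called in Examples #5 and #12 is not shown. A minimal sketch, assuming the third argument selects between uniform ('UX') and single-point crossover; the method codes are illustrative guesses:

import numpy as np

def crossover(X1, X2, method='UX'):
    o1, o2 = X1.copy(), X2.copy()
    if method == 'UX':
        # uniform crossover: swap each gene independently with probability 0.5
        mask = np.random.rand(len(X1)) < 0.5
        o1[mask], o2[mask] = X2[mask], X1[mask]
    else:
        # single-point crossover: swap the tails after a random cut point
        point = np.random.randint(1, len(X1))
        o1[point:], o2[point:] = X2[point:], X1[point:]
    return o1, o2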
Example #6
    def crossover(self, P, prob_C=0.9):
        n_crossovers = 0
        max_n_crossovers = self.pop_size * 5

        n = 0
        O = Population(self.pop_size)
        O_hash_ = []

        full = False
        while not full:
            idx_parents = selecting_parents(P)
            if np.random.random() < prob_C:
                off_X = self._crossover(P, idx_parents, self.type_C)
                for X in off_X:
                    hash_ = get_hash_(X)
                    if checking_valid(
                            hash_, O_hash_=O_hash_, DS=self.E_Archive.DS
                    ) or n_crossovers > max_n_crossovers:
                        O_hash_.append(hash_)
                        F, doEvaluateTwice = self.evaluate(
                            X, using_SM=self.using_surrogate_model)

                        O[n].set('X', X)
                        O[n].set('hash_', hash_)
                        O[n].set('F', F)

                        if not self.using_surrogate_model:
                            self.E_Archive.update(O[n])
                        else:
                            if doEvaluateTwice:
                                self.E_Archive.update(O[n])
                            else:
                                self.E_Archive_1.update(O[n])
                                self.training_set.append(O[n])
                        n += 1
                        if n == self.pop_size:
                            full = True
                            break
            else:
                for off in P[idx_parents]:
                    O[n].set('X', off.X)
                    O[n].set('hash_', off.hash_)
                    O[n].set('F', off.F)
                    n += 1
                    if n == self.pop_size:
                        full = True
                        break
            n_crossovers += 1
        return O
Example #7
    def sampling(self):
        P = Population(self.pop_size)
        P_hash_ = []
        n = 0
        while n < self.pop_size:
            X = get_random_X()
            hash_ = get_hash_(X)
            if checking_valid(hash_, P_hash_=P_hash_, DS=self.E_Archive.DS):
                P_hash_.append(hash_)

                F, _ = self.evaluate(X)
                P[n].set('X', X)
                P[n].set('hash_', hash_)
                P[n].set('F', F)

                self.E_Archive.update(P[n])
                n += 1
        return P
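Every example funnels evaluated individuals into an elitist archive (`E_Archive.update`, `update_A`, `elitist_archive.update`). A rough sketch of that data structure, assuming bi-objective minimisation; `dominates` and the `DS` bookkeeping are reconstructions, not the repository's actual code:

def dominates(Fa, Fb):
    # Fa Pareto-dominates Fb: no worse in every objective, strictly better in one
    return all(a <= b for a, b in zip(Fa, Fb)) and any(a < b for a, b in zip(Fa, Fb))

class ElitistArchive:
    def __init__(self):
        self.X, self.hash_key, self.F = [], [], []
        self.DS = []  # hash keys of solutions proven dominated
        self.having_change = False

    def update(self, idv):
        X_new, h_new, F_new = idv.get('X'), idv.get('hash_key'), idv.get('F')
        if any(dominates(F, F_new) for F in self.F):
            self.DS.append(h_new)  # newcomer is dominated: remember and reject
            return
        keep = [k for k in range(len(self.F)) if not dominates(F_new, self.F[k])]
        self.DS += [self.hash_key[k] for k in range(len(self.F)) if k not in keep]
        self.X = [self.X[k] for k in keep] + [X_new]
        self.hash_key = [self.hash_key[k] for k in keep] + [h_new]
        self.F = [self.F[k] for k in keep] + [F_new]
        self.having_change = True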
Example #8
def sampling(n_samples):
    P = Population(n_samples)
    P_hashX = []
    i = 0
    allowed_choices = ['0', '1', '2', '3', '4']
    while i < n_samples:
        X = np.random.choice(allowed_choices, 6)
        hashX = convert_to_hashX_NAS201(X)
        if hashX not in P_hashX:
            P_hashX.append(hashX)

            F = evaluate(X=X)

            P[i].set('X', X)
            P[i].set('hashX', hashX)
            P[i].set('F', F)

            i += 1
    return P
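`convert_to_hashX_NAS201` maps the 6-integer encoding to a canonical key. A hypothetical version, assuming the key is the standard NAS-Bench-201 architecture string (one operation per edge of the 4-node cell):

OPS_NAS201 = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']

def convert_to_hashX_NAS201(X):
    # X holds 6 operation indices, one per edge of the cell DAG
    ops = [OPS_NAS201[int(x)] for x in X]
    return '|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|'.format(*ops)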
Example #9
    def do(self, problem, **kwargs):
        algorithm = kwargs['algorithm']
        P = Population(self.n_samples)
        n = 0
        P_hash_key = []
        P_X = []
        while n < self.n_samples:
            X = problem.get_X()
            if problem.checking_valid(X):
                hash_key = problem.get_hash_key(X)
                if checking_valid(hash_key,
                                  P=P_hash_key,
                                  EA=algorithm.E_Archive.hash_key,
                                  DS=algorithm.E_Archive.DS):
                    P_X.append(X)
                    P_hash_key.append(hash_key)
                    n += 1
        P.set('X', P_X)
        P.set('hash_key', P_hash_key)
        return P
Example #10
    def _do_each_gen(self, first=False):
        if self.using_surrogate_model:
            self.alpha = np.mean(self.F_total)

            # stop refreshing the surrogate once more than a third of the
            # evaluation budget has been spent or after 15 model updates
            if self.m_nEs - self.nEs < 2 * self.m_nEs // 3 or self.n_updates == 15:
                self.update_model = False

            if not first:
                tmp_set_X = np.array(self.tmp_A_X)
                tmp_set_hashX = np.array(self.tmp_A_hashX)

                tmp_set = Population(len(self.tmp_A_X))
                tmp_set.set('X', tmp_set_X)
                tmp_set.set('hashX', tmp_set_hashX)
                for i in range(len(tmp_set)):
                    hashX_i = tmp_set[i].get('hashX')
                    if (hashX_i not in self.A_hashX) and (hashX_i not in self.DS):
                        F, _ = self.evaluate(tmp_set[i].get('X'),
                                             using_surrogate_model=False,
                                             count_nE=True)
                        tmp_set[i].set('F', F)
                        self.update_A(tmp_set[i])

            if self.n_gen % self.update_model_after_n_gens == 0:
                data = np.array(self.training_data)
                self.training_data = []

                X = []
                Y = []
                checked = []
                for i in range(len(data)):
                    if BENCHMARK_NAME == 'NAS-Bench-101':
                        matrix_1D, ops_INT = split_to_matrix1D_and_opsINT(
                            data[i].get('X'))

                        matrix_2D = encoding_matrix(matrix_1D)
                        ops_STRING = encoding_ops(ops_INT)
                        modelspec = api.ModelSpec(matrix=matrix_2D,
                                                  ops=ops_STRING)
                        hashX = BENCHMARK_API.get_module_hash(modelspec)
                    else:
                        hashX = convert_to_hashX(data[i].get('X'),
                                                 BENCHMARK_NAME)
                    if (hashX not in checked) and (hashX not in self.DS) and (
                            hashX not in self.A_hashX):
                        checked.append(hashX)
                        F, _ = self.evaluate(data[i].get('X'),
                                             using_surrogate_model=False,
                                             count_nE=True)
                        data[i].set('F', F)
                        self.update_A(data[i])
                        X.append(data[i].get('X'))
                        Y.append(F[1])
                for i in range(len(self.A_X)):
                    X.append(self.A_X[i])
                    Y.append(self.A_F[i][1])
                X = np.array(X)
                Y = np.array(Y)
                if self.update_model:
                    self.n_updates += 1
                    if BENCHMARK_NAME == 'NAS-Bench-101':
                        self.surrogate_model.fit(x=X, y=Y)
                    else:
                        self.surrogate_model.fit(x=encode(X, BENCHMARK_NAME),
                                                 y=Y,
                                                 verbose=False)

        if DEBUG:
            print(f'Number of evaluations used: {self.nEs}/{self.m_nEs}')

        if SAVE:
            pf = np.array(self.A_F)
            pf = np.unique(pf, axis=0)
            pf = pf[np.argsort(pf[:, 0])]
            pk.dump(
                [pf, self.nEs],
                open(
                    f'{self.path}/pf_eval/pf_and_evaluated_gen_{self.n_gen}.p',
                    'wb'))
            self.worst_f0 = max(self.worst_f0, np.max(pf[:, 0]))
            self.worst_f1 = max(self.worst_f1, np.max(pf[:, 1]))

            dpfs = round(cal_dpfs(pareto_s=pf, pareto_front=BENCHMARK_PF_TRUE),
                         6)
            print(self.nEs, dpfs)
            if len(self.no_eval) == 0:
                self.dpfs.append(dpfs)
                self.no_eval.append(self.nEs)
            else:
                if self.nEs == self.no_eval[-1]:
                    self.dpfs[-1] = dpfs
                else:
                    self.dpfs.append(dpfs)
                    self.no_eval.append(self.nEs)
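`cal_dpfs` above (like `calc_IGD`/`calculating_IGD` in the later examples) measures how far the approximated front is from the true Pareto front. A sketch, assuming it is the usual Inverted Generational Distance:

import numpy as np

def cal_dpfs(pareto_s, pareto_front):
    # IGD: mean Euclidean distance from each true-front point to its
    # nearest neighbour in the approximated front pareto_s
    d = np.linalg.norm(pareto_front[:, None, :] - pareto_s[None, :, :], axis=-1)
    return d.min(axis=1).mean()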
Example #11
    def _mutation(self, P, O):
        P_hashX = P.get('hashX')

        new_O = Population(len(O))

        new_O_hashX = []

        old_O_X = O.get('X')

        i = 0
        full = False
        pM = 1 / len(old_O_X[0])
        while not full:
            if BENCHMARK_NAME == 'NAS-Bench-101':
                for x in old_O_X:
                    matrix_1D, ops_INT = split_to_matrix1D_and_opsINT(x)

                    new_matrix_1D = matrix_1D.copy()
                    new_ops_INT = ops_INT.copy()

                    pM_idxs_matrix = np.random.rand(len(new_matrix_1D))
                    for j in range(len(pM_idxs_matrix)):
                        if pM_idxs_matrix[j] <= pM:
                            new_matrix_1D[j] = 1 - new_matrix_1D[j]

                    pM_idxs_ops = np.random.rand(len(new_ops_INT))
                    for j in range(len(pM_idxs_ops)):
                        if pM_idxs_ops[j] <= pM:
                            choices = [0, 1, 2]
                            choices.remove(new_ops_INT[j])
                            new_ops_INT[j] = np.random.choice(choices)

                    matrix_2D = encoding_matrix(new_matrix_1D)
                    ops_STRING = encoding_ops(new_ops_INT)

                    new_MS = api.ModelSpec(matrix_2D, ops_STRING)

                    if BENCHMARK_API.is_valid(new_MS):
                        hashX = BENCHMARK_API.get_module_hash(new_MS)
                        if (hashX not in new_O_hashX) and (hashX not in P_hashX) \
                                and (hashX not in self.DS):
                            X = combine_matrix1D_and_opsINT(
                                new_matrix_1D, new_ops_INT)
                            new_O_hashX.append(hashX)

                            F, twice = self.evaluate(
                                X=X,
                                using_surrogate_model=self.using_surrogate_model)

                            new_O[i].set('X', X)
                            new_O[i].set('hashX', hashX)
                            new_O[i].set('F', F)

                            if not self.using_surrogate_model:
                                self.update_A(new_O[i])
                            else:
                                if twice:
                                    self.update_A(new_O[i])
                                else:
                                    self.training_data.append(new_O[i])
                                    self.update_fake_A(new_O[i])

                            i += 1
                            if i == len(P):
                                full = True
                                break

            else:
                if BENCHMARK_NAME == 'MacroNAS-CIFAR-10' or BENCHMARK_NAME == 'MacroNAS-CIFAR-100':
                    opt = ['I', '1', '2']
                else:
                    opt = ['0', '1', '2', '3', '4']

                pM_idxs = np.random.rand(old_O_X.shape[0], old_O_X.shape[1])

                for m in range(len(old_O_X)):
                    X = old_O_X[m].copy()

                    for n in range(pM_idxs.shape[1]):
                        if pM_idxs[m][n] <= pM:
                            allowed_opt = opt.copy()
                            allowed_opt.remove(X[n])

                            X[n] = np.random.choice(allowed_opt)

                    hashX = convert_to_hashX(X, BENCHMARK_NAME)

                    if (hashX not in new_O_hashX) and (hashX not in P_hashX) \
                            and (hashX not in self.DS):
                        new_O_hashX.append(hashX)

                        F, twice = self.evaluate(
                            X=X,
                            using_surrogate_model=self.using_surrogate_model)

                        new_O[i].set('X', X)
                        new_O[i].set('hashX', hashX)
                        new_O[i].set('F', F)

                        if not self.using_surrogate_model:
                            self.update_A(new_O[i])
                        else:
                            if twice:
                                self.update_A(new_O[i])
                            else:
                                self.training_data.append(new_O[i])
                                self.update_fake_A(new_O[i])

                        i += 1
                        if i == len(P):
                            full = True
                            break

        return new_O
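The NAS-Bench-101 branches round-trip between a 7x7 adjacency matrix and a flat vector via `encoding_matrix`/`decoding_matrix`. A sketch, assuming the flat form is the strict upper triangle (21 bits for 7 nodes) in row-major order:

import numpy as np

def decoding_matrix(matrix_2D):
    # flatten the strict upper triangle of the DAG adjacency matrix
    return matrix_2D[np.triu_indices(matrix_2D.shape[0], k=1)]

def encoding_matrix(matrix_1D, n_nodes=7):
    # inverse operation: scatter the bits back above the diagonal
    matrix_2D = np.zeros((n_nodes, n_nodes), dtype=int)
    matrix_2D[np.triu_indices(n_nodes, k=1)] = matrix_1D
    return matrix_2D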
Example #12
    def _crossover(self, P, pC=0.9):
        O = Population(len(P))
        O_hashX = []

        nCOs = 0  # counts passes, to avoid getting stuck

        i = 0
        full = False
        while not full:
            idx = np.random.choice(len(P),
                                   size=(len(P) // 2, 2),
                                   replace=False)
            P_ = P[idx]

            if BENCHMARK_NAME == 'NAS-Bench-101':
                for j in range(len(P_)):
                    if np.random.random() < pC:
                        new_O1_X, new_O2_X = crossover(P_[j][0].get('X'),
                                                       P_[j][1].get('X'),
                                                       self.typeC)

                        matrix1_1D, ops1_INT = split_to_matrix1D_and_opsINT(
                            new_O1_X)
                        matrix2_1D, ops2_INT = split_to_matrix1D_and_opsINT(
                            new_O2_X)

                        matrix1_2D = encoding_matrix(matrix1_1D)
                        matrix2_2D = encoding_matrix(matrix2_1D)

                        ops1_STRING = encoding_ops(ops1_INT)
                        ops2_STRING = encoding_ops(ops2_INT)

                        new_MS1 = api.ModelSpec(matrix=matrix1_2D,
                                                ops=ops1_STRING)
                        new_MS2 = api.ModelSpec(matrix=matrix2_2D,
                                                ops=ops2_STRING)

                        new_MS_lst = [new_MS1, new_MS2]
                        new_O_X_lst = [new_O1_X, new_O2_X]

                        for m in range(2):
                            if BENCHMARK_API.is_valid(new_MS_lst[m]):
                                new_O_hashX = BENCHMARK_API.get_module_hash(
                                    new_MS_lst[m])
                                # after 100 passes, accept duplicates so the
                                # loop cannot get stuck; the two original
                                # branches were otherwise identical
                                if nCOs > 100 or (
                                        (new_O_hashX not in O_hashX)
                                        and (new_O_hashX not in self.DS)):
                                    O_hashX.append(new_O_hashX)

                                    new_O_F, twice = self.evaluate(
                                        X=new_O_X_lst[m],
                                        using_surrogate_model=self.using_surrogate_model)
                                    O[i].set('X', new_O_X_lst[m])
                                    O[i].set('hashX', new_O_hashX)
                                    O[i].set('F', new_O_F)

                                    if not self.using_surrogate_model:
                                        self.update_A(O[i])
                                    else:
                                        if twice:
                                            self.update_A(O[i])
                                        else:
                                            self.training_data.append(O[i])
                                            self.update_fake_A(O[i])

                                    i += 1
                                    if i == len(P):
                                        full = True
                                        break
                    else:
                        for m in range(2):
                            O[i].set('X', P_[j][m].get('X'))
                            O[i].set('hashX', P_[j][m].get('hashX'))
                            O[i].set('F', P_[j][m].get('F'))
                            i += 1
                            if i == len(P):
                                full = True
                                break
                    if full:
                        break
            else:
                for j in range(len(P_)):
                    if np.random.random() < pC:
                        o1_X, o2_X = crossover(P_[j][0].get('X'),
                                               P_[j][1].get('X'), self.typeC)

                        o_X = [o1_X, o2_X]
                        o_hashX = [
                            convert_to_hashX(o1_X, BENCHMARK_NAME),
                            convert_to_hashX(o2_X, BENCHMARK_NAME)
                        ]

                        for m in range(2):
                            # after 100 passes, accept duplicates so the loop
                            # cannot get stuck; the two original branches were
                            # otherwise identical
                            if nCOs > 100 or (
                                    (o_hashX[m] not in O_hashX)
                                    and (o_hashX[m] not in self.DS)):
                                O_hashX.append(o_hashX[m])
                                o_F, twice = self.evaluate(
                                    X=o_X[m],
                                    using_surrogate_model=self.using_surrogate_model)

                                O[i].set('X', o_X[m])
                                O[i].set('hashX', o_hashX[m])
                                O[i].set('F', o_F)

                                if not self.using_surrogate_model:
                                    self.update_A(O[i])
                                else:
                                    if twice:
                                        self.update_A(O[i])
                                    else:
                                        self.training_data.append(O[i])
                                        self.update_fake_A(O[i])

                                i += 1
                                if i == len(P):
                                    full = True
                                    break

                    else:
                        for m in range(2):
                            O[i].set('X', P_[j][m].get('X'))
                            O[i].set('hashX', P_[j][m].get('hashX'))
                            O[i].set('F', P_[j][m].get('F'))
                            i += 1
                            if i == len(P):
                                full = True
                                break

                    if full:
                        break
            nCOs += 1
        return O
Example #13
    def _do(self, problem, pop, off, **kwargs):
        pop_HASH_ = pop.get('hashX')
        off_X = off.get('X')

        self.prob = 1 / len(pop[0].X)

        _off = Population(len(pop))
        _off_HASH_ = []

        n = 0
        n_mutations = 0
        while True:
            if problem.name == '101':
                for x in off_X:
                    matrix_1D, ops_INT = split_to_matrix1D_and_opsINT(x)

                    new_matrix_1D = matrix_1D.copy()
                    new_ops_INT = ops_INT.copy()

                    pM_idxs_matrix = np.random.rand(len(new_matrix_1D))
                    for j in range(len(pM_idxs_matrix)):
                        if pM_idxs_matrix[j] <= self.prob:
                            new_matrix_1D[j] = 1 - new_matrix_1D[j]

                    pM_idxs_ops = np.random.rand(len(new_ops_INT))
                    for j in range(len(pM_idxs_ops)):
                        if pM_idxs_ops[j] <= self.prob:
                            choices = [0, 1, 2]
                            choices.remove(new_ops_INT[j])
                            new_ops_INT[j] = np.random.choice(choices)

                    matrix_2D = encoding_matrix(new_matrix_1D)
                    ops_STRING = encoding_ops(new_ops_INT)

                    new_MS = api.ModelSpec(matrix_2D, ops_STRING)

                    if problem.api.is_valid(new_MS):
                        hash_ = problem.api.get_module_hash(new_MS)
                        # accept an unseen offspring, or anything at all once
                        # 100 passes have been made (to avoid getting stuck)
                        if valid_(hash_, problem=problem, off=_off_HASH_, pop=pop_HASH_, **kwargs) or n_mutations > 100:
                            x_new = combine_matrix1D_and_opsINT(new_matrix_1D, new_ops_INT)
                            _off_HASH_.append(hash_)

                            F, doTwiceEvaluate = kwargs['algorithm'].evaluate(x_new, kwargs['algorithm'].using_surrogate_model)

                            _off[n].set('X', x_new)
                            _off[n].set('hashX', hash_)
                            _off[n].set('F', F)

                            if not kwargs['algorithm'].using_surrogate_model:
                                kwargs['algorithm'].elitist_archive.update(_off[n])
                            else:
                                if doTwiceEvaluate:
                                    kwargs['algorithm'].elitist_archive.update(_off[n])
                                else:
                                    kwargs['algorithm'].elitist_archive_tmp.update(_off[n])
                                    kwargs['algorithm'].training_set.append(_off[n])

                            n += 1
                            if n == len(pop):
                                return _off
                n_mutations += 1

            elif problem.name == 'MacroNAS' or problem.name == '201':
                pM = np.random.rand(off_X.shape[0], off_X.shape[1])

                for i in range(off_X.shape[0]):
                    x = off_X[i].copy()

                    for j in range(pM.shape[1]):
                        if pM[i][j] <= self.prob:
                            allowed_opt = problem.opt.copy()
                            allowed_opt.remove(x[j])

                            x[j] = np.random.choice(allowed_opt)

                    hash_ = encode_to_hash(x, problem.name)

                    # accept an unseen offspring, or anything at all once
                    # 100 passes have been made (to avoid getting stuck)
                    if valid_(hash_, problem=problem, off=_off_HASH_, pop=pop_HASH_, **kwargs) or n_mutations > 100:
                        _off_HASH_.append(hash_)

                        F, doTwiceEvaluate = kwargs['algorithm'].evaluate(x, kwargs['algorithm'].using_surrogate_model)

                        # fill the fresh _off population declared above
                        _off[n].set('X', x)
                        _off[n].set('hashX', hash_)
                        _off[n].set('F', F)

                        if not kwargs['algorithm'].using_surrogate_model:
                            kwargs['algorithm'].elitist_archive.update(_off[n])
                        else:
                            if doTwiceEvaluate:
                                kwargs['algorithm'].elitist_archive.update(_off[n])
                            else:
                                kwargs['algorithm'].elitist_archive_tmp.update(_off[n])
                                kwargs['algorithm'].training_set.append(_off[n])

                        n += 1
                        if n == len(pop):
                            return _off
                n_mutations += 1
Example #14
    def do_each_gen(self, first=False):
        self._do_each_gen(first)
        if self.using_surrogate_model:
            if not first:
                E_Archive_1_X = np.array(self.E_Archive_1.X)
                E_Archive_1_hash_key = np.array(self.E_Archive_1.hash_key)

                len_EA_tmp = len(E_Archive_1_X)

                tmp_set = Population(len_EA_tmp)
                tmp_set.set('X', E_Archive_1_X)
                tmp_set.set('hash_key', E_Archive_1_hash_key)

                for i, X in enumerate(E_Archive_1_X):
                    if checking_valid(E_Archive_1_hash_key[i],
                                      DS=self.E_Archive.DS,
                                      EA=self.E_Archive.hash_key):
                        F = self.evaluate(X)
                        tmp_set[i].set('F', F)
                        self.E_Archive.update(tmp_set[i])
                self.E_Archive_1 = ElitistArchive()

        if not self.E_Archive.having_change:
            self.NIS += 1
        else:
            self.NIS = 0
            self.E_Archive.having_change = False
        # Get the non-dominated front (elitist archive) - validation error
        non_dominated_front_validation = np.array(self.E_Archive.F)
        non_dominated_front_validation = np.unique(
            non_dominated_front_validation, axis=0)

        # F = np.array([self.problem.evaluate_(X) for X in self.E_Archive.X])
        # F = np.unique(F, axis=0)
        # len_F = len(F)
        # r = np.zeros(len_F, dtype=np.int8)
        # for i, F_ in enumerate(F):
        #     if r[i] == 0:
        #         for j in range(i + 1, len_F):
        #             better_idv = finding_the_better(f0_0=F_[0], f0_1=F_[1],
        #                                             f1_0=F[j][0], f1_1=F[j][1])
        #             if better_idv == 0:
        #                 r[j] += 1
        #             elif better_idv == 1:
        #                 r[i] += 1
        #                 break
        # non_dominated_front_testing = F[r == 0]
        # Update the reference point (used for calculating the Hypervolume value)
        self.reference_point_validation[0] = max(
            self.reference_point_validation[0],
            max(non_dominated_front_validation[:, 0]))
        self.reference_point_validation[1] = max(
            self.reference_point_validation[1],
            max(non_dominated_front_validation[:, 1]))

        # self.reference_point_testing[0] = max(self.reference_point_testing[0], max(non_dominated_front_testing[:, 0]))
        # self.reference_point_testing[1] = max(self.reference_point_testing[1], max(non_dominated_front_testing[:, 1]))

        # Calculate the IGD value
        IGD_validation = calculating_IGD(
            approximate_pf=non_dominated_front_validation,
            true_pf=self.problem.true_pf_validation)
        # IGD_testing = calculating_IGD(approximate_pf=non_dominated_front_testing, true_pf=self.problem.true_pf_testing)

        if len(self.n_evals_history) == 0 \
                or self.n_evals != self.n_evals_history[-1]:
            self.IGD_validation_history.append(IGD_validation)
            # self.IGD_testing_history.append(IGD_testing)
            self.n_evals_history.append(self.n_evals)
        else:
            self.IGD_validation_history[-1] = IGD_validation
            # self.IGD_testing_history[-1] = IGD_testing

        if self.IGD_validation_history[-1] == 0.0 and not self.stop_searching:
            self.stop_searching = True
            self.converging_point = self.n_evals

        # Save the results
        p.dump(
            [non_dominated_front_validation, self.n_evals],
            open(
                f'{self.path}/pf_and_n_evals_validation/gen_{self.n_true_gens}.p',
                'wb'))
        # p.dump([non_dominated_front_testing, self.n_evals],
        #        open(f'{self.path}/pf_and_n_evals_testing/gen_{self.n_true_gens}.p', 'wb'))
        p.dump(
            self.E_Archive,
            open(f'{self.path}/elitist_archive/gen_{self.n_true_gens}.p',
                 'wb'))
Example #15
    def do_each_gen(self, first=False):
        if self.using_surrogate_model:
            self.alpha = np.mean(self.F_history)

            if not first:
                EA_tmp_X = np.array(self.elitist_archive_tmp.X)
                EA_tmp_HASH_ = np.array(self.elitist_archive_tmp.HASH_)
                l_EA_tmp = len(EA_tmp_X)
                tmp_set = Population(l_EA_tmp)
                tmp_set.set('X', EA_tmp_X)
                tmp_set.set('hashX', EA_tmp_HASH_)
                for i in range(l_EA_tmp):
                    if (EA_tmp_HASH_[i] not in self.elitist_archive.DS) and (
                            EA_tmp_HASH_[i] not in self.elitist_archive.HASH_):
                        F, _ = self.evaluate(EA_tmp_X[i],
                                             using_surrogate_model=False)
                        tmp_set[i].set('F', F)
                        self.elitist_archive.update(tmp_set[i])

            if self.update_model and self.n_gens % self.update_model_each_n_gens == 0:
                self.y_pred = np.array(self.y_pred)
                self.y = np.array(self.y)

                error = np.mean((self.y - self.y_pred) ** 2)
                print('error:', error)
                if error <= 1e-3:
                    self.update_model = False
                else:
                    self.y_pred, self.y = [], []
                    data = np.array(self.training_set)
                    self.training_set = []

                    X = []
                    Y = []
                    checked = []
                    for i in range(len(data)):
                        if self.problem.name == '101':
                            matrix_1D, ops_INT = split_to_matrix1D_and_opsINT(
                                data[i].get('X'))

                            matrix_2D = encoding_matrix(matrix_1D)
                            ops_STRING = encoding_ops(ops_INT)
                            modelspec = api.ModelSpec(matrix=matrix_2D,
                                                      ops=ops_STRING)
                            hashX = self.problem.api.get_module_hash(modelspec)
                        else:
                            hashX = encode_to_hash(data[i].X,
                                                   self.problem.name)
                        if (hashX not in checked) and \
                                (hashX not in self.elitist_archive.DS) and \
                                (hashX not in self.elitist_archive.HASH_):
                            checked.append(hashX)
                            F, _ = self.evaluate(data[i].X,
                                                 using_surrogate_model=False)
                            data[i].set('F', F)

                            self.elitist_archive.update(data[i])
                            X.append(data[i].X)
                            Y.append(F[1])

                    for i, F in enumerate(self.elitist_archive.F):
                        X.append(self.elitist_archive.X[i])
                        Y.append(F[1])
                    X = np.array(X)
                    Y = np.array(Y)

                    if self.problem.name == '101':
                        self.surrogate_model.fit(x=X, y=Y)
                    else:
                        self.surrogate_model.fit(x=encode_for_surrogate_model(
                            X, self.problem.name),
                                                 y=Y,
                                                 verbose=False)

        self._do_each_gen(first)
        # Get the non-dominated front (elitist archive)
        non_dominated_front = np.array(self.elitist_archive.F)
        non_dominated_front = np.unique(non_dominated_front, axis=0)
        non_dominated_front = non_dominated_front[np.argsort(
            non_dominated_front[:, 0])]

        # Update the reference point (used for calculating the Hypervolume metric)
        self.reference_point[0] = max(self.reference_point[0],
                                      max(non_dominated_front[:, 0]))
        self.reference_point[1] = max(self.reference_point[1],
                                      max(non_dominated_front[:, 1]))

        # Calculate the IGD metric
        IGD = calc_IGD(pareto_s=non_dominated_front,
                       pareto_front=self.problem.true_pf)

        if len(self.n_evals_history) == 0:
            self.IGD_history.append(IGD)
            self.n_evals_history.append(self.nEs)
        else:
            if self.nEs == self.n_evals_history[-1]:
                self.IGD_history[-1] = IGD
            else:
                self.IGD_history.append(IGD)
                self.n_evals_history.append(self.nEs)

        if self.IGD_history[-1] == 0 and not self.canTerminate:
            self.canTerminate = True
            self.nEs_converging = self.nEs

        # Saving the results
        p.dump([non_dominated_front, self.nEs],
               open(f'{self.path}/pf_and_n_evals/gen_{self.n_gens}.p', 'wb'))
        p.dump(self.elitist_archive.X,
               open(f'{self.path}/elitist_archive/gen_{self.n_gens}.p', 'wb'))

        print('Gen', self.n_gens)
        print(
            f'--> Number of evaluations used: {self.nEs}/{self.problem.m_nEs}')
        print('-->', IGD)
Example #16
    def do_each_gen(self, first=False):
        if self.using_surrogate_model:
            self.alpha = np.mean(self.F_history)

            if not first:
                E_Archive_1_X = np.array(self.E_Archive_1.X)
                E_Archive_1_hash_ = np.array(self.E_Archive_1.hash_)
                l_E_Archive_1 = len(E_Archive_1_X)

                tmp_set = Population(l_E_Archive_1)
                tmp_set.set('X', E_Archive_1_X)
                tmp_set.set('hash_', E_Archive_1_hash_)
                for i, X in enumerate(E_Archive_1_X):
                    if checking_valid(E_Archive_1_hash_[i],
                                      DS=self.E_Archive.DS,
                                      EA_hash_=self.E_Archive.hash_):
                        F, _ = self.evaluate(X)
                        tmp_set[i].set('F', F)
                        self.E_Archive.update(tmp_set[i])  # promote the re-evaluated individual to the real archive

            if self.update_model and self.n_gens % self.updating_model_each_n_gens == 0:
                self.y_pred = np.array(self.y_pred)
                self.y = np.array(self.y)

                error = np.mean((self.y - self.y_pred) ** 2)
                print('error:', error)
                if error <= 1e-3:
                    self.update_model = False
                else:
                    self.y_pred, self.y = [], []
                    data = np.array(self.training_set)
                    self.training_set = []

                    X = []
                    Y = []
                    checked_hash_ = []
                    for i in range(len(data)):
                        hash_ = get_hash_(data[i].X)
                        if checking_valid(hash_,
                                          checked=checked_hash_,
                                          DS=self.E_Archive.DS,
                                          EA_hash_=self.E_Archive.hash_):
                            checked_hash_.append(hash_)
                            F, _ = self.evaluate(data[i].X)
                            data[i].set('F', F)

                            self.E_Archive.update(data[i])
                            X.append(data[i].X)
                            Y.append(F[1])

                    for i, F in enumerate(self.E_Archive.F):
                        X.append(self.E_Archive.X[i])
                        Y.append(F[1])
                    X = np.array(X)
                    Y = np.array(Y)
                    self.surrogate_model.fit(x=X, y=Y, verbose=False)

        # Get the non-dominated front (elitist archive)
        non_dominated_front_val_per = np.array(self.E_Archive.F)
        non_dominated_front_val_per = np.unique(non_dominated_front_val_per,
                                                axis=0)

        # Get the Pareto front based on 'test_per' (currently still 'val_per')
        F = np.array([evaluate(X) for X in self.E_Archive.X])
        F = np.unique(F, axis=0)
        len_F = len(F)
        r = np.zeros(len_F, dtype=np.int8)
        for i, F_ in enumerate(F):
            if r[i] == 0:
                for j in range(i + 1, len_F):
                    better_idv = find_better_idv(f0_0=F_[0],
                                                 f0_1=F_[1],
                                                 f1_0=F[j][0],
                                                 f1_1=F[j][1])
                    if better_idv == 0:
                        r[j] += 1
                    elif better_idv == 1:
                        r[i] += 1
                        break
        non_dominated_front_test_per = F[r == 0]

        # Update the reference point (used for calculating the Hypervolume metric)
        self.reference_point_val_per[0] = max(
            self.reference_point_val_per[0],
            max(non_dominated_front_val_per[:, 0]))
        self.reference_point_val_per[1] = max(
            self.reference_point_val_per[1],
            max(non_dominated_front_val_per[:, 1]))

        self.reference_point_test_per[0] = max(
            self.reference_point_test_per[0],
            max(non_dominated_front_test_per[:, 0]))
        self.reference_point_test_per[1] = max(
            self.reference_point_test_per[1],
            max(non_dominated_front_test_per[:, 1]))

        # Calculate the IGD metric
        IGD_val_per = calculating_IGD(approx_pf=non_dominated_front_val_per,
                                      true_pf=TRUE_PF_VAL_PER)
        IGD_test_per = calculating_IGD(approx_pf=non_dominated_front_test_per,
                                       true_pf=TRUE_PF_TEST_PER)

        if len(self.n_evals_history) == 0:
            self.IGD_val_per_history.append(IGD_val_per)
            self.IGD_test_per_history.append(IGD_test_per)
            self.n_evals_history.append(self.n_evals)
        else:
            if self.n_evals == self.n_evals_history[-1]:
                self.IGD_val_per_history[-1] = IGD_val_per
                self.IGD_test_per_history[-1] = IGD_test_per
            else:
                self.IGD_val_per_history.append(IGD_val_per)
                self.IGD_test_per_history.append(IGD_test_per)
                self.n_evals_history.append(self.n_evals)

        if self.IGD_val_per_history[-1] == 0 \
                and not self.isStoppingConditionsSatisfied:
            self.isStoppingConditionsSatisfied = True
            self.converging_point = self.n_evals

        # Saving the results
        p.dump(
            [non_dominated_front_val_per, self.n_evals],
            open(f'{self.path}/pf_and_n_evals_validation/gen_{self.n_gens}.p',
                 'wb'))
        p.dump([non_dominated_front_test_per, self.n_evals],
               open(f'{self.path}/pf_and_n_evals_testing/gen_{self.n_gens}.p',
                    'wb'))
        p.dump(self.E_Archive,
               open(f'{self.path}/elitist_archive/gen_{self.n_gens}.p', 'wb'))
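The ranking loop in Example #16 (and the commented-out block in Example #14) assumes a pairwise Pareto comparator, `find_better_idv`/`finding_the_better`. A sketch for two minimised objectives, with the contract implied by the surrounding loop:

def find_better_idv(f0_0, f0_1, f1_0, f1_1):
    # returns 0 if individual 0 dominates, 1 if individual 1 dominates,
    # -1 if the two are mutually non-dominated
    if f0_0 <= f1_0 and f0_1 <= f1_1 and (f0_0 < f1_0 or f0_1 < f1_1):
        return 0
    if f1_0 <= f0_0 and f1_1 <= f0_1 and (f1_0 < f0_0 or f1_1 < f0_1):
        return 1
    return -1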