Exemplo n.º 1
0
import multiprocessing

from pymoo.algorithms.nsga3 import NSGA3
from pymoo.factory import get_reference_directions
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter

from modact.interfaces.pymoo import PymopProblem

if __name__ == "__main__":
    # Number of worker processes used for parallel objective evaluation.
    n_processes = 8

    # Use the pool as a context manager so the worker processes are always
    # terminated and joined, even if the optimization raises.  (The original
    # code leaked the pool: it was never closed.)
    with multiprocessing.Pool(n_processes) as pool:
        # Wrap the "cts2" MODACT benchmark problem; objective evaluations are
        # dispatched to the pool via starmap.
        problem = PymopProblem("cts2", parallelization=('starmap', pool.starmap))

        mu = 92  # population size

        # Structured "das-dennis" reference directions for NSGA-III,
        # one simplex with 12 partitions per objective.
        ref_dirs = get_reference_directions("das-dennis",
                                            problem.n_obj,
                                            n_partitions=12)

        algorithm = NSGA3(ref_dirs, pop_size=mu, eliminate_duplicates=True)

        # Fixed budget of 200 generations; fixed seed for reproducibility.
        res = minimize(problem, algorithm, ('n_gen', 200), seed=1, verbose=True)

    # Plot the known Pareto front (black line) against the obtained
    # non-dominated set (red points) and save the figure.
    plot = Scatter()
    plot.add(problem.pareto_front(),
             plot_type="line",
             color="black",
             alpha=0.7)
    plot.add(res.F, color="red")
    plot.save("test.png")
Exemplo n.º 2
0
    def search(self):
        """Run the surrogate-assisted architecture search main loop.

        Each iteration: (1) fit an accuracy-predictor surrogate on the archive
        of already-evaluated architectures, (2) use it to select the next
        batch of candidates, (3) evaluate those candidates at high fidelity,
        (4) grow the archive and report hypervolume and surrogate fit quality.
        Per-iteration statistics are written to ``iter_{it}.stats`` under
        ``self.save_path``.

        Archive entries are ``(architecture, top1_err, complexity)`` tuples
        (see the ``zip`` calls below).  Returns ``None``; all results live in
        the archive dumps and optional debug plots.
        """

        if self.resume:
            # Continue a previous run from its dumped archive on disk.
            archive = self._resume_from_dir()
        else:
            # the following lines corresponding to Algo 1 line 1-7 in the paper
            archive = [
            ]  # initialize an empty archive to store all trained CNNs

            # Design Of Experiment
            # NOTE(review): with iterations < 1 the DOE is random-sampled,
            # otherwise a structured initialization is used — confirm intent.
            if self.iterations < 1:
                arch_doe = self.search_space.sample(self.n_doe)
            else:
                arch_doe = self.search_space.initialize(self.n_doe)

            # parallel evaluation of arch_doe
            top1_err, complexity = self._evaluate(arch_doe, it=0)

            # store evaluated / trained architectures
            for member in zip(arch_doe, top1_err, complexity):
                archive.append(member)

        # reference point (nadir point) for calculating hypervolume
        # (worst observed top-1 error and worst observed complexity so far).
        ref_pt = np.array(
            [np.max([x[1] for x in archive]),
             np.max([x[2] for x in archive])])

        # main loop of the search
        for it in range(1, self.iterations + 1):

            # construct accuracy predictor surrogate model from archive
            # Algo 1 line 9 / Fig. 3(a) in the paper
            acc_predictor, a_top1_err_pred = self._fit_acc_predictor(archive)

            # search for the next set of candidates for high-fidelity evaluation (lower level)
            # Algo 1 line 10-11 / Fig. 3(b)-(d) in the paper
            candidates, c_top1_err_pred = self._next(archive, acc_predictor,
                                                     self.n_iter)

            # high-fidelity evaluation (lower level)
            # Algo 1 line 13-14 / Fig. 3(e) in the paper
            c_top1_err, complexity = self._evaluate(candidates, it=it)

            # check for accuracy predictor's performance
            # (correlation between predicted and true errors over archive
            #  fits plus the freshly evaluated candidates)
            rmse, rho, tau = get_correlation(
                np.vstack((a_top1_err_pred, c_top1_err_pred)),
                np.array([x[1] for x in archive] + c_top1_err))

            # add to archive
            # Algo 1 line 15 / Fig. 3(e) in the paper
            for member in zip(candidates, c_top1_err, complexity):
                archive.append(member)

            # calculate hypervolume
            # NOTE: ref_pt is NOT updated, so later points may lie outside it.
            hv = self._calc_hv(
                ref_pt,
                np.column_stack(
                    ([x[1] for x in archive], [x[2] for x in archive])))

            # print iteration-wise statistics
            print("Iter {}: hv = {:.2f}".format(it, hv))
            print(
                "fitting {}: RMSE = {:.4f}, Spearman's Rho = {:.4f}, Kendall’s Tau = {:.4f}"
                .format(self.predictor, rmse, rho, tau))

            # dump the statistics
            # NOTE(review): assumes archive entries are JSON-serializable —
            # confirm the architecture encoding is plain lists/dicts.
            with open(os.path.join(self.save_path, "iter_{}.stats".format(it)),
                      "w") as handle:
                json.dump(
                    {
                        'archive': archive,
                        'candidates': archive[-self.n_iter:],
                        'hv': hv,
                        'surrogate': {
                            'model':
                            self.predictor,
                            'name':
                            acc_predictor.name,
                            'winner':
                            acc_predictor.winner
                            if self.predictor == 'as' else acc_predictor.name,
                            'rmse':
                            rmse,
                            'rho':
                            rho,
                            'tau':
                            tau
                        }
                    }, handle)
            if _DEBUG:
                # plot archive, evaluated candidates, and predicted candidates
                # in (complexity, top-1 accuracy) space.
                plot = Scatter(legend={'loc': 'lower right'})
                F = np.full((len(archive), 2), np.nan)
                F[:,
                  0] = np.array([x[2]
                                 for x in archive])  # second obj. (complexity)
                F[:, 1] = 100 - np.array([x[1]
                                          for x in archive])  # top-1 accuracy
                plot.add(F,
                         s=15,
                         facecolors='none',
                         edgecolors='b',
                         label='archive')
                F = np.full((len(candidates), 2), np.nan)
                F[:, 0] = np.array(complexity)
                F[:, 1] = 100 - np.array(c_top1_err)
                plot.add(F, s=30, color='r', label='candidates evaluated')
                F = np.full((len(candidates), 2), np.nan)
                F[:, 0] = np.array(complexity)
                F[:, 1] = 100 - c_top1_err_pred[:, 0]
                plot.add(F,
                         s=20,
                         facecolors='none',
                         edgecolors='g',
                         label='candidates predicted')
                plot.save(
                    os.path.join(self.save_path, 'iter_{}.png'.format(it)))

        return
Exemplo n.º 3
0
)

# Persist the raw optimization results (decision variables, objectives,
# constraints, constraint violation).  Use a context manager so the file
# handle is closed and flushed even if pickling fails — the original
# passed an inline open() that was never closed.
with open(os.path.join(config.tmp_folder, "genetic_result"), "wb") as result_file:
    pickle.dump(dict(
        X=res.X,
        F=res.F,
        G=res.G,
        CV=res.CV,
    ), result_file)

if config.problem_args["n_obj"] == 2:
    # Two-objective case: scatter-plot the obtained front and save it.
    objective_labels = [
        "similarity",
        "discriminator",
    ]
    front_plot = Scatter(labels=objective_labels)
    front_plot.add(res.F, color="red")
    front_plot.save(os.path.join(config.tmp_folder, "F.jpg"))

if config.problem_args["n_obj"] == 1:
    # Single objective: order the final population by fitness so the best
    # individuals come first before stacking their decision variables.
    ranked = sorted(res.pop, key=lambda individual: individual.F)
    X = np.stack([individual.X for individual in ranked])
else:
    # Multi-objective: take the decision variables as stored.
    X = res.pop.get("X")

# Rebuild the latent-space object from the population and persist its state.
ls = config.latent(config)
ls.set_from_population(X)

torch.save(ls.state_dict(), os.path.join(config.tmp_folder, "ls_result"))

if config.problem_args["n_obj"] == 1:
    X = np.atleast_2d(res.X)
else: