Example #1
* ``acq-func`` : Acquisition function

    * ``LCB`` : Lower Confidence Bound
    * ``EI`` : Expected Improvement
    * ``PI`` : Probability of Improvement
    * ``gp_hedge`` : probabilistically choose one of the above at each iteration (default)
"""

import signal

from deephyper.search.hps.optimizer import Optimizer
from deephyper.search import Search
from deephyper.search import util

logger = util.conf_logger('deephyper.search.hps.ambs')

SERVICE_PERIOD = 2  # Delay (seconds) between main loop iterations
CHECKPOINT_INTERVAL = 1  # How many jobs to complete between optimizer checkpoints
EXIT_FLAG = False


def on_exit(signum, stack):
    global EXIT_FLAG
    EXIT_FLAG = True


class AMBS(Search):
    def __init__(self, problem, run, evaluator, **kwargs):
        super().__init__(problem, run, evaluator, **kwargs)
        logger.info("Initializing AMBS")
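For context, the ``acq-func`` values listed at the top of this example are scikit-optimize acquisition functions. A minimal sketch of how such a choice reaches the underlying optimizer, assuming a made-up search space (not taken from this file):

# sketch: forwarding an acquisition-function choice to scikit-optimize
from skopt import Optimizer

opt = Optimizer(
    dimensions=[(1e-4, 1e-1, "log-uniform"), (8, 128)],  # hypothetical space
    base_estimator="RF",       # random-forest surrogate
    acq_func="gp_hedge",       # default: hedges between LCB, EI and PI
)
x = opt.ask()                  # next configuration to evaluate
opt.tell(x, 0.42)              # report a (hypothetical) objective value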
Example #2
import os
import json
from random import random, seed

from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.evaluator.evaluate import Encoder

dhlogger = util.conf_logger("deephyper.search.nas.random")


class Random(NeuralArchitectureSearch):
    """Search class to run a full random neural architecture search. The search is filling every available nodes as soon as they are detected. The master job is using only 1 MPI rank.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.nas.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
    """
    def __init__(self, problem, run, evaluator, **kwargs):

        super().__init__(problem=problem,
                         run=run,
                         evaluator=evaluator,
                         **kwargs)

        seed(self.problem.seed)

        self.free_workers = self.evaluator.num_workers
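The "fills every available worker" behavior from the docstring amounts to a simple produce/consume loop. A sketch under assumptions: the sampling helper and the evaluator calls in the comments are illustrative, not this class's actual methods.

# sketch: random NAS main loop (illustrative names)
import random

def sample_arch_seq(space_sizes):
    # one uniform random choice per architecture decision (hypothetical encoding)
    return [random.randrange(n) for n in space_sizes]

print(sample_arch_seq([5, 5, 3]))  # e.g. [2, 0, 1]

# while not done:                                    # hypothetical driver loop
#     for _ in range(free_workers):
#         evaluator.add_eval({"arch_seq": sample_arch_seq(sizes)})
#     free_workers = len(evaluator.get_finished_evals())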
Example #3
import os
import collections
import numpy as np
import json

from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from deephyper.core.parser import add_arguments_from_signature
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.evaluator.evaluate import Encoder

dhlogger = util.conf_logger("deephyper.search.nas.regevo")

# def key(d):
#     return json.dumps(dict(arch_seq=d['arch_seq']), cls=Encoder)


class RegularizedEvolution(NeuralArchitectureSearch):
    """Regularized evolution.

    https://arxiv.org/abs/1802.01548

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.nas.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
        population_size (int, optional): the number of individuals to keep in the population. Defaults to 100.
        sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to 10.
    """
    def __init__(self,
                 problem,
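The two arguments documented above drive one tournament step of aging evolution: sample_size individuals are drawn at random, the fittest becomes the parent, and its mutated child replaces the oldest member. A sketch of that step per the linked paper; the individual encoding and mutate are assumptions.

# sketch: one aging-evolution tournament (illustrative encoding)
import collections
import random

def evolve_step(population, sample_size, mutate):
    candidates = random.sample(list(population), sample_size)
    parent = max(candidates, key=lambda ind: ind["fitness"])
    child = {"arch_seq": mutate(parent["arch_seq"]), "fitness": None}
    population.append(child)  # deque(maxlen=population_size) ages out the oldest
    return child

population = collections.deque(
    [{"arch_seq": [i], "fitness": float(i)} for i in range(5)], maxlen=5
)
evolve_step(population, sample_size=3, mutate=lambda s: s + [0])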
Example #4
import os
import time
from inspect import signature

import numpy as np
import tensorflow as tf
import horovod.tensorflow.keras as hvd

from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.search import util
from deephyper.nas import arch as a
from deephyper.nas import train_utils as U

logger = util.conf_logger(__name__)

AUTOTUNE = tf.data.experimental.AUTOTUNE


class HorovodTrainerTrainValid:
    def __init__(self, config, model):
        self.cname = self.__class__.__name__

        self.config = config

        self.model = model
        self.callbacks = []

        self.data = self.config[a.data]

        # hyperparameters
        self.config_hp = self.config[a.hyperparameters]
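The horovod import above implies the standard Horovod/Keras data-parallel setup. The calls below are real horovod.tensorflow.keras API, but where this trainer actually issues them is not shown in the excerpt:

# sketch: canonical Horovod-Keras initialization
import tensorflow as tf
import horovod.tensorflow.keras as hvd

hvd.init()                                         # one process per rank
opt = tf.keras.optimizers.Adam(1e-3 * hvd.size())  # common LR-scaling heuristic
opt = hvd.DistributedOptimizer(opt)                # allreduce the gradients
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]  # sync rank-0 weights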
Example #5
import os
import json
from pprint import pprint, pformat
from mpi4py import MPI
import math

from deephyper.evaluator import Evaluator
from deephyper.search import util, Search

from deephyper.search.nas.agent import nas_ppo_sync_a3c

logger = util.conf_logger('deephyper.search.nas.ppo_a3c_sync')

def print_logs(runner):
    logger.debug('num_episodes = {}'.format(runner.global_episode))
    logger.debug(' workers = {}'.format(runner.workers))

def key(d):
    return json.dumps(dict(arch_seq=d['arch_seq']))

LAUNCHER_NODES = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
WORKERS_PER_NODE = int(os.environ.get('DEEPHYPER_WORKERS_PER_NODE', 1))

class NasPPOSyncA3C(Search):
    """Neural Architecture search using proximal policy gradient with synchronous optimization.
    """

    def __init__(self, problem, run, evaluator, **kwargs):
        super().__init__(problem, run, evaluator, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
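The two environment variables above size the worker pool under Balsam. The excerpt stops before they are used; a guess at the arithmetic, flagged as such:

# assumed arithmetic (not shown in the excerpt):
# num_workers = LAUNCHER_NODES * WORKERS_PER_NODE - 1   # minus the master rank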
Example #6
import functools
import math
from sys import float_info

import numpy as np
from numpy import inf

from deephyper.search import util
from skopt import Optimizer as SkOptimizer
from skopt.learning import (
    ExtraTreesRegressor,
    GradientBoostingQuantileRegressor,
    RandomForestRegressor,
)

logger = util.conf_logger("deephyper.search.hps.optimizer.optimizer")


def isnan(x):
    if isinstance(x, float):
        return math.isnan(x)
    elif isinstance(x, np.float64):
        return np.isnan(x)
    else:
        return False


def convert2np(x):
    if x == "nan":
        return np.nan
    elif type(x) is float:
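The type dispatch in isnan matters because math.isnan raises TypeError on non-numeric input, so the string sentinel "nan" must be handled separately. A usage sketch covering only the branches visible above:

print(isnan(float("nan")))  # True
print(isnan("nan"))         # False -- strings never reach math.isnan
print(convert2np("nan"))    # nan  -- mapped to np.nan by the branch above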
Example #7
import os
import time
from inspect import signature

import numpy as np
import tensorflow as tf

from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.search import util
from deephyper.nas import arch as a
from deephyper.nas import train_utils as U

logger = util.conf_logger("deephyper.model.trainer")

import keras


class TrainerTrainValid:
    def __init__(self, config, model):
        self.cname = self.__class__.__name__

        self.config = config

        self.model = model
        self.callbacks = []

        self.data = self.config[a.data]

        self.config_hp = self.config[a.hyperparameters]
        self.optimizer_name = self.config_hp.get(a.optimizer, "adam")
        self.optimizer_eps = self.config_hp.get("epsilon", None)
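The name/epsilon pair read here is typically resolved into a Keras optimizer. The excerpt does not show this trainer's own resolution code, so the following is a hedged sketch:

# sketch (assumption): resolving an optimizer name with an optional epsilon
import tensorflow as tf

def select_optimizer(name, epsilon=None):
    if name == "adam":
        return tf.keras.optimizers.Adam(epsilon=epsilon if epsilon is not None else 1e-7)
    return tf.keras.optimizers.get(name)  # fall back to Keras's registry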
Example #8
import tensorflow as tf

from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from deephyper.evaluator.evaluate import Encoder
from deephyper.search.nas.baselines import logger
from deephyper.search.nas.env.neural_architecture_envs import \
    NeuralArchitectureVecEnv
from deephyper.core.logs.logging import JsonMessage as jm

try:
    from mpi4py import MPI
except ImportError:
    MPI = None

dhlogger = util.conf_logger('deephyper.search.nas.rl')


class ReinforcementLearningSearch(NeuralArchitectureSearch):
    """Represents different kind of RL algorithms working with NAS.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.search.nas.model.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
        alg (str): algorithm to use, among ['ppo2'].
        network (str/function): policy network.
        num_envs (int): number of environments per agent to run in
            parallel; it corresponds to the number of evaluations per
            batch per agent.
        cache_key (str): ...
Example #9
import time

import gym
import numpy as np
from gym import spaces

from deephyper.search import util
from deephyper.search.nas.baselines.common.vec_env import VecEnv
from deephyper.core.logs.logging import JsonMessage as jm

try:
    from mpi4py import MPI
except ImportError:
    MPI = None

dhlogger = util.conf_logger("deephyper.search.nas.env.nas_env_1")


class NasEnv1(VecEnv):
    """Multiple environments neural architecture generation. One environment corresponds to one deep neural network architecture. The observation space corresponds to the action of previous steps.

    Args:
            num_envs (int): number of environments to run in parallel.
            space (dict): neural architecture search space from the Problem.
            evaluator (Evaluator): evaluator to use to evaluate deep neural networks generated.
            structure (KerasStructure): structure to build deep neural networks.
    """
    def __init__(self, num_envs, space, evaluator, structure):
        assert num_envs >= 1

        self.space = space
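A vectorized NAS environment needs spaces matching the docstring: discrete actions, and observations that encode the actions already taken. A sketch consistent with that description; the sizes are hypothetical:

# sketch: spaces for a sequence of discrete architecture decisions
import numpy as np
from gym import spaces

num_choices = 10   # hypothetical choices per decision
seq_len = 5        # hypothetical sequence length
action_space = spaces.Discrete(num_choices)
observation_space = spaces.Box(
    low=0.0, high=float(num_choices), shape=(seq_len,), dtype=np.float32
)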
Example #10
import numpy as np
import tensorflow as tf
from mpi4py import MPI

import deephyper.search.nas.utils.common.tf_util as U
from deephyper.search import util
from deephyper.search.nas.agent.utils import (reward_for_final_timestep,
                                              traj_segment_generator)
from deephyper.search.nas.utils import logger
from deephyper.search.nas.utils._logging import JsonMessage as jm
from deephyper.search.nas.utils.common import (Dataset, explained_variance,
                                               fmt_row, zipsame)
from deephyper.search.nas.utils.common.mpi_adam_async import MpiAdamAsync
from deephyper.search.nas.utils.common.mpi_moments import mpi_moments

dh_logger = util.conf_logger('deephyper.search.nas.agent.pposgd_async')


def add_vtarg_and_adv(seg, gamma, lam):
    """
    Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
    """
    new = np.append(
        seg["new"], 0
    )  # last element is only used for last vtarg, but we already zeroed it if last new = 1
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    T = len(seg["rew"])
    seg["adv"] = gaelam = np.empty(T, 'float32')
    rew = seg["rew"]
    lastgaelam = 0
    for t in reversed(range(T)):
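The loop body is cut off by the excerpt. For reference, the GAE(lambda) recursion used in OpenAI Baselines' PPO, which this module mirrors, is:

#         nonterminal = 1 - new[t + 1]
#         delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
#         gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
#
# followed, outside the loop, by seg["tdlamret"] = seg["adv"] + seg["vpred"],
# i.e. the TD(lambda) value target promised in the docstring.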
Example #11
import json
import os.path as osp

import numpy as np
import tensorflow as tf
from mpi4py import MPI

import deephyper.search.nas.utils.common.tf_util as U
from deephyper.evaluator import Evaluator
from deephyper.search.nas.env import NasEnv
from deephyper.search.nas.utils import bench, logger
from deephyper.search.nas.utils.common import set_global_seeds
from deephyper.search import util
from deephyper.search.nas.utils._logging import JsonMessage as jm

dh_logger = util.conf_logger('deephyper.search.nas.agent.nas_random')


def traj_segment_generator(env, horizon):
    t = 0
    ac = env.action_space.sample()  # not used, just so we have the datatype
    new = True  # marks if we're on first timestep of an episode
    ob = env.reset()

    cur_ep_ret = 0  # return in current episode
    cur_ep_len = 0  # len of current episode
    ep_rets = []  # returns of completed episodes in this segment
    ep_lens = []  # lengths of ...

    ts_i2n_ep = {}
Example #12
import signal
import random

from deephyper.search.hps.optimizer import GAOptimizer
from deephyper.search import Search
from deephyper.search import util

logger = util.conf_logger("deephyper.search.hps.ga")

SERVICE_PERIOD = 2  # Delay (seconds) between main loop iterations
CHECKPOINT_INTERVAL = 10  # How many jobs to complete between optimizer checkpoints
EXIT_FLAG = False


def on_exit(signum, stack):
    global EXIT_FLAG
    EXIT_FLAG = True


class GA(Search):
    def __init__(self, problem, run, evaluator, **kwargs):
        super().__init__(problem, run, evaluator, **kwargs)
        logger.info("Initializing GA")
        self.optimizer = GAOptimizer(self.problem, self.evaluator.num_workers,
                                     self.args)

    @staticmethod
    def _extend_parser(parser):
        parser.add_argument(
            "--ga_num_gen",
            default=100,
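The add_argument call is cut off; a plausible completion for an integer generation count (an assumption):

#             type=int,
#             help="number of generations for the GA",
#         )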
Example #13
import collections
import copy

import ConfigSpace as CS
import numpy as np

from deephyper.problem import HpProblem
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.core.parser import add_arguments_from_signature
from deephyper.search import util
from deephyper.search.nas.regevo import RegularizedEvolution

dhlogger = util.conf_logger("deephyper.search.regevomixed")


class RegularizedEvolutionMixed(RegularizedEvolution):
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
Example #14
import time

import gym
import numpy as np
from gym import spaces

from deephyper.search import util
from deephyper.search.nas.baselines.common.vec_env import VecEnv
from deephyper.core.logs.logging import JsonMessage as jm

try:
    from mpi4py import MPI
except ImportError:
    MPI = None

dhlogger = util.conf_logger(
    'deephyper.search.nas.env.neural_architecture_envs')


class NeuralArchitectureVecEnv(VecEnv):
    """Multiple environments neural architecture generation. One environment corresponds to one deep neural network architecture.

    Args:
            num_envs (int): number of environments to run in parallel.
            space (dict): neural architecture search space from the Problem.
            evaluator (Evaluator): evaluator to use to evaluate deep neural networks generated.
            structure (KerasStructure): structure to build deep neural networks.
    """

    def __init__(self, num_envs, space, evaluator, structure):
        assert num_envs >= 1
Example #15
import tensorflow as tf
import numpy as np
import math
import traceback
from sklearn.metrics import mean_squared_error

import deephyper.search.nas.model.arch as a
import deephyper.search.nas.model.train_utils as U
from deephyper.search import util
from deephyper.search.nas.utils._logging import JsonMessage as jm
from deephyper.search.nas.model.trainer.train_valid import TrainerTrainValid

logger = util.conf_logger('deephyper.model.trainer')


class TrainerRegressorTrainValid(TrainerTrainValid):
    def __init__(self, config, model):
        super().__init__(config, model)
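The sklearn import in this example hints at how a regressor trainer scores validation data; a small illustration with stand-in arrays:

import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([1.0, 2.0, 3.0])         # stand-in validation targets
y_pred = np.array([1.1, 1.9, 3.2])         # stand-in model predictions
print(mean_squared_error(y_true, y_pred))  # lower is better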
Example #16
import collections
import json
import os
import copy

import numpy as np
from skopt import Optimizer as SkOptimizer
from skopt.learning import RandomForestRegressor

from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.core.parser import add_arguments_from_signature
from deephyper.evaluator.evaluate import Encoder
from deephyper.search import util
from deephyper.search.nas.regevo import RegularizedEvolution

dhlogger = util.conf_logger("deephyper.search.nas.agebo")

# def key(d):
#     return json.dumps(dict(arch_seq=d['arch_seq']), cls=Encoder)


class AgEBO(RegularizedEvolution):
    """Aging evolution with Bayesian Optimization.

    This algorithm builds on 'Regularized Evolution' (https://arxiv.org/abs/1802.01548). It combines hyperparameter optimization via Bayesian optimization with neural architecture search via regularized evolution.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.nas.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
        population_size (int, optional): the number of individuals to keep in the population. Defaults to 100.
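The division of labor the docstring describes: a Bayesian optimizer proposes hyperparameters while aging evolution proposes architectures, and each child evaluation consumes one of each. A sketch under assumptions (the search space, mutation, and reward value are illustrative):

# sketch: one AgEBO-style step (illustrative names and values)
import random
from skopt import Optimizer as SkOptimizer
from skopt.learning import RandomForestRegressor

hp_dims = [(1e-4, 1e-1, "log-uniform"), (8, 128)]  # hypothetical hyperparameter space
hp_opt = SkOptimizer(dimensions=hp_dims, base_estimator=RandomForestRegressor())

parent = {"arch_seq": [3, 1, 4, 1, 5]}             # hypothetical parent individual

def mutate(arch_seq, num_choices=10):
    i = random.randrange(len(arch_seq))            # resample one random position
    return arch_seq[:i] + [random.randrange(num_choices)] + arch_seq[i + 1:]

hp_values = hp_opt.ask()                 # BO side: propose hyperparameters
child_arch = mutate(parent["arch_seq"])  # evolution side: propose an architecture
reward = 0.9                             # stand-in for the evaluated objective
hp_opt.tell(hp_values, -reward)          # skopt minimizes, so negate the reward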
Example #17
from torch.optim import Adam
import numpy as np
import gym
from gym.spaces import Discrete, Box

from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.evaluator.evaluate import Encoder

# from deephyper.search.nas.env.neural_architecture_envs import NeuralArchitectureVecEnv
from nascd.env.nasenv import NasEnv2

ENV = NasEnv2

dhlogger = util.conf_logger("nascd.search.rtg_pg")


class RtgPG(NeuralArchitectureSearch):
    """Search class to run a full random neural architecture search. The search is filling every available nodes as soon as they are detected. The master job is using only 1 MPI rank.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.search.nas.model.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
    """

    def __init__(self, problem, run, evaluator, **kwargs):

        super().__init__(problem=problem, run=run, evaluator=evaluator, **kwargs)
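The class name suggests a reward-to-go policy gradient. For reference (the standard formulation, not code from this excerpt), reward-to-go weights each action by the return from that step onward instead of the whole-episode return:

# standard reward-to-go helper (reference implementation)
import numpy as np

def reward_to_go(rews):
    n = len(rews)
    rtgs = np.zeros(n)
    for i in reversed(range(n)):
        rtgs[i] = rews[i] + (rtgs[i + 1] if i + 1 < n else 0)
    return rtgs

print(reward_to_go([1.0, 1.0, 1.0]))  # [3. 2. 1.]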
Example #18
    * ``LCB`` : Lower Confidence Bound
    * ``EI`` : Expected Improvement
    * ``PI`` : Probability of Improvement
    * ``gp_hedge`` : probabilistically choose one of the above at each iteration (default)
"""

import math
import signal

import numpy as np

import skopt
from deephyper.search import Search, util
from deephyper.core.logs.logging import JsonMessage as jm

dhlogger = util.conf_logger("deephyper.search.hps.ambs")

SERVICE_PERIOD = 2  # Delay (seconds) between main loop iterations
CHECKPOINT_INTERVAL = 1  # How many jobs to complete between optimizer checkpoints
EXIT_FLAG = False


def on_exit(signum, stack):
    global EXIT_FLAG
    EXIT_FLAG = True


class AMBS(Search):
    def __init__(
        self,
        problem,
Example #19
import math

import ConfigSpace as CS
import numpy as np
import skopt

from deephyper.problem import HpProblem
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.core.parser import add_arguments_from_signature
from deephyper.evaluator.evaluate import Encoder
from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from skopt import Optimizer as SkOptimizer

dhlogger = util.conf_logger("deephyper.search.nas.ambsmixed")


class AMBSMixed(NeuralArchitectureSearch):
    def __init__(
        self,
        problem,
        run,
        evaluator,
        surrogate_model="RF",
        acq_func="LCB",
        kappa=1.96,
        xi=0.001,
        liar_strategy="cl_min",
        n_jobs=1,
        **kwargs,
    ):
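These constructor defaults line up with scikit-optimize's Optimizer parameters. A hedged sketch of how they are likely forwarded; the dimensions are hypothetical and the actual wiring is not shown in this excerpt:

# sketch: skopt optimizer configured from the arguments above
from skopt import Optimizer as SkOptimizer
from skopt.learning import RandomForestRegressor

opt = SkOptimizer(
    dimensions=[(1e-4, 1e-1, "log-uniform"), (8, 128)],  # hypothetical space
    base_estimator=RandomForestRegressor(n_jobs=1),      # surrogate_model="RF"
    acq_func="LCB",                                      # Lower Confidence Bound
    acq_func_kwargs={"kappa": 1.96, "xi": 0.001},
)

# liar_strategy="cl_min" refers to the constant-liar trick for batched ask():
# pending points are temporarily assigned the minimum observed objective so
# that parallel suggestions do not collapse onto the same point.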
Example #20
.. code-block:: python

    Problem.hyperparameters(
        ...,
        repeat=N,
        ...
    )
"""
import numpy as np
import tensorflow as tf

from deephyper.search import util
from deephyper.nas.run.alpha import run as run_alpha

logger = util.conf_logger("deephyper.search.nas.run")


def run(config: dict) -> float:
    seed = config["seed"]
    repeat = config["hyperparameters"].get("repeat", 1)
    if seed is not None:
        np.random.seed(seed)
        seeds = np.random.randint(0, 2 ** 32 - 1, repeat)

    res_list = []
    for i in range(repeat):
        tf.keras.backend.clear_session()
        if seed is not None:
            config["seed"] = seeds[i]
        res = run_alpha(config)
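The excerpt stops right after run_alpha. A plausible continuation, consistent with res_list above (an assumption; the actual reduction over repeats is not shown):

#         res_list.append(res)
#     return float(np.mean(res_list))  # or another reduction over the repeats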
Example #21
import os
import json
from random import random, seed

from deephyper.search import util
from deephyper.search.nas import NeuralArchitectureSearch
from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.evaluator.evaluate import Encoder

dhlogger = util.conf_logger('deephyper.search.nas.full_random')


class Random(NeuralArchitectureSearch):
    """Search class to run a full random neural architecture search. The search is filling every available nodes as soon as they are detected. The master job is using only 1 MPI rank.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.search.nas.model.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
    """
    def __init__(self, problem, run, evaluator, **kwargs):

        super().__init__(problem=problem,
                         run=run,
                         evaluator=evaluator,
                         **kwargs)

        seed(self.problem.seed)

        if evaluator == 'balsam':
            balsam_launcher_nodes = int(
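The call is cut off mid-expression; the matching pattern in Example #5 suggests the continuation:

#             balsam_launcher_nodes = int(
#                 os.environ.get('BALSAM_LAUNCHER_NODES', 1))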
Example #22
import json
import math
import os
from math import ceil, log
from pprint import pformat, pprint

import tensorflow as tf
from mpi4py import MPI

from deephyper.search import Search, util
from deephyper.search.nas.agent import nas_random

logger = util.conf_logger('deephyper.search.run_nas')


def print_logs(runner):
    logger.debug('num_episodes = {}'.format(runner.global_episode))
    logger.debug(' workers = {}'.format(runner.workers))


def key(d):
    return json.dumps(dict(arch_seq=d['arch_seq']))


LAUNCHER_NODES = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
WORKERS_PER_NODE = int(os.environ.get('DEEPHYPER_WORKERS_PER_NODE', 1))


class RandomAgents(Search):
    """Neural Architecture search using random search.
    """
Example #23
import collections
import json
import os
import copy

import numpy as np
from skopt import Optimizer as SkOptimizer
from skopt.learning import RandomForestRegressor

from deephyper.core.logs.logging import JsonMessage as jm
from deephyper.core.parser import add_arguments_from_signature
from deephyper.evaluator.evaluate import Encoder
from deephyper.search import util
from deephyper.search.nas.regevo import RegularizedEvolution

dhlogger = util.conf_logger("deephyper.search.nas.ae_hpo_nas")

# def key(d):
#     return json.dumps(dict(arch_seq=d['arch_seq']), cls=Encoder)


class AeHpoNas(RegularizedEvolution):
    """Aging evolution with Bayesian Optimization.

    This algorithm builds on 'Regularized Evolution' (https://arxiv.org/abs/1802.01548). It combines hyperparameter optimization via Bayesian optimization with neural architecture search via regularized evolution.

    Args:
        problem (str): Module path to the Problem instance you want to use for the search (e.g. deephyper.benchmark.nas.linearReg.Problem).
        run (str): Module path to the run function you want to use for the search (e.g. deephyper.nas.run.quick).
        evaluator (str): value in ['balsam', 'subprocess', 'processPool', 'threadPool'].
        population_size (int, optional): the number of individuals to keep in the population. Defaults to 100.