Example #1
    def _generate_trials(self, experiment_spec, output_path=""):
        """Generates trials with configurations from `_suggest`.

        Creates a trial_id that is passed into `_suggest`.

        Yields:
            Trial objects constructed according to `spec`
        """
        if "run" not in experiment_spec:
            raise TuneError("Must specify `run` in {}".format(experiment_spec))
        for _ in range(experiment_spec.get("num_samples", 1)):
            trial_id = Trial.generate_id()
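            # Poll the searcher until it produces a config; until then,
            # yield None to tell the caller that no trial is ready yet.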
            while True:
                suggested_config = self._suggest(trial_id)
                if suggested_config is None:
                    yield None
                else:
                    break
            spec = copy.deepcopy(experiment_spec)
            spec["config"] = merge_dicts(spec["config"], suggested_config)
            flattened_config = resolve_nested_dict(spec["config"])
            self._counter += 1
            tag = "{0}_{1}".format(str(self._counter),
                                   format_vars(flattened_config))
            yield create_trial_from_spec(spec,
                                         output_path,
                                         self._parser,
                                         experiment_tag=tag,
                                         trial_id=trial_id)
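
A minimal sketch of how this generator might be driven; the names `algo`, `experiment_spec`, and `pending` are illustrative and not part of the snippet above:

    gen = algo._generate_trials(experiment_spec)
    pending = []
    while True:
        try:
            trial = next(gen)
        except StopIteration:
            break  # every requested sample has been generated
        if trial is None:
            continue  # searcher not ready yet; poll again
        pending.append(trial)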
Example #2
    def create_trial_if_possible(self, experiment_spec: Dict,
                                 output_path: str) -> Optional[Trial]:
        logger.debug("creating trial")
        trial_id = Trial.generate_id()
        suggested_config = self.searcher.suggest(trial_id)
        if suggested_config == Searcher.FINISHED:
            self._finished = True
            logger.debug("Searcher has finished.")
            return

        if suggested_config is None:
            return
        spec = copy.deepcopy(experiment_spec)
        spec["config"] = merge_dicts(spec["config"],
                                     copy.deepcopy(suggested_config))

        # Build a unique experiment tag from the counter and the config
        flattened_config = resolve_nested_dict(spec["config"])
        self._counter += 1
        tag = "{0}_{1}".format(str(self._counter),
                               format_vars(flattened_config))
        trial = create_trial_from_spec(
            spec,
            output_path,
            self._parser,
            evaluated_params=flatten_dict(suggested_config),
            experiment_tag=tag,
            trial_id=trial_id)
        return trial
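
An illustrative caller for the method above; `gen`, `experiment_spec`, and `runner` are assumed names, and `_finished` is the flag set by the snippet:

    while not gen._finished:
        trial = gen.create_trial_if_possible(experiment_spec, output_path="")
        if trial is None:
            break  # searcher finished, or no config is ready right now
        runner.add_trial(trial)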
Example #3
    def _generate_trials(self, experiment_spec, output_path=""):
        """Generates trials with configurations from `_suggest`.

        Creates a trial_id that is passed into `_suggest`.

        Yields:
            Trial objects constructed according to `spec`
        """
        if "run" not in experiment_spec:
            raise TuneError("Must specify `run` in {}".format(experiment_spec))
        for _ in range(experiment_spec.get("repeat", 1)):
            trial_id = Trial.generate_id()
            while True:
                suggested_config = self._suggest(trial_id)
                if suggested_config is None:
                    yield None
                else:
                    break
            spec = copy.deepcopy(experiment_spec)
            spec["config"] = suggested_config
            yield create_trial_from_spec(
                spec,
                output_path,
                self._parser,
                experiment_tag=format_vars(spec["config"]),
                trial_id=trial_id)
Example #4
    def __init__(self, i, config):
        self.trainable_name = "trial_{}".format(i)
        self.trial_id = Trial.generate_id()
        self.config = config
        self.experiment_tag = "{}tag".format(i)
        self.trial_name_creator = None
        self.logger_running = False
        self.restored_checkpoint = None
        self.resources = Resources(1, 0)
        self.custom_trial_name = None
Example #5
    def __init__(self, config, trial_id=None):
        self.trial_id = Trial.generate_id() if trial_id is None else trial_id
        self.config = config or {}
        self.status = Trial.PENDING
        self.start_time = None
        self.last_result = {}
        self.last_update_time = -float("inf")
        self.custom_trial_name = None
        self.trainable_name = "trainable"
        self.experiment_tag = "exp"
        self.verbose = False
        self.result_logger = Nologger()
        self.metric_analysis = {}
        self.n_steps = [5, 10]
        self.metric_n_steps = {}
Example #6
    def step(self) -> Trial:
        """Runs one step of the trial event loop.
        Callers should typically run this method repeatedly in a loop. They
        may inspect or modify the runner's state in between calls to step().

        returns a Trial to run
        """
        trial_id = Trial.generate_id()
        config = self._search_alg.suggest(trial_id)
        if config:
            trial = SimpleTrial(config, trial_id)
            self.add_trial(trial)
            trial.set_status(Trial.RUNNING)
        else:
            trial = None
        self.running_trial = trial
        return trial
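
A sketch of the loop this method is designed for; `runner` and `evaluate` are assumed names, and `on_trial_complete` is the standard Searcher callback:

    while True:
        trial = runner.step()
        if trial is None:
            break  # the search algorithm had no config to suggest
        result = evaluate(trial.config)  # user-defined evaluation
        runner._search_alg.on_trial_complete(trial.trial_id, result=result)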
Example #7
    def __init__(self,
                 trial_name="",
                 experiment_dir=None,
                 upload_dir=None,
                 trial_config=None,
                 _tune_reporter=None):
        self._experiment_dir = None
        self._logdir = None
        self._upload_dir = None
        self.trial_config = None
        self._iteration = -1
        self.is_tune_session = bool(_tune_reporter)
        self.trial_id = Trial.generate_id()
        if trial_name:
            self.trial_id = trial_name + "_" + self.trial_id
        if self.is_tune_session:
            self._logger = _ReporterHook(_tune_reporter)
        else:
            self._initialize_logging(trial_name, experiment_dir, upload_dir,
                                     trial_config)
Example #8
    def _generate_trials(self, experiment_spec, output_path=""):
        """Generates trials with configurations from `_suggest`.

        Creates a trial_id that is passed into `_suggest`.

        Yields:
            Trial objects constructed according to `spec`
        """
        if "run" not in experiment_spec:
            raise TuneError("Must specify `run` in {}".format(experiment_spec))
        for _ in range(experiment_spec.get("num_samples", 1)):
            trial_id = Trial.generate_id()
            while True:
                suggested_config = self._suggest(trial_id)
                if suggested_config is None:
                    yield None
                else:
                    break
            # spec = copy.deepcopy(experiment_spec)
            # spec["config"] = suggested_config
            self._counter += 1

            def resolve(spec):
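                # Flatten a nested dict into {(key, subkey, ...): value}
                # pairs so format_vars can render it as an experiment tag.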
                res = {}
                for k, v in spec.items():
                    if isinstance(v, dict):
                        for k_, v_ in resolve(v).items():
                            res[(k, ) + k_] = v_
                    else:
                        res[(k, )] = v
                return res

            resolved_config = resolve({"config": suggested_config})
            tag = "{0}_{1}".format(str(self._counter),
                                   format_vars(resolved_config))
            spec = merge_dicts(experiment_spec, {"config": suggested_config})
            yield create_trial_from_spec(spec,
                                         output_path,
                                         self._parser,
                                         experiment_tag=tag,
                                         trial_id=trial_id)
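
For instance, tracing the nested `resolve` helper above on a small input:

    resolve({"config": {"lr": 0.01, "net": {"depth": 3}}})
    # -> {("config", "lr"): 0.01, ("config", "net", "depth"): 3}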
Example #9
    def __init__(self,
                 trial_name=None,
                 experiment_dir=None,
                 upload_dir=None,
                 trial_config=None,
                 _tune_reporter=None):
        self._experiment_dir = None
        self._logdir = None
        self._upload_dir = None
        self.trial_config = None
        self._iteration = -1
        self.is_tune_session = bool(_tune_reporter)
        if self.is_tune_session:
            self._logger = _ReporterHook(_tune_reporter)
            self._logdir = _tune_reporter.logdir
            self._trial_name = _tune_reporter.trial_name
            self._trial_id = _tune_reporter.trial_id
        else:
            self._trial_id = Trial.generate_id()
            self._trial_name = trial_name or self._trial_id
            self._initialize_logging(experiment_dir, upload_dir, trial_config)
Example #10
    def run(self, config):
        import tensorflow as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        trial_id = Trial.generate_id()
        print(f"ID : {trial_id}")
        w = config["node"]
        tf.reset_default_graph()
        # Two-layer linear network X @ W @ W2, trained with MSE loss.
        X = tf.placeholder(tf.float32, shape=[None, 2], name="X")
        y = tf.placeholder(tf.float32, shape=[None, 1], name="y")
        W = tf.get_variable("w", shape=[2, w], dtype=tf.float32)
        W2 = tf.get_variable("ww", shape=[w, 1], dtype=tf.float32)
        layer = tf.matmul(X, W)
        logit = tf.matmul(layer, W2)
        loss = tf.reduce_mean((y - logit)**2)
        optimizer = tf.train.AdamOptimizer(0.005)
        solver = optimizer.minimize(loss, var_list=tf.trainable_variables())
        # Use a separate name for the session config so it does not
        # shadow the hyperparameter `config` argument.
        sess_config = tf.ConfigProto(log_device_placement=False)
        sess_config.gpu_options.allow_growth = True
        sess = tf.Session(config=sess_config)
        sess.run(tf.global_variables_initializer())
        # `data` and `true` are expected to be defined in the enclosing scope.
        for i in range(100):
            _, loss2 = sess.run([solver, loss], feed_dict={X: data, y: true})
        tune.track.log(loss=loss2, done=True)
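
Such a trainable would typically be launched through the older function-based Tune API, along these lines (a sketch: the search space is made up, and `run` is assumed to be exposed as a standalone function):

    from ray import tune

    tune.run(run, config={"node": tune.grid_search([4, 8, 16])})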
Example #11
def create_next(client):
    '''Functional API for HPO.'''
    state = client.get_state()
    setting = client.get_settings_dict()
    if state is None:
        # first time call
        try:
            from ray.tune.trial import Trial
        except ImportError:
            from ..tune.trial import Trial
        method = setting.get('method', 'BlendSearch')
        mode = client.get_optimization_mode()
        if mode == 'minimize':
            mode = 'min'
        elif mode == 'maximize':
            mode = 'max'
        metric = client.get_primary_metric()
        hp_space = client.get_hyperparameter_space_dict()
        space = {}
        for key, value in hp_space.items():
            t = value["type"]
            if t == 'continuous':
                space[key] = uniform(value["min_val"], value["max_val"])
            elif t == 'discrete':
                space[key] = choice(value["values"])
            elif t == 'integral':
                space[key] = randint(value["min_val"], value["max_val"])
            elif t == 'quantized_continuous':
                space[key] = quniform(value["min_val"], value["max_val"],
                                      value["step"])
        init_config = setting.get('init_config', None)
        if init_config:
            points_to_evaluate = [init_config]
        else:
            points_to_evaluate = None
        cat_hp_cost = setting.get('cat_hp_cost', None)

        if method == 'BlendSearch':
            Algo = BlendSearch
        elif method == 'CFO':
            Algo = CFO
        else:
            raise ValueError("unknown method: {}".format(method))
        algo = Algo(
            mode=mode,
            metric=metric,
            space=space,
            points_to_evaluate=points_to_evaluate,
            cat_hp_cost=cat_hp_cost,
        )
        time_budget_s = setting.get('time_budget_s', None)
        if time_budget_s:
            algo._deadline = time_budget_s + time.time()
        config2trialid = {}
    else:
        algo = state['algo']
        config2trialid = state['config2trialid']
    # update finished trials
    trials_completed = []
    for trial in client.get_trials():
        if trial.end_time is not None:
            signature = algo._ls.config_signature(trial.hp_sample)
            if not algo._result[signature]:
                trials_completed.append((trial.end_time, trial))
    trials_completed.sort()
    for t in trials_completed:
        end_time, trial = t
        trial_id = config2trialid[trial.hp_sample]
        result = {}
        result[algo.metric] = trial.metrics[algo.metric].values[-1]
        result[algo.cost_attr] = (end_time - trial.start_time).total_seconds()
        for key, value in trial.hp_sample.items():
            result['config/' + key] = value
        algo.on_trial_complete(trial_id, result=result)
    # propose new trial
    trial_id = Trial.generate_id()
    config = algo.suggest(trial_id)
    if config:
        config2trialid[config] = trial_id
        client.launch_trial(config)
    client.update_state({'algo': algo, 'config2trialid': config2trialid})
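
One way the functional API above might be driven; `client`, `budget_exhausted`, and `poll_interval` are placeholders for the surrounding HPO service:

    import time

    while not budget_exhausted():
        create_next(client)  # reports finished trials, proposes a new one
        time.sleep(poll_interval)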