Example 1
def generate_variants_compatible(
    unresolved_spec: Dict, constant_grid_search: bool = False, random_state=None
) -> Generator[Tuple[Dict, Dict], None, None]:
    # Pass random_state through when the installed generate_variants accepts
    # it; fall back to the older two-argument call if that raises TypeError.
    try:
        return generate_variants(unresolved_spec, constant_grid_search, random_state)
    except TypeError:
        return generate_variants(unresolved_spec, constant_grid_search)
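A minimal usage sketch for the wrapper above (an illustration only: it assumes Ray Tune is installed and that generate_variants is importable from ray.tune.suggest.variant_generator, the module referenced in the docstrings below; in Ray 2.x the module moved to ray.tune.search.variant_generator). Each yielded pair is (resolved_vars, resolved_spec), where every grid_search/sampling placeholder has been replaced by a concrete value.

from ray import tune
from ray.tune.suggest.variant_generator import generate_variants

spec = {
    "config": {
        "lr": tune.grid_search([1e-3, 1e-2]),  # expanded into one variant each
        "momentum": tune.uniform(0.8, 0.99),   # re-sampled for every variant
    }
}

# generate_variants_compatible(spec) behaves identically here; the wrapper
# only adds the optional random_state pass-through.
for resolved_vars, resolved_spec in generate_variants(spec):
    # resolved_vars maps parameter paths to the drawn values;
    # resolved_spec is the input spec with those values substituted in.
    print(resolved_vars, resolved_spec["config"])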
Example 2
 def _generate_resolved_specs(self, num_samples, unresolved_spec):
     """Needed for slurm_cluster.py
     """
     for _ in range(num_samples):
         # Iterate over list of configs
         for unresolved_cfg in unresolved_spec["config"]:
             unresolved_spec_variant = deepcopy(unresolved_spec)
             unresolved_spec_variant["config"] = unresolved_cfg
             resolved_base_vars = CustomVariantGenerator._extract_resolved_base_vars(unresolved_cfg,
                                                                                     unresolved_spec["config"])
             print("Resolved base cfg vars", resolved_base_vars)
             for resolved_vars, spec in generate_variants(unresolved_spec_variant):
                 resolved_vars.update(resolved_base_vars)
                 print("Resolved vars", resolved_vars)
                 trial_id = "%05d" % self._counter
                 experiment_tag = str(self._counter)
                 if resolved_vars:
                     experiment_tag += "_{}".format(
                         format_vars({k: v for k, v in resolved_vars.items() if "tag" in k}))
                 self._counter += 1
                 yield {
                     "spec": spec,
                     "evaluated_params": flatten_resolved_vars(resolved_vars),
                     "trial_id": trial_id,
                     "experiment_tag": experiment_tag
                 }
Example 3
    def _generate_trials(self, unresolved_spec, output_path=""):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Yields:
            Trial object
        """

        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))
        for _ in range(unresolved_spec.get("num_samples", 1)):
            for resolved_vars, spec in generate_variants(unresolved_spec):
                experiment_tag = str(self._counter)
                if resolved_vars:
                    experiment_tag += "_{}".format(resolved_vars)
                self._counter += 1
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    evaluated_params=resolved_vars,
                    experiment_tag=experiment_tag)
Example 4
    def _generate_trials(self, unresolved_spec, output_path=""):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Yields:
            Trial object
        """

        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))
        for _ in range(unresolved_spec.get("num_samples", 1)):
            for resolved_vars, spec in generate_variants(unresolved_spec):
                experiment_tag = str(self._counter)
                if resolved_vars:
                    experiment_tag += "_{}".format(resolved_vars)
                self._counter += 1
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    experiment_tag=experiment_tag)
Example 5
 def _select(self):
     choices = self.search_space.to_random_choice()
     configs = []
     for _ in range(self.repeat):
         for _, config in generate_variants(choices):
             configs.append(config)
     return configs, None
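The outer repeat loop is needed because, with no grid_search entries, a single pass of generate_variants over a purely random search space yields exactly one resolved config. A small sketch of that behaviour (same import assumption as above; choices stands in for self.search_space.to_random_choice()):

from ray import tune
from ray.tune.suggest.variant_generator import generate_variants

# Stand-in for search_space.to_random_choice(): sampling distributions only.
choices = {"lr": tune.loguniform(1e-4, 1e-1), "dropout": tune.uniform(0.0, 0.5)}

configs = []
for _ in range(3):                       # plays the role of self.repeat
    for _, config in generate_variants(choices):
        configs.append(config)
assert len(configs) == 3                 # one variant per pass, three passes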
Example 6
    def testTuneSampleAPI(self):
        config = {
            "func": tune.sample_from(lambda spec: spec.config.uniform * 0.01),
            "uniform": tune.uniform(-5, -1),
            "quniform": tune.quniform(3.2, 5.4, 0.2),
            "loguniform": tune.loguniform(1e-4, 1e-2),
            "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
            "choice": tune.choice([2, 3, 4]),
            "randint": tune.randint(-9, 15),
            "qrandint": tune.qrandint(-21, 12, 3),
            "randn": tune.randn(10, 2),
            "qrandn": tune.qrandn(10, 2, 0.2),
        }
        for _, (_, generated) in zip(
                range(1000), generate_variants({
                    "config": config
                })):
            out = generated["config"]

            self.assertAlmostEqual(out["func"], out["uniform"] * 0.01)

            self.assertGreaterEqual(out["uniform"], -5)
            self.assertLess(out["uniform"], -1)

            self.assertGreaterEqual(out["quniform"], 3.2)
            self.assertLessEqual(out["quniform"], 5.4)
            self.assertAlmostEqual(out["quniform"] / 0.2,
                                   round(out["quniform"] / 0.2))

            self.assertGreaterEqual(out["loguniform"], 1e-4)
            self.assertLess(out["loguniform"], 1e-2)

            self.assertGreaterEqual(out["qloguniform"], 1e-4)
            self.assertLessEqual(out["qloguniform"], 1e-1)
            self.assertAlmostEqual(out["qloguniform"] / 5e-5,
                                   round(out["qloguniform"] / 5e-5))

            self.assertIn(out["choice"], [2, 3, 4])

            self.assertGreaterEqual(out["randint"], -9)
            self.assertLess(out["randint"], 15)

            self.assertGreaterEqual(out["qrandint"], -21)
            self.assertLessEqual(out["qrandint"], 12)
            self.assertEqual(out["qrandint"] % 3, 0)

            # Very improbable
            self.assertGreater(out["randn"], 0)
            self.assertLess(out["randn"], 20)

            self.assertGreater(out["qrandn"], 0)
            self.assertLess(out["qrandn"], 20)
            self.assertAlmostEqual(out["qrandn"] / 0.2,
                                   round(out["qrandn"] / 0.2))
Example 7
    def _generate_trials(self,
                         num_samples,
                         unresolved_spec,
                         output_path="",
                         points_to_evaluate=None):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Yields:
            Trial object
        """

        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))

        points_to_evaluate = points_to_evaluate or []

        while points_to_evaluate:
            config = points_to_evaluate.pop(0)
            for resolved_vars, spec in get_preset_variants(
                    unresolved_spec, config):
                trial_id = self._uuid_prefix + ("%05d" % self._counter)
                experiment_tag = str(self._counter)
                self._counter += 1
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    evaluated_params=flatten_resolved_vars(resolved_vars),
                    trial_id=trial_id,
                    experiment_tag=experiment_tag)
            num_samples -= 1

        if num_samples <= 0:
            return

        for _ in range(num_samples):
            for resolved_vars, spec in generate_variants(unresolved_spec):
                trial_id = self._uuid_prefix + ("%05d" % self._counter)
                experiment_tag = str(self._counter)
                if resolved_vars:
                    experiment_tag += "_{}".format(format_vars(resolved_vars))
                self._counter += 1
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    evaluated_params=flatten_resolved_vars(resolved_vars),
                    trial_id=trial_id,
                    experiment_tag=experiment_tag)
Example 8
    def __next__(self):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Returns:
            Trial object
        """

        if "run" not in self.unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(
                self.unresolved_spec))

        if self.variants and self.variants.has_next():
            # This block will be skipped upon instantiation.
            # `variants` will be set later after the first loop.
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)

        if self.points_to_evaluate:
            config = self.points_to_evaluate.pop(0)
            self.num_samples_left -= 1
            self.variants = _VariantIterator(
                get_preset_variants(
                    self.unresolved_spec,
                    config,
                    constant_grid_search=self.constant_grid_search,
                    random_state=self.random_state,
                ),
                lazy_eval=self.lazy_eval,
            )
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)
        elif self.num_samples_left > 0:
            self.variants = _VariantIterator(
                generate_variants(
                    self.unresolved_spec,
                    constant_grid_search=self.constant_grid_search,
                    random_state=self.random_state,
                ),
                lazy_eval=self.lazy_eval,
            )
            self.num_samples_left -= 1
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)
        else:
            raise StopIteration
Example 9
    def complete_config(self,
                        partial_config: Dict,
                        lower: Optional[Dict] = None,
                        upper: Optional[Dict] = None) -> Dict:
        ''' generate a complete config from the partial config input
        add minimal resource to config if available
        '''
        if self._reset_times and partial_config == self.init_config:
            # not the first time to complete init_config, use random gaussian
            normalized = self.normalize(partial_config)
            for key in normalized:
                # don't change unordered cat choice
                if key not in self._unordered_cat_hp:
                    if upper and lower:
                        u, l = upper[key], lower[key]
                        gauss_std = u - l or self.STEPSIZE
                        # allowed bound
                        u += self.STEPSIZE
                        l -= self.STEPSIZE
                    elif key in self._bounded_keys:
                        u, l, gauss_std = 1, 0, 1.0
                    else:
                        u, l, gauss_std = np.inf, -np.inf, 1.0
                    if key in self._bounded_keys:
                        u = min(u, 1)
                        l = max(l, 0)
                    delta = self.rand_vector_gaussian(1, gauss_std)[0]
                    normalized[key] = max(l, min(u, normalized[key] + delta))
            # use best config for unordered cat choice
            config = self.denormalize(normalized)
        else:
            # first time init_config, or other configs, take as is
            config = partial_config.copy()
        if partial_config == self.init_config: self._reset_times += 1
        config = flatten_dict(config)
        for key, value in self.space.items():
            if key not in config:
                config[key] = value
        # logger.debug(f'before random {config}')
        for _, generated in generate_variants({'config': config}):
            config = generated['config']
            break
        # logger.debug(f'after random {config}')

        if self._resource:
            config[self.prune_attr] = self.min_resource
        return unflatten_dict(config)
Example 10
def generate_trial_variants(config):
    """
    Generate a configuration for each trial variant, evaluating 'ray.tune'
    functions (grid_search, sample_from, ...) to their final values.

    :param config: Ray tune configuration with 'ray.tune' functions
    :return: list of dict for each trial configuration variant
    """
    trials = []
    num_samples = config["num_samples"]
    for i in range(num_samples):
        for variables, variant in generate_variants(config):
            # Update experiment tag with variant vars
            if len(variables) > 0:
                variant["experiment_tag"] = f"{i}_{format_vars(variables)}"
            else:
                variant["experiment_tag"] = str(i)

            trials.append(variant)

    return trials
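A hypothetical call to generate_trial_variants above (assuming its module imports generate_variants and format_vars from Ray Tune, as the body implies): a two-value grid combined with num_samples = 2 yields four trial configs, each carrying an experiment_tag built from the sample index and the resolved variables.

from ray import tune

config = {
    "num_samples": 2,                       # two random repetitions
    "lr": tune.loguniform(1e-4, 1e-1),      # sampled once per variant
    "hidden": tune.grid_search([64, 128]),  # expanded into one variant each
}

trials = generate_trial_variants(config)
assert len(trials) == 4                     # 2 samples x 2 grid values
for trial in trials:
    print(trial["experiment_tag"], trial["hidden"], trial["lr"])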
Example 11
    def _generate_trials(self, num_samples, unresolved_spec, output_path=""):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Yields:
            Trial object
        """

        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))
        for _ in range(num_samples):
            # Iterate over list of configs
            for unresolved_cfg in unresolved_spec["config"]:
                unresolved_spec_variant = deepcopy(unresolved_spec)
                unresolved_spec_variant["config"] = unresolved_cfg
                resolved_base_vars = CustomVariantGenerator._extract_resolved_base_vars(unresolved_cfg,
                                                                                        unresolved_spec["config"])
                print("Resolved base cfg vars", resolved_base_vars)
                for resolved_vars, spec in generate_variants(unresolved_spec_variant):
                    resolved_vars.update(resolved_base_vars)
                    print("Resolved vars", resolved_vars)
                    trial_id = "%05d" % self._counter
                    experiment_tag = str(self._counter)
                    if resolved_vars:
                        experiment_tag += "_{}".format(
                            format_vars({k: v for k, v in resolved_vars.items() if "tag" in k}))
                    self._counter += 1
                    yield create_trial_from_spec(
                        spec,
                        output_path,
                        self._parser,
                        evaluated_params=flatten_resolved_vars(resolved_vars),
                        trial_id=trial_id,
                        experiment_tag=experiment_tag)
Example 12
    def _generate_trials(self, num_samples, unresolved_spec, output_path=""):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Yields:
            Trial object
        """

        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))
        for _ in range(num_samples):
            for resolved_vars, spec in generate_variants(unresolved_spec):
                while True:
                    if self._num_live_trials() >= self._max_concurrent:
                        yield None
                    else:
                        break

                trial_id = "%05d" % self._counter
                experiment_tag = str(self._counter)
                if resolved_vars:
                    experiment_tag += "_{}".format(format_vars(resolved_vars))
                self._counter += 1
                self._live_trials.add(trial_id)
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    evaluated_params=flatten_resolved_vars(resolved_vars),
                    trial_id=trial_id,
                    experiment_tag=experiment_tag,
                )
Example 13
 def _select(self):
     grid = self.search_space.to_grid_search()
     configs = []
     for _, config in generate_variants(grid):
         configs.append(config)
     return configs, None
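For this grid-based variant of _select, generate_variants enumerates the full cross product of the grid axes in one pass, so no repeat loop is needed. A sketch under the same import assumption as the earlier examples:

from ray import tune
from ray.tune.suggest.variant_generator import generate_variants

# Stand-in for search_space.to_grid_search(): every axis is a grid_search.
grid = {
    "optimizer": tune.grid_search(["sgd", "adam"]),
    "lr": tune.grid_search([0.01, 0.1]),
}

configs = [config for _, config in generate_variants(grid)]
assert len(configs) == 4                 # 2 optimizers x 2 learning rates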
Example 14
 def suggest(self, trial_id: str) -> Optional[Dict]:
     ''' choose thread, suggest a valid config
     '''
     if self._init_used and not self._points_to_evaluate:
         choice, backup = self._select_thread()
         # logger.debug(f"choice={choice}, backup={backup}")
         if choice < 0: return None  # timeout
         self._use_rs = False
         config = self._search_thread_pool[choice].suggest(trial_id)
         skip = self._should_skip(choice, trial_id, config)
         if skip:
             if choice:
                 # logger.info(f"skipping choice={choice}, config={config}")
                 return None
             # use rs
             self._use_rs = True
             for _, generated in generate_variants(
                 {'config': self._ls.space}):
                 config = generated['config']
                 break
             # logger.debug(f"random config {config}")
             skip = self._should_skip(choice, trial_id, config)
             if skip: return None
         # if not choice: logger.info(config)
         if choice or backup == choice or self._valid(config):
             # LS or valid or no backup choice
             self._trial_proposed_by[trial_id] = choice
         else:  # invalid config proposed by GS
             if not self._use_rs:
                 self._search_thread_pool[choice].on_trial_complete(
                     trial_id, {}, error=True)  # tell GS there is an error
             self._use_rs = False
             config = self._search_thread_pool[backup].suggest(trial_id)
             skip = self._should_skip(backup, trial_id, config)
             if skip:
                 return None
             self._trial_proposed_by[trial_id] = backup
             choice = backup
         # if choice: self._pending.add(choice) # local search thread pending
         if not choice:
             if self._ls._resource:
                 # TODO: add resource to config proposed by GS, min or median?
                 config[self._ls.prune_attr] = self._ls.min_resource
         self._result[self._ls.config_signature(config)] = {}
     else:  # use init config
         init_config = self._points_to_evaluate.pop(
             0) if self._points_to_evaluate else self._ls.init_config
         config = self._ls.complete_config(init_config,
                                           self._admissible_min,
                                           self._admissible_max)
         # logger.info(f"reset config to {config}")
         config_signature = self._ls.config_signature(config)
         result = self._result.get(config_signature)
         if result:  # tried before
             # self.on_trial_complete(trial_id, result)
             return None
         elif result is None:  # not tried before
             self._result[config_signature] = {}
         else:
             return None  # running but no result yet
         self._init_used = True
     # logger.info(f"config={config}")
     return config
Example 15
 def suggest(self, trial_id: str) -> Optional[Dict]:
     ''' choose thread, suggest a valid config
     '''
     if self._init_used and not self._points_to_evaluate:
         choice, backup = self._select_thread()
         # print(f"choice={choice}, backup={backup}")
         if choice < 0: return None  # timeout
         self._use_rs = False
         config = self._search_thread_pool[choice].suggest(trial_id)
         # preliminary check; not checking config validation
         skip = self._should_skip(choice, trial_id, config)
         if skip:
             if choice:
                 # print(f"skipping choice={choice}, config={config}")
                 return None
             # use rs when BO fails to suggest a config
             self._use_rs = True
             for _, generated in generate_variants(
                 {'config': self._ls.space}):
                 config = generated['config']
                 break  # get one random config
             # logger.debug(f"random config {config}")
             skip = self._should_skip(choice, trial_id, config)
             if skip: return None
         # if not choice: print(config)
         if choice or self._valid(config):
             # LS or valid or no backup choice
             self._trial_proposed_by[trial_id] = choice
         else:  # invalid config proposed by GS
             # if not self._use_rs:
             #     self._search_thread_pool[choice].on_trial_complete(
             #         trial_id, {}, error=True) # tell GS there is an error
             self._use_rs = False
             if choice == backup:
                 # use CFO's init point
                 init_config = self._ls.init_config
                 config = self._ls.complete_config(init_config,
                                                   self._ls_bound_min,
                                                   self._ls_bound_max)
                 self._trial_proposed_by[trial_id] = choice
             else:
                 config = self._search_thread_pool[backup].suggest(trial_id)
                 skip = self._should_skip(backup, trial_id, config)
                 if skip:
                     return None
                 self._trial_proposed_by[trial_id] = backup
                 choice = backup
         if not choice:  # global search
             if self._ls._resource:
                 # TODO: add resource to config proposed by GS, min or median?
                 config[self._ls.prune_attr] = self._ls.min_resource
             # temporarily relax admissible region for parallel proposals
             self._update_admissible_region(config, self._gs_admissible_min,
                                            self._gs_admissible_max)
         else:
             self._update_admissible_region(config, self._ls_bound_min,
                                            self._ls_bound_max)
             self._gs_admissible_min.update(self._ls_bound_min)
             self._gs_admissible_max.update(self._ls_bound_max)
         self._result[self._ls.config_signature(config)] = {}
     else:  # use init config
         # print("use init config")
         init_config = self._points_to_evaluate.pop(
             0) if self._points_to_evaluate else self._ls.init_config
         config = self._ls.complete_config(init_config, self._ls_bound_min,
                                           self._ls_bound_max)
         # logger.info(f"reset config to {config}")
         config_signature = self._ls.config_signature(config)
         result = self._result.get(config_signature)
         if result:  # tried before
             # self.on_trial_complete(trial_id, result)
             return None
         elif result is None:  # not tried before
             self._result[config_signature] = {}
         else:
             return None  # running but no result yet
         self._init_used = True
         self._trial_proposed_by[trial_id] = 0
     # logger.info(f"config={config}")
     return config
Example 16
    def train_model_on_task(self, task, task_viz, exp_dir, use_ray,
                            use_ray_logging, grace_period,
                            num_hp_samplings, local_mode,
                            redis_address, lca_n, **training_params):
        logger.info("Training dashboard: {}".format(get_env_url(task_viz)))
        t_id = task['id']

        trainable = self.get_trainable(use_ray_logging=use_ray_logging)
        past_tasks = training_params.pop('past_tasks')
        normalize = training_params.pop('normalize')
        augment_data = training_params.pop('augment_data')

        transformations = []
        if augment_data:
            transformations.extend([
                transforms.ToPILImage(),
                transforms.RandomHorizontalFlip(),
                transforms.RandomCrop(32, 4),
                transforms.ToTensor()
            ])
        t_trans = [[] for _ in range(len(task['split_names']))]
        t_trans[0] = transformations
        datasets = trainable._load_datasets(task,
                                            task['loss_fn'],
                                            past_tasks, t_trans, normalize)
        train_loader, eval_loaders = get_classic_dataloaders(datasets,
                                                             training_params.pop(
                                                                 'batch_sizes'))
        model = self.get_model(task_id=t_id, x_dim=task['x_dim'],
                               n_classes=task['n_classes'],
                               descriptor=task['descriptor'],
                               dataset=eval_loaders[:2])

        if use_ray:
            if not ray.is_initialized():
                ray.init(address=redis_address)

            scheduler = None

            training_params['loss_fn'] = tune.function(
                training_params['loss_fn'])
            training_params['optim_func'] = tune.function(self.optim_func)

            init_model_path = os.path.join(exp_dir, 'model_initializations')
            model_file_name = '{}_init.pth'.format(training_params['name'])
            model_path = os.path.join(init_model_path, model_file_name)
            torch.save(model, model_path)

            training_params['model_path'] = model_path
            config = {**self.get_search_space(),
                      'training-params': training_params}
            if use_ray_logging:
                stop_condition = {'training_iteration':
                                      training_params['n_it_max']}
                checkpoint_at_end = False
                keep_checkpoints_num = 1
                checkpoint_score_attr = 'min-Val nll'
            else:
                stop_condition = None
                # loggers = [JsonLogger, MyCSVLogger]
                checkpoint_at_end = False
                keep_checkpoints_num = None
                checkpoint_score_attr = None

            trainable = rename_class(trainable, training_params['name'])
            experiment = Experiment(
                name=training_params['name'],
                run=trainable,
                stop=stop_condition,
                config=config,
                resources_per_trial=self.ray_resources,
                num_samples=num_hp_samplings,
                local_dir=exp_dir,
                loggers=(JsonLogger, CSVLogger),
                checkpoint_at_end=checkpoint_at_end,
                keep_checkpoints_num=keep_checkpoints_num,
                checkpoint_score_attr=checkpoint_score_attr)

            analysis = tune.run(experiment,
                                scheduler=scheduler,
                                verbose=1,
                                raise_on_failed_trial=True,
                                # max_failures=-1,
                                # with_server=True,
                                # server_port=4321
                                )
            os.remove(model_path)
            logger.info("Training dashboard: {}".format(get_env_url(task_viz)))

            all_trials = {t.logdir: t for t in analysis.trials}
            best_logdir = analysis.get_best_logdir('Val nll', 'min')
            best_trial = all_trials[best_logdir]

            # picked_metric = 'accuracy_0'
            # metric_names = {s: '{} {}'.format(s, picked_metric) for s in
            #                 ['Train', 'Val', 'Test']}

            logger.info('Best trial: {}'.format(best_trial))
            best_res = best_trial.checkpoint.result
            best_point = (best_res['training_iteration'], best_res['Val nll'])

            # y_keys = ['mean_loss' if use_ray_logging else 'Val nll', 'train_loss']
            y_keys = ['Val nll', 'Train nll']

            epoch_key = 'training_epoch'
            it_key = 'training_iteration'
            plot_res_dataframe(analysis, training_params['name'], best_point,
                               task_viz, epoch_key, it_key, y_keys)
            if 'entropy' in next(iter(analysis.trial_dataframes.values())):
                plot_res_dataframe(analysis, training_params['name'], None,
                                   task_viz, epoch_key, it_key, ['entropy'])
            best_model = self.get_model(task_id=t_id)
            best_model.load_state_dict(torch.load(best_trial.checkpoint.value))

            train_accs = analysis.trial_dataframes[best_logdir]['Train accuracy_0']
            best_t = best_res['training_iteration']
            t = best_trial.last_result['training_iteration']
        else:
            search_space = self.get_search_space()
            rand_config = list(generate_variants(search_space))[0][1]
            learner_params = rand_config.pop('learner-params', {})
            optim_params = rand_config.pop('optim')


            split_optims = training_params.pop('split_optims')
            if hasattr(model, 'set_h_params'):
                model.set_h_params(**learner_params)
            if hasattr(model, 'train_loader_wrapper'):
                train_loader = model.train_loader_wrapper(train_loader)

            loss_fn = task['loss_fn']
            if hasattr(model, 'loss_wrapper'):
                loss_fn = model.loss_wrapper(task['loss_fn'])

            prepare_batch = _prepare_batch
            if hasattr(model, 'prepare_batch_wrapper'):
                prepare_batch = model.prepare_batch_wrapper(prepare_batch, t_id)

            optim_fact = partial(set_optim_params,
                                 optim_func=self.optim_func,
                                 optim_params=optim_params,
                                 split_optims=split_optims)
            if hasattr(model, 'train_func'):
                f = model.train_func
                t, metrics, b_state_dict = f(train_loader=train_loader,
                                                eval_loaders=eval_loaders,
                                                optim_fact=optim_fact,
                                                loss_fn=loss_fn,
                                                split_names=task['split_names'],
                                                viz=task_viz,
                                                prepare_batch=prepare_batch,
                                                **training_params)
            else:
                optim = optim_fact(model=model)
                t, metrics, b_state_dict = train(model=model,
                                                 train_loader=train_loader,
                                                 eval_loaders=eval_loaders,
                                                 optimizer=optim,
                                                 loss_fn=loss_fn,
                                                 split_names=task['split_names'],
                                                 viz=task_viz,
                                                 prepare_batch=prepare_batch,
                                                 **training_params)
            train_accs = metrics['Train accuracy_0']
            best_t = b_state_dict['iter']
            if 'training_archs' in metrics:
                plot_trajectory(model.ssn.graph, metrics['training_archs'],
                                model.ssn.stochastic_node_ids, task_viz)
                weights = model.arch_sampler().squeeze()
                archs = model.ssn.get_top_archs(weights, 5)
                list_top_archs(archs, task_viz)
                list_arch_scores(self.arch_scores[t_id], task_viz)
                update_summary(self.arch_scores[t_id], task_viz, 'scores')

        if len(train_accs) > lca_n:
            lca_accs = []
            for i in range(lca_n + 1):
                if i in train_accs:
                    lca_accs.append(train_accs[i])
                else:
                    logger.warning('Missing step for {}/{} for lca computation'
                                   .format(i, lca_n))
            lca = np.mean(lca_accs)
        else:
            lca = float('nan')
        stats = {}
        start = time.time()
        # train_idx = task['split_names'].index('Train')
        # train_path = task['data_path'][train_idx]
        # train_dataset = _load_datasets([train_path])[0]
        train_dataset = _load_datasets(task, 'Train')[0]
        stats.update(self.finish_task(train_dataset, t_id, task_viz,
                                      path='drawings'))
        stats['duration'] = {'iterations': t,
                             'finish': time.time() - start,
                             'best_iterations': best_t}
        stats['params'] = {'total': self.n_params(t_id),
                           'new': self.new_params(t_id)}
        stats['lca'] = lca
        return stats
Example 17
 def suggest(self, trial_id: str) -> Optional[Dict]:
     ''' choose thread, suggest a valid config
     '''
     if self._init_used and not self._points_to_evaluate:
         choice, backup = self._select_thread()
         if choice < 0:  # timeout
             return None
         self._use_rs = False
         config = self._search_thread_pool[choice].suggest(trial_id)
         if choice and config is None:
             # local search thread finishes
             if self._search_thread_pool[choice].converged:
                 self._expand_admissible_region()
                 del self._search_thread_pool[choice]
             return None
         # preliminary check; not checking config validation
         skip = self._should_skip(choice, trial_id, config)
         if skip:
             if choice:
                 return None
             # use rs when BO fails to suggest a config
             self._use_rs = True
             for _, generated in generate_variants({'config': self._ls.space}):
                 config = generated['config']
                 break  # get one random config
             skip = self._should_skip(choice, trial_id, config)
             if skip:
                 return None
         if choice or self._valid(config):
             # LS or valid or no backup choice
             self._trial_proposed_by[trial_id] = choice
         else:  # invalid config proposed by GS
             self._use_rs = False
             if choice == backup:
                 # use CFO's init point
                 init_config = self._ls.init_config
                 config = self._ls.complete_config(
                     init_config, self._ls_bound_min, self._ls_bound_max)
                 self._trial_proposed_by[trial_id] = choice
             else:
                 config = self._search_thread_pool[backup].suggest(trial_id)
                 skip = self._should_skip(backup, trial_id, config)
                 if skip:
                     return None
                 self._trial_proposed_by[trial_id] = backup
                 choice = backup
         if not choice:  # global search
             if self._ls._resource:
                 # TODO: min or median?
                 config[self._ls.prune_attr] = self._ls.min_resource
             # temporarily relax admissible region for parallel proposals
             self._update_admissible_region(
                 config, self._gs_admissible_min, self._gs_admissible_max)
         else:
             self._update_admissible_region(
                 config, self._ls_bound_min, self._ls_bound_max)
             self._gs_admissible_min.update(self._ls_bound_min)
             self._gs_admissible_max.update(self._ls_bound_max)
         self._result[self._ls.config_signature(config)] = {}
     else:  # use init config
         init_config = self._points_to_evaluate.pop(
             0) if self._points_to_evaluate else self._ls.init_config
         config = self._ls.complete_config(
             init_config, self._ls_bound_min, self._ls_bound_max)
         config_signature = self._ls.config_signature(config)
         result = self._result.get(config_signature)
         if result:  # tried before
             return None
         elif result is None:  # not tried before
             self._result[config_signature] = {}
         else:  # running but no result yet
             return None
         self._init_used = True
         self._trial_proposed_by[trial_id] = 0
     return config