Code Example #1
File: execution.py  Project: yunasystems/ludwig
    def __init__(self,
                 hyperopt_sampler,
                 output_feature: str,
                 metric: str,
                 split: str,
                 cpu_resources_per_trial: int = None,
                 gpu_resources_per_trial: int = None,
                 kubernetes_namespace: str = None,
                 **kwargs) -> None:
        if ray is None:
            raise ImportError('ray module is not installed. To '
                              'install it, try running pip install ray')
        if not isinstance(hyperopt_sampler, RayTuneSampler):
            raise ValueError(
                'Sampler {} is not compatible with RayTuneExecutor, '
                'please use the RayTuneSampler'.format(hyperopt_sampler))
        HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
                                  metric, split)
        try:
            ray.init('auto', ignore_reinit_error=True)
        except ConnectionError:
            logger.info('Initializing new Ray cluster...')
            ray.init()

        self.search_space = hyperopt_sampler.search_space
        self.num_samples = hyperopt_sampler.num_samples
        self.goal = hyperopt_sampler.goal
        self.search_alg_dict = hyperopt_sampler.search_alg_dict
        self.output_feature = output_feature
        self.metric = metric
        self.split = split
        self.trial_id = 0
        self.cpu_resources_per_trial = cpu_resources_per_trial
        self.gpu_resources_per_trial = gpu_resources_per_trial
        self.kubernetes_namespace = kubernetes_namespace
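This constructor first tries to attach to an already-running Ray cluster (address "auto") and only starts a local one when that connection fails. A minimal stand-alone sketch of the same connect-or-fall-back pattern; the helper name init_ray is ours, not part of ludwig:

import logging

import ray

logger = logging.getLogger(__name__)


def init_ray() -> None:
    """Attach to a running Ray cluster if one exists, otherwise start a local one."""
    if ray.is_initialized():
        return
    try:
        # "auto" looks for an existing cluster; a ConnectionError means none was found.
        ray.init(address="auto", ignore_reinit_error=True)
    except ConnectionError:
        logger.info("No running Ray cluster found, starting a local one...")
        ray.init(ignore_reinit_error=True)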
Code Example #2
File: execution.py  Project: jimthompson5802/ludwig
 def get_metric_score(self, train_stats, eval_stats) -> float:
     if self._has_metric(train_stats, TEST):
         logger.info(
             "Returning metric score from training (test) statistics")
         return self.get_metric_score_from_train_stats(train_stats, TEST)
     elif self._has_eval_metric(eval_stats):
         logger.info("Returning metric score from eval statistics. "
                     "If skip_save_model is True, eval statistics "
                     "are calculated using the model at the last epoch "
                     "rather than the model at the epoch with "
                     "best validation performance")
         return self.get_metric_score_from_eval_stats(eval_stats)
     elif self._has_metric(train_stats, VALIDATION):
         logger.info(
             "Returning metric score from training (validation) statistics")
         return self.get_metric_score_from_train_stats(
             train_stats, VALIDATION)
     elif self._has_metric(train_stats, TRAINING):
         logger.info(
             "Returning metric score from training split statistics, "
             "as no test / validation / eval sets were given")
         return self.get_metric_score_from_train_stats(
             train_stats, TRAINING)
     else:
         raise RuntimeError(
             "Unable to obtain metric score from missing training / eval statistics"
         )
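The method above resolves the score with a fixed priority: test statistics from training, then eval statistics, then validation, then the training split itself. A self-contained sketch of that cascade, using plain dictionaries and a hypothetical helper name (pick_metric_source is ours; TEST / VALIDATION / TRAINING stand in for the ludwig split constants):

TEST, VALIDATION, TRAINING = "test", "validation", "training"


def pick_metric_source(train_stats: dict, eval_stats: dict) -> str:
    """Return which statistics block the metric score would be read from."""
    if train_stats and TEST in train_stats:
        return "train_stats (test)"
    if eval_stats:
        return "eval_stats"
    if train_stats and VALIDATION in train_stats:
        return "train_stats (validation)"
    if train_stats and TRAINING in train_stats:
        return "train_stats (training)"
    raise RuntimeError("no training / eval statistics to score")


# Only validation statistics are available, so the validation split is used.
print(pick_metric_source({"validation": {"accuracy": 0.91}}, {}))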
Code Example #3
File: execution.py  Project: yunasystems/ludwig
 def get_metric_score(self, train_stats, eval_stats) -> float:
     if (train_stats is not None and self.split in train_stats
             # VALIDATION is needed, otherwise we can't figure out the best epoch
             and VALIDATION in train_stats
             and self.output_feature in train_stats[self.split]
             and self.metric in train_stats[self.split][self.output_feature]):
         logger.info("Returning metric score from training statistics")
         return self.get_metric_score_from_train_stats(train_stats)
     else:
         logger.info("Returning metric score from eval statistics. "
                     "If skip_save_model is True, eval statistics "
                     "are calculated using the model at the last epoch "
                     "rather than the model at the epoch with "
                     "best validation performance")
         return self.get_metric_score_from_eval_stats(eval_stats)
Code Example #4
File: execution.py  Project: jimthompson5802/ludwig
 def __init__(
     self,
     hyperopt_sampler,
     output_feature: str,
     metric: str,
     split: str,
     cpu_resources_per_trial: int = None,
     gpu_resources_per_trial: int = None,
     kubernetes_namespace: str = None,
     time_budget_s: Union[int, float, datetime.timedelta] = None,
     max_concurrent_trials: Optional[int] = None,
     **kwargs,
 ) -> None:
     if ray is None:
         raise ImportError("ray module is not installed. To "
                           "install it,try running pip install ray")
     if not isinstance(hyperopt_sampler, RayTuneSampler):
         raise ValueError(
             "Sampler {} is not compatible with RayTuneExecutor, "
             "please use the RayTuneSampler".format(hyperopt_sampler))
     HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
                               metric, split)
     if not ray.is_initialized():
         try:
             ray.init("auto", ignore_reinit_error=True)
         except ConnectionError:
             logger.info("Initializing new Ray cluster...")
             ray.init(ignore_reinit_error=True)
     self.search_space = hyperopt_sampler.search_space
     self.num_samples = hyperopt_sampler.num_samples
     self.goal = hyperopt_sampler.goal
     self.search_alg_dict = hyperopt_sampler.search_alg_dict
     self.scheduler = hyperopt_sampler.scheduler
     self.decode_ctx = hyperopt_sampler.decode_ctx
     self.output_feature = output_feature
     self.metric = metric
     self.split = split
     self.trial_id = 0
     self.cpu_resources_per_trial = cpu_resources_per_trial
     self.gpu_resources_per_trial = gpu_resources_per_trial
     self.kubernetes_namespace = kubernetes_namespace
     self.time_budget_s = time_budget_s
     self.max_concurrent_trials = max_concurrent_trials
     self.sync_config = None
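For reference, a direct instantiation of this constructor might look as follows. This is a hypothetical usage sketch: it assumes the class shown here is RayTuneExecutor (as its own error message suggests), that a RayTuneSampler instance named sampler has already been built elsewhere, and the feature, metric, and split values are illustrative rather than taken from these snippets.

# Hypothetical usage sketch; `sampler` is assumed to be an existing RayTuneSampler.
executor = RayTuneExecutor(
    hyperopt_sampler=sampler,
    output_feature="combined",      # output feature whose metric is optimized
    metric="loss",                  # metric reported by the trials
    split="validation",             # split the metric is read from
    cpu_resources_per_trial=2,
    gpu_resources_per_trial=0,
    kubernetes_namespace=None,
    time_budget_s=3600,             # optional wall-clock budget in seconds
    max_concurrent_trials=4,
)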