Example #1
0
    def _fit_models(self, learn_files, fold_id_bias):
        """
        Fit one model per (case, learn fold) pair, then return them all.

        Args:
            :param learn_files: Entities of FoldStorage for learning models.
            :param fold_id_bias: Offset added to the file index to form the global fold id.
            :return: Dictionary where the key is a case and the value is the list of
                     models fitted for that case on the learn folds.
        """
        make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        models = {case: [] for case in self._cases}

        for file_num, learn_file in enumerate(learn_files):
            fold_id = fold_id_bias + file_num
            pool = FoldModelsHandler._create_pool(learn_file,
                                                  self._thread_count)

            for case in self._cases:
                # Each (case, fold) pair gets its own model file on disk.
                model_path = os.path.join(
                    FoldModelsHandler.__MODEL_DIR,
                    FoldModelsHandler._create_model_name(case, fold_id))
                get_eval_logger().debug(
                    "For model {} on fold #{} path is {}".format(
                        str(case), fold_id, model_path))
                fitted = self._fit_model(pool, case, fold_id, model_path)
                get_eval_logger().info(
                    "Model {} on fold #{} was fitted".format(
                        str(case), fold_id))
                models[case].append(fitted)

        return models
    def _fit_models(self, learn_files, fold_id_bias):
        """
        Train a model for every (case, learn fold) combination, then return them.

        Args:
            :param learn_files: Entities of FoldStorage for learning models.
            :param fold_id_bias: Global id assigned to the first learn fold.
            :return: Dictionary where the key is a case and the value is the list of
                     models fitted for that case on the learn folds.
        """
        make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        models = dict()
        for case in self._cases:
            models[case] = []

        fold_id = fold_id_bias
        for learn_file in learn_files:
            pool = FoldModelsHandler._create_pool(learn_file, self._thread_count)

            for case in self._cases:
                model_name = FoldModelsHandler._create_model_name(case, fold_id)
                model_path = os.path.join(FoldModelsHandler.__MODEL_DIR, model_name)
                get_eval_logger().debug("For model {} on fold #{} path is {}".format(str(case), fold_id, model_path))
                fold_model = self._fit_model(pool, case, fold_id, model_path)
                get_eval_logger().info("Model {} on fold #{} was fitted".format(str(case), fold_id))
                models[case].append(fold_model)
            fold_id += 1

        return models
Example #3
0
    def proceed(self, splitter, fold_size, folds_count, fold_offset):
        """
        Run the whole evaluation pipeline: split the data into folds, fit the
        models of every case on each group of learn folds, compute metrics on
        the remaining folds, and collect the stats into per-case results
        (rows correspond to folds, columns to the different algos).

        Args:
            :param splitter: Splitter entity that creates and cleans fold files.
            :param fold_size: The size of each fold.
            :param folds_count: Count of folds.
            :param fold_offset: The offset (count of folds that we want to skip).
            :return: dict mapping metric to CaseEvaluationResult
                     (stored as self._case_results).

        """
        try:
            folds_sets = splitter.create_fold_sets(fold_size, folds_count)
            fold_groups_files = splitter.fold_groups_files_generator(
                folds_sets, fold_offset)
            # Fold ids are global across groups; start numbering after the skipped folds.
            fold_id_bias = fold_offset

            for learn_folds, skipped_folds, rest_folds in fold_groups_files:
                if len(learn_folds) == 0:
                    continue
                list_models = []
                try:
                    permutation_models = self._fit_models(
                        learn_folds, fold_id_bias)
                    # Flatten the per-case model lists so they can all be removed
                    # in the finally clause below.
                    for case, case_models in permutation_models.items():
                        list_models += case_models

                    learn_folds_count = len(learn_folds)
                    get_eval_logger().info(
                        "Start metric computation for folds [{}, {})".format(
                            fold_id_bias, fold_id_bias + learn_folds_count))
                    self._compute_metrics(self._metrics, permutation_models,
                                          learn_folds, skipped_folds,
                                          rest_folds)
                    get_eval_logger().info(
                        "Computation of metrics for  folds [{}, {}) is completed"
                        .format(fold_id_bias,
                                fold_id_bias + learn_folds_count))
                    fold_id_bias += learn_folds_count
                finally:
                    # Clean up after every group (not just at the end) to keep
                    # disk/memory usage bounded, even if fitting/metrics failed.
                    splitter.clean_folds()
                    self._remove_models(list_models)

        finally:
            # Always try to remove the folds directory (and any leftover models),
            # regardless of how the loop above exited.
            self._clean(splitter)

        return self._case_results
Example #4
0
    def remove_dir():
        """
        Remove the default folds directory if it contains no files or models.
        If removal fails (e.g. the directory is not empty), a warning is logged
        instead of raising.

        Args:
            :return: Nothing.

        """
        try:
            if os.path.exists(_FoldFile.default_dir):
                os.rmdir(_FoldFile.default_dir)
        except OSError as err:
            # OSError has no `.message` attribute in Python 3 (err.message would
            # raise AttributeError here); str(err) is the portable form and
            # matches _remove_model_dir's handling.
            get_eval_logger().warning(str(err))
    def remove_dir():
        """
        Remove the default folds directory if it contains no files or models.
        On failure (e.g. non-empty directory) a warning is logged instead of
        raising.

        Args:
            :return: Nothing.

        """
        try:
            if os.path.exists(_FoldFile.default_dir):
                os.rmdir(_FoldFile.default_dir)
        except OSError as err:
            # Python 3's OSError has no `.message` attribute — accessing it would
            # raise AttributeError exactly when an error should be logged.
            # Use str(err), consistent with _remove_model_dir.
            get_eval_logger().warning(str(err))
    def proceed(self, splitter, fold_size, folds_count, fold_offset):
        """
        Execute the full evaluation flow: create fold files, fit every case's
        models on each learn-fold group, compute the metrics, and accumulate
        the stats into per-case results (folds as rows, algos as columns).

        Args:
            :param splitter: Splitter entity that creates and cleans fold files.
            :param fold_size: The size of each fold.
            :param folds_count: Count of folds.
            :param fold_offset: The offset (count of folds that we want to skip).
            :return: dict mapping metric to CaseEvaluationResult.

        """
        try:
            fold_sets = splitter.create_fold_sets(fold_size, folds_count)
            groups = splitter.fold_groups_files_generator(fold_sets, fold_offset)
            # Global fold numbering starts right after the skipped folds.
            current_fold_id = fold_offset

            for learn_group, skipped_group, rest_group in groups:
                if not learn_group:
                    continue
                fitted_models = []
                try:
                    case_to_models = self._fit_models(learn_group, current_fold_id)
                    # Remember every fitted model so the finally clause can delete them.
                    for case_models in case_to_models.values():
                        fitted_models += case_models

                    group_size = len(learn_group)
                    get_eval_logger().info("Start metric computation for folds [{}, {})"
                                           .format(current_fold_id, current_fold_id + group_size))
                    self._compute_metrics(self._metrics, case_to_models,
                                          learn_group, skipped_group, rest_group)
                    get_eval_logger().info("Computation of metrics for  folds [{}, {}) is completed"
                                           .format(current_fold_id, current_fold_id + group_size))
                    current_fold_id += group_size
                finally:
                    # Release disk/memory after every group rather than at the very end.
                    splitter.clean_folds()
                    self._remove_models(fitted_models)

        finally:
            # The folds directory (and any leftover models) must be cleaned even on failure.
            self._clean(splitter)

        return self._case_results
    def _fit_model(pool, case, fold_id, model_path):
        """
        Fit a single CatBoost model for `case` on `pool` and save it to disk.

        Args:
            :param pool: Learning pool for this fold.
            :param case: Execution case providing the CatBoost training params.
            :param fold_id: Global id of the fold (used for logging and FoldModel).
            :param model_path: File path where the trained model is saved.
            :return: FoldModel wrapping the saved model file.

        Raises CatboostError if `ignored_features` references a feature index
        that is not present in the pool.
        """
        from catboost import CatBoost
        make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        # Validate ignored_features against the pool before paying for a fit.
        feature_count = pool.num_col()
        ignored_features = case.get_params().get("ignored_features", [])
        if ignored_features and max(ignored_features) >= feature_count:
            # "which" typo fixed in the user-facing error message.
            raise CatboostError("Error: input parameter contains feature indices which are not available in pool: "
                                "{}\n "
                                "Check eval_feature set and ignored features options".format(ignored_features))
        get_eval_logger().debug('Learn model {} on fold #{}'.format(str(case), fold_id))
        cur_time = time.time()
        instance = CatBoost(params=case.get_params())
        instance.fit(pool)
        instance.save_model(fname=model_path)

        get_eval_logger().debug('Operation was done in {} seconds'.format(time.time() - cur_time))
        return FoldModel(case, model_path, fold_id)
Example #8
0
 def _remove_model_dir():
     """Best-effort removal of the models directory; log a warning on failure."""
     model_dir = FoldModelsHandler.__MODEL_DIR
     try:
         if os.path.exists(model_dir):
             os.rmdir(model_dir)
     except OSError as err:
         get_eval_logger().warning(str(err))
Example #9
0
 def _remove_models(list_models):
     """Delete every fitted fold model in `list_models` from disk."""
     get_eval_logger().debug('Remove models {}'.format(str(list_models)))
     for fold_model in list_models:
         fold_model.delete()
Example #10
0
    def _fit_model(pool, case, fold_id, model_path):
        """
        Fit one CatBoost model for `case` on `pool`, save it, and wrap it.

        Args:
            :param pool: Learning pool for this fold.
            :param case: Execution case providing the CatBoost training params.
            :param fold_id: Global id of the fold (used for logging and FoldModel).
            :param model_path: File path where the trained model is saved.
            :return: FoldModel wrapping the saved model file.

        Raises CatboostError if `ignored_features` references a feature index
        that is not present in the pool.
        """
        from catboost import CatBoost
        make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        # Check ignored_features against the pool before the expensive fit.
        feature_count = pool.num_col()
        ignored_features = case.get_params().get("ignored_features", [])
        if ignored_features and max(ignored_features) >= feature_count:
            # "which" typo fixed in the user-facing error message.
            raise CatboostError(
                "Error: input parameter contains feature indices which are not available in pool: "
                "{}\n "
                "Check eval_feature set and ignored features options".
                format(ignored_features))
        get_eval_logger().debug('Learn model {} on fold #{}'.format(
            str(case), fold_id))
        cur_time = time.time()
        instance = CatBoost(params=case.get_params())
        instance.fit(pool)
        instance.save_model(fname=model_path)

        get_eval_logger().debug(
            'Operation was done in {} seconds'.format(time.time() - cur_time))
        return FoldModel(case, model_path, fold_id)

    def _fit_models(self, learn_files, fold_id_bias):
        """
        Train models for each algorithm and learn dataset(folds). Than return them.

        Args:
 def _remove_model_dir():
     """Remove the (expected-empty) models directory, warning instead of raising."""
     try:
         if not os.path.exists(FoldModelsHandler.__MODEL_DIR):
             return
         os.rmdir(FoldModelsHandler.__MODEL_DIR)
     except OSError as err:
         get_eval_logger().warning(str(err))
 def _remove_models(list_models):
     """Delete the given FoldModel instances' files from disk."""
     get_eval_logger().debug('Remove models {}'.format(str(list_models)))
     for m in list_models:
         m.delete()