Code Example #1
File: forest16.py  Project: djajetic/AutoML3
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        # Reduce
        y_hat = sum(all_y_hat) / len(self.estimators_)

        return y_hat
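
Every example in this listing leans on _partition_estimators to split the estimators across joblib workers. Below is a minimal sketch of what it computes, modeled on the scikit-learn helper; the function name and the simplified n_jobs handling are illustrative, not the exact upstream code.

import numpy as np

def partition_estimators(n_estimators, n_jobs):
    # Never use more jobs than there are estimators.
    n_jobs = min(n_jobs, n_estimators)
    # Spread the estimators as evenly as possible across the jobs.
    counts = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
    counts[:n_estimators % n_jobs] += 1
    # Cumulative boundaries: job i owns estimators_[starts[i]:starts[i + 1]].
    starts = np.concatenate(([0], np.cumsum(counts)))
    return n_jobs, counts.tolist(), starts

print(partition_estimators(10, 3))  # (3, [4, 3, 3], array([ 0,  4,  7, 10]))
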
Code Example #2
    def predict_proba(self, X):

        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba
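
Note the reduce step: _parallel_predict_proba returns the sum of the probabilities over its chunk of estimators, so summing the per-job results and dividing by n_estimators yields the ensemble mean. A tiny self-contained check (the numbers are made up):

import numpy as np

chunk_sums = [np.array([[0.9, 1.1]]),   # job 0: sum over two estimators
              np.array([[0.3, 0.7]])]   # job 1: sum over one estimator
proba = sum(chunk_sums) / 3             # three estimators in total
print(proba)                            # [[0.4 0.6]]
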
Code Example #3
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs,
                             verbose=self.verbose,
                             backend="threading")(
                                 delayed(_parallel_predict_regression)
                                 (self.estimators_[starts[i]:starts[i + 1]], X)
                                 for i in range(n_jobs))

        # Reduce
        y_hat = sum(all_y_hat) / len(self.estimators_)

        return y_hat
Code Example #4
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        check_is_fitted(self, "estimators_features_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        # Reduce
        y_hat = sum(all_y_hat) / self.n_estimators

        return y_hat
Code Example #5
File: ensemble.py  Project: yanqiuyan/sktime
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, 'estimators_')

        # Check data
        validate_X(X)
        check_X_is_univariate(X)
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs,
                             verbose=self.verbose)(delayed(e.predict_proba)(X)
                                                   for e in self.estimators_)

        return np.sum(all_proba, axis=0) / len(self.estimators_)
Code Example #6
File: forest.py  Project: loli/sklearnef
    def _condense_parallel(self, X, function):
        r"""Runs a function of the trees in parallel and condenses the results."""
        check_is_fitted(self, 'n_outputs_')

        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse=False, order='C')

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        # Parallel loop
        all_res = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
                delayed(_parallel_helper)(e, function, X, check_input=False)
                for e in self.estimators_)
        # Reduce
        res = all_res[0]

        # Single output assumed
        for j in range(1, len(all_res)):
            res += all_res[j]

        res /= len(self.estimators_)

        return res
Code Example #7
    def predict_proba(self, X):
        print("ribes: empieza predict_proba de ribes_RandomForestClassifier_method2")
        #############################
        n_RFF = self.n_RFF
        if n_RFF is None:
            n_RFF = X.shape[1]
        sampler = ribes_RFFSampler(n_components = n_RFF)
        junk = sampler.fit_transform(X)
        #################################
        check_is_fitted(self, 'estimators_')
        # Check data
        junk = self._validate_X_predict(junk)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
                     for j in np.atleast_1d(self.n_classes_)]
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(accumulate_prediction)(e.predict_proba, X, all_proba, lock)
            for e in self.estimators_)

        for proba in all_proba:
            proba /= len(self.estimators_)

        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba
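
This variant avoids materializing one probability array per tree: each estimator's output is summed into shared buffers under a lock. A minimal sketch of such an accumulate_prediction helper, modeled on scikit-learn's private _accumulate_prediction (illustrative, not the exact upstream code):

def accumulate_prediction(predict, X, out, lock):
    # Compute outside the lock; only the in-place sum needs to be guarded.
    prediction = predict(X, check_input=False)
    with lock:
        if len(out) == 1:
            out[0] += prediction
        else:
            # Multi-output case: one accumulator per output.
            for i in range(len(out)):
                out[i] += prediction[i]
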
Code Example #8
    def decision_function(self, X):
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])
        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)
        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        print('decision_function>>>>>>')
        print(all_decisions)
        print('decision_function>>>>>>')

        # Reduce
        decisions = sum(all_decisions) / self.n_estimators
        return decisions
Code Example #9
File: forest.py  Project: yanlirock/scikit-survival
    def _predict(self, predict_fn, X):
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        if predict_fn == "predict":
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)

        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs,
                 verbose=self.verbose,
                 **_joblib_parallel_args(require="sharedmem"))(
                     delayed(_accumulate_prediction)(getattr(e, predict_fn), X,
                                                     [y_hat], lock)
                     for e in self.estimators_)

        y_hat /= len(self.estimators_)

        return y_hat
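
_joblib_parallel_args(require="sharedmem") is a compatibility shim: recent joblib versions accept require=/prefer= keywords directly, while older ones only understand backend=. A rough sketch of the idea (the version parsing is simplified and the real scikit-learn helper is more thorough):

import joblib

def joblib_parallel_args(**kwargs):
    version = tuple(int(p) for p in joblib.__version__.split('.')[:2])
    if version >= (0, 12):
        # Recent joblib understands prefer=/require= natively.
        return kwargs
    # Older joblib: emulate require="sharedmem" with the threading backend.
    if kwargs.get('require') == 'sharedmem':
        return {'backend': 'threading'}
    return kwargs
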
Code Example #10
File: regressors.py  Project: odel4y/trackviewer
    def predict_all_estimators(self, sample):
        """Get the prediction of every estimator separated"""
        X, _ = extract_features.get_matrices_from_samples([sample])
        X = filter_feature_matrix(X, self.features)
        # Most of the code is directly copied from Scikit
        check_is_fitted(self.regressor, 'n_outputs_')

        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        if issparse(X) and (X.indices.dtype != np.intc or
                            X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based "
                             "sparse matrices")

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self.regressor.n_estimators,
                                                        self.regressor.n_jobs)

        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.regressor.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.regressor.estimators_)

        return all_y_hat
Code Example #11
    def _mean_fn(self, X, fn, acc, slice=None):
        # Helper that runs an arbitrary function over the trees in parallel,
        # accumulating the results on the accumulator acc, and returns the
        # mean output. The function fn should take a tree e and return
        # another function g_e, which takes X and check_input as inputs. If
        # slice is not None but a tuple (start, end), only the trees from
        # index start to index end are used. The returned result is
        # essentially: (mean over e in slice)(g_e(X)).
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)

        if slice is None:
            estimator_slice = self.estimators_
        else:
            estimator_slice = self.estimators_[slice[0]:slice[1]]

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(len(estimator_slice), self.n_jobs)
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs,
                 verbose=self.verbose,
                 **_joblib_parallel_args(require="sharedmem"))(
                     delayed(_accumulate_prediction)(fn(e), X, [acc], lock)
                     for e in estimator_slice)
        acc /= len(estimator_slice)
        return acc
Code Example #12
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        check_is_fitted(self, 'estimators_')
        # Check data
        validate_X(X)
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(e.predict)(X, check_input=True) for e in self.estimators_)

        return np.sum(y_hat, axis=0) / len(self.estimators_)
Code Example #13
    def predict_log_proba(self, X):
        check_is_fitted(self, "classes_")
        if hasattr(self.base_estimator_, "predict_log_proba"):
            X = check_array(X, accept_sparse=['csr', 'csc'])

            if self.n_features_ != X.shape[1]:
                raise ValueError("Number of features of the model must "
                                 "match the input. Model n_features is {0} "
                                 "and input n_features is {1} "
                                 "".format(self.n_features_, X.shape[1]))

            n_jobs, n_estimators, starts = _partition_estimators(
                self.n_estimators, self.n_jobs)

            all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_predict_log_proba)(
                    self.estimators_[starts[i]:starts[i + 1]],
                    self.estimators_features_[starts[i]:starts[i + 1]],
                    X,
                    self.n_classes_)
                for i in range(n_jobs))

            log_proba = all_log_proba[0]

            for j in range(1, len(all_log_proba)):
                log_proba = np.logaddexp(log_proba, all_log_proba[j])

            log_proba -= np.log(self.n_estimators)

            return log_proba

        else:
            return np.log(self.predict_proba(X))
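
The logaddexp reduce above computes the log of the mean probability without leaving log space: the per-chunk log-probabilities are combined pairwise and log(n_estimators) is subtracted at the end. A quick numerical check:

import numpy as np

log_p = np.log(np.array([[0.2, 0.8],     # estimator 0
                         [0.6, 0.4]]))   # estimator 1
acc = np.logaddexp(log_p[0], log_p[1]) - np.log(2)
print(np.allclose(np.exp(acc), [0.4, 0.6]))  # True: log of the mean proba
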
Code Example #14
    def predict(self, X, eval_MSE=False):
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._check_X(X)
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat_all = np.zeros(
                (X.shape[0], self.n_outputs_, self.n_estimators),
                dtype=np.float64)
        else:
            y_hat_all = np.zeros((X.shape[0], self.n_estimators),
                                 dtype=np.float64)

        # Parallel loop
        Parallel(n_jobs=n_jobs, verbose=self.verbose,
                 backend="threading")(delayed(save)(e.predict, X, i, y_hat_all)
                                      for i, e in enumerate(self.estimators_))

        # Average over the estimator axis (the last axis in both layouts)
        y_hat = np.mean(y_hat_all, axis=-1).flatten()
        if eval_MSE:
            sigma2 = np.std(y_hat_all, axis=-1, ddof=1)**2.
            sigma2 = sigma2.flatten()
        return (y_hat, sigma2) if eval_MSE else y_hat
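
Because this variant keeps every tree's prediction in y_hat_all, the reduce can return both the ensemble mean and a variance-style uncertainty estimate. A tiny illustration of that reduce for the single-output layout (the values are made up):

import numpy as np

y_hat_all = np.array([[1.0, 2.0, 3.0],    # sample 0: three trees
                      [2.0, 2.0, 2.0]])   # sample 1: three trees
y_hat = np.mean(y_hat_all, axis=-1)                 # ensemble mean
sigma2 = np.std(y_hat_all, axis=-1, ddof=1) ** 2.   # per-sample spread
print(y_hat, sigma2)                                # [2. 2.] [1. 0.]
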
Code Example #15
    def fit(self, X, y, sample_weight=None):
        # classes_ = self.classes_
        random_state = check_random_state(self.random_state)
        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc'])

        # Remap output
        n_samples, self.n_features_ = X.shape
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator()

        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_switches_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
            return self

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
                                                             self.n_jobs)

        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))

        seeds = random_state.randint(MAX_INT, size=n_more_estimators)

        all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                seeds[starts[i]:starts[i + 1]],
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce
        self.estimators_ += list(itertools.chain.from_iterable(
            t[0] for t in all_results))
        self.estimators_switches_ += list(itertools.chain.from_iterable(
            t[1] for t in all_results))

        return self
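
The "advance random state" step is what makes warm-started fits reproducible: drawing and discarding len(estimators_) seeds leaves the generator exactly where a single cold fit of all the trees would be. A small demonstration of that property:

import numpy as np

MAX_INT = np.iinfo(np.int32).max

rs = np.random.RandomState(0)
cold = rs.randint(MAX_INT, size=5)     # seeds for a one-shot fit of 5 trees

rs = np.random.RandomState(0)
rs.randint(MAX_INT, size=3)            # 3 trees fit earlier; skip their seeds
warm = rs.randint(MAX_INT, size=2)     # seeds for the 2 new trees
print(np.array_equal(cold[3:], warm))  # True
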
Code Example #16
File: forest16.py  Project: djajetic/AutoML3
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)
        
        # Bugfix for _parallel_predict_proba which expects a list for multi-label and integer for single-label problems
        if not isinstance(self.n_classes_, int) and len(self.n_classes_) == 1:
            n_classes_ = self.n_classes_[0]
        else:
            n_classes_ = self.n_classes_
        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                n_classes_,
                self.n_outputs_)
            for i in range(n_jobs))

        # Reduce
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba
Code Example #17
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Bugfix for _parallel_predict_proba which expects a list for multi-label and integer for single-label problems
        if not isinstance(self.n_classes_, int) and len(self.n_classes_) == 1:
            n_classes_ = self.n_classes_[0]
        else:
            n_classes_ = self.n_classes_
        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs,
                             verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                n_classes_,
                self.n_outputs_)
            for i in range(n_jobs))

        # Reduce
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba
Code Example #18
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if self.scaling:
            X = self._scale(X)
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs,
                             verbose=self.verbose,
                             backend="threading")(delayed(_parallel_helper)(
                                 e, 'predict_proba', X, check_input=False)
                                                  for e in self.estimators_)

        # Reduce (weighted mean over the estimators)
        if self.n_outputs_ == 1:
            proba = self.estimator_weights[0] * all_proba[0]
            for j in range(1, len(all_proba)):
                proba += self.estimator_weights[j] * all_proba[j]

            # Normalize by the total weight, not a single stale element
            proba /= np.sum(self.estimator_weights)

        else:
            proba = [self.estimator_weights[0] * p for p in all_proba[0]]
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += self.estimator_weights[j] * all_proba[j][k]

            for k in range(self.n_outputs_):
                proba[k] /= np.sum(self.estimator_weights)

        return proba
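
With the normalization fixed, the reduce is simply a weighted mean over the estimators, which np.average expresses directly (the probabilities and weights below are hypothetical):

import numpy as np

all_proba = np.array([[[0.2, 0.8]],    # estimator 0, one sample
                      [[0.6, 0.4]]])   # estimator 1, one sample
weights = np.array([1.0, 3.0])
print(np.average(all_proba, axis=0, weights=weights))  # [[0.5 0.5]]
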
Code Example #19
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if self.scaling:
            X = self._scale(X)
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X,
                                      check_input=False)
            for e in self.estimators_)

        # Reduce (weighted mean over the estimators)
        if self.n_outputs_ == 1:
            proba = self.estimator_weights[0] * all_proba[0]
            for j in range(1, len(all_proba)):
                proba += self.estimator_weights[j] * all_proba[j]

            # Normalize by the total weight, not a single stale element
            proba /= np.sum(self.estimator_weights)

        else:
            proba = [self.estimator_weights[0] * p for p in all_proba[0]]
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += self.estimator_weights[j] * all_proba[j][k]

            for k in range(self.n_outputs_):
                proba[k] /= np.sum(self.estimator_weights)

        return proba
Code Example #20
File: forest.py  Project: Kinteshi/RFEP
    def predict(self, X, mask):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        mask : array que armazena a informaçao de haver ou não haver a arvore na floresta.

        fileCache : Rota onde se encontra as arvores que seram trabalhadas, lembrando que cada colecao e cada fold
            possui um conjunto unico de arvores
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """

        mask = [i == '1' or i == 1 for i in mask]

        self.n_outputs_ = 1

        check_is_fitted(self, 'estimators_')

        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)

        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs,
                 verbose=self.verbose,
                 **_joblib_parallel_args(require="sharedmem"))(
                     delayed(_accumulate_prediction_mod)(e.predict, X, gene,
                                                         [y_hat], lock)
                     for e, gene in zip(self.estimators_, mask))

        # Count the trees kept by the mask
        n_trees = sum(1 for g in mask if g)

        y_hat /= n_trees

        return y_hat
Code Example #21
File: forest.py  Project: Kinteshi/RFEP
    def oob_predict(self, X, y, genes, parallel=True):
        """
        Compute out-of-bag prediction.
        """
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')

        n_samples = y.shape[0]

        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))

        n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, None)

        genes = [i == '1' or i == 1 for i in genes]

        if parallel:
            # Assign chunk of trees to jobs
            n_jobs, _, _ = _partition_estimators(self.n_estimators,
                                                 self.n_jobs)

            # Parallel loop
            lock = threading.Lock()
            Parallel(n_jobs=n_jobs,
                     verbose=self.verbose,
                     **_joblib_parallel_args(require="sharedmem"))(
                         delayed(_oob_accumulate_prediction)(
                             e.predict, X, gene, [predictions, n_predictions],
                             lock, n_samples, n_samples_bootstrap,
                             self.n_outputs_, e.random_state)
                         for e, gene in zip(self.estimators_, genes))
        else:
            for e, gene in zip(self.estimators_, genes):
                if gene:
                    unsampled_indices = _generate_unsampled_indices(
                        e.random_state, n_samples, n_samples_bootstrap)
                    p_estimator = e.predict(X[unsampled_indices, :],
                                            check_input=False)

                    if self.n_outputs_ == 1:
                        p_estimator = p_estimator[:, np.newaxis]

                    predictions[unsampled_indices, :] += p_estimator
                    n_predictions[unsampled_indices, :] += 1

        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            n_predictions[n_predictions == 0] = 1

        predictions /= n_predictions
        return predictions
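
The final division is per sample: each prediction sum is divided by the number of trees for which that sample was out-of-bag, with zero counts patched to 1 after the warning. In miniature:

import numpy as np

predictions = np.array([[3.0], [0.0], [4.0]])    # summed OOB predictions
n_predictions = np.array([[2.0], [0.0], [1.0]])  # OOB tree counts per sample
n_predictions[n_predictions == 0] = 1            # avoid division by zero
print(predictions / n_predictions)               # [[1.5] [0.] [4.]]
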
Code Example #22
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample are computed
        as the log of the mean predicted class probabilities of the base
        estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        if hasattr(self.base_estimator_, "predict_log_proba"):
            # Check data
            X = check_array(X, accept_sparse=['csr', 'csc'])

            if self.n_features_ != X.shape[1]:
                raise ValueError("Number of features of the model must "
                                 "match the input. Model n_features is {0} "
                                 "and input n_features is {1} "
                                 "".format(self.n_features_, X.shape[1]))

            # Parallel loop
            n_jobs, n_estimators, starts = _partition_estimators(
                self.n_estimators, self.n_jobs)

            all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_predict_log_proba)(
                    self.estimators_[starts[i]:starts[i + 1]],
                    self.estimators_features_[starts[i]:starts[i + 1]],
                    X,
                    self.n_classes_)
                for i in range(n_jobs))

            # Reduce
            log_proba = all_log_proba[0]

            for j in range(1, len(all_log_proba)):
                log_proba = np.logaddexp(log_proba, all_log_proba[j])

            log_proba -= np.log(self.n_estimators)

            return log_proba

        else:
            return np.log(self.predict_proba(X))
Code Example #23
File: iforest.py  Project: fulQuan/Isolation-Forest
    def predict(self, X):
        """Predict anomaly score of X with the IsolationForest algorithm.

        The anomaly score of an input sample is computed as
        the mean anomaly scores of the trees in the forest.

        The measure of normality of an observation given a tree is the depth
        of the leaf containing this observation, which is equivalent to the
        number of splittings required to isolate this point. In case of
        several observations n_left in the leaf, the average path length of
        a n_left samples isolation tree is added.

        Parameters
        ----------
        X : array-like or sparse matrix of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        scores : array of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more normal.
        """
        # code structure from ForestClassifier/predict_proba
        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        n_samples = X.shape[0]

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self.n_estimators,
                                                        self.n_jobs)

        # Parallel loop
        results = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree.tree_, 'apply_depth', X)
            for tree in self.estimators_)

        # Reduce
        results = np.array(results)
        scores = np.zeros(n_samples)
        depth = np.mean(results, axis=0)

        for k in range(n_samples):
            scores[k] = np.power(2, - depth[k] / self._cost(self.max_samples))

        return scores
Code Example #24
File: iforest.py  Project: aiyi2099/Isolation-Forest
    def predict(self, X):
        """Predict anomaly score of X with the IsolationForest algorithm.

        The anomaly score of an input sample is computed as
        the mean anomaly scores of the trees in the forest.

        The measure of normality of an observation given a tree is the depth
        of the leaf containing this observation, which is equivalent to the
        number of splittings required to isolate this point. In case of
        several observations n_left in the leaf, the average path length of
        a n_left samples isolation tree is added.

        Parameters
        ----------
        X : array-like or sparse matrix of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        scores : array of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more normal.
        """
        # code structure from ForestClassifier/predict_proba
        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        n_samples = X.shape[0]

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        # Parallel loop
        results = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
                delayed(_parallel_helper)(tree.tree_, 'apply_depth', X)
                for tree in self.estimators_)

        # Reduce
        results = np.array(results)
        scores = np.zeros(n_samples)
        depth = np.mean(results, axis=0)

        for k in range(n_samples):
            scores[k] = np.power(2, -depth[k] / self._cost(self.max_samples))

        return scores
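
The score 2 ** (-depth / self._cost(self.max_samples)) follows the Isolation Forest paper: the mean leaf depth is normalized by c(n), the average path length of an unsuccessful binary-search-tree lookup. A sketch of that normalizer and the scoring step (average_path_length here merely stands in for whatever self._cost computes):

import numpy as np

def average_path_length(n):
    # c(n) from the Isolation Forest paper, via the harmonic number H(n - 1).
    if n <= 1:
        return 1.0
    harmonic = np.log(n - 1) + np.euler_gamma
    return 2.0 * harmonic - 2.0 * (n - 1) / n

depth = np.array([3.2, 11.7])    # mean leaf depth of two samples
scores = 2.0 ** (-depth / average_path_length(256))
print(scores)  # the shallower sample scores closer to 1, i.e. more anomalous
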
Code Example #25
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class
        probabilities of an input sample represent the proportion of
        estimators predicting each class.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba
Code Example #26
File: ensemble.py  Project: shafiahmed/skpro
    def predict(self, X):
        """ Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        averaged predicted distributions of the estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : skpro.base.Distribution = [n_samples]
            The predicted bagged distributions.
        """

        # Ensure the estimator has been fitted
        check_is_fitted(self, "estimators_features_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        # Partition the estimators (the predictions below are computed serially)
        from sklearn.ensemble.base import _partition_estimators
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        def _parallel_predict_regression(estimators, estimators_features, X):
            """ Private function used to compute predictions within a job. """
            return [
                estimator.predict(X[:, features])
                for estimator, features in zip(estimators, estimators_features)
            ]

        # Obtain predictions
        all_y_hat = [
            _parallel_predict_regression(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs)
        ]

        # Reduce
        return self._distribution()(self, X, all_y_hat, n_estimators)
Code Example #27
    def predict_proba(self, X):
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]], X, self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba
Code Example #28
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k == n_classes``.

        """
        check_is_fitted(self, "classes_")

        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X)
            for i in range(n_jobs))

        # Reduce
        decisions = sum(all_decisions) / self.n_estimators

        return decisions
Code Example #29
def forest_regressor_predict(self, X):
    """Copy of the RandomForest Regression predict code, while retaining the entire ensemble in self.all_y_hat """
    # Check data
    X = self._validate_X_predict(X)

    # Assign chunk of trees to jobs
    n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

    # Parallel loop
    all_y_hat = Parallel(
        n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.estimators_)

    # Reduce
    y_hat = sum(all_y_hat) / len(self.estimators_)
    """ This is the ONLY line changed: Save the decision tree results to the object for external use """
    self.all_y_hat = all_y_hat

    return y_hat
Code Example #30
File: arc_x4.py  Project: naranil/ensemble_comparison
    def predict_proba(self, X):
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba
Code Example #31
File: cc.py  Project: xiaohan2012/mynlp
    def fit(self, X, Y):
        X, Y = map(np.atleast_2d, (X, Y))
        assert X.shape[0] == Y.shape[0]
        Ny = Y.shape[1]

        self.estimators_ = []
        n_jobs, n_estimators, starts = _partition_estimators(Ny, self.n_jobs)

        results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_estimator)(
                n_estimators[i],
                self,
                X,
                Y,
                starts[i],
                verbose=self.verbose)
            for i in range(n_jobs))

        self.estimators_ += list(itertools.chain.from_iterable(results))

        return self
Code Example #32
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)

        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(accumulate_prediction)(e.predict, X, [y_hat], lock)
            for e in self.estimators_)

        y_hat /= len(self.estimators_)

        return y_hat
Code Example #33
File: forest.py  Project: Kinteshi/RFEP
    def oob_predict_buffer(self, X, y, parallel=True):

        X = check_array(X, dtype=DTYPE, accept_sparse='csr')

        n_samples = X.shape[0]

        n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, None)

        prediction_buffer = np.zeros(
            (n_samples_bootstrap, len(self.estimators_)), dtype='float32')

        prediction_buffer[:, :] = np.nan

        if parallel:
            # Assign chunk of trees to jobs
            n_jobs, _, _ = _partition_estimators(self.n_estimators,
                                                 self.n_jobs)

            # Parallel loop
            lock = threading.Lock()
            Parallel(
                n_jobs=n_jobs,
                verbose=self.verbose,
                **_joblib_parallel_args(require="sharedmem"))(
                    delayed(_oob_bufferize_prediction)(
                        e.predict, X, estimator, prediction_buffer, lock,
                        n_samples, n_samples_bootstrap, self.n_outputs_,
                        e.random_state)
                    for estimator, e in enumerate(self.estimators_))
        else:
            for estimator, e in enumerate(self.estimators_):
                unsampled_indices = _generate_unsampled_indices(
                    e.random_state, n_samples, n_samples_bootstrap)
                p_estimator = e.predict(X[unsampled_indices, :],
                                        check_input=False)
                prediction_buffer[unsampled_indices, estimator] = p_estimator

        self.__buffer = prediction_buffer
Code Example #34
    def predict_proba(self, X):
        # Check data
        X = self.check_X(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, 2)

        # avoid storing the output of every estimator by summing them here
        all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
                     for j in np.atleast_1d(self.n_classes_)]
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, backend="threading")(
            delayed(accumulate_prediction)(e.predict_proba, X, all_proba, lock)
            for e in self.estimators_)

        for proba in all_proba:
            proba /= len(self.estimators_)

        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba
Code Example #35
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))

        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        decisions = sum(all_decisions) / self.n_estimators

        return decisions
Code Example #36
    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Build an ensemble of estimators from the training
           set (X, y) using a sliding window.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        max_samples : int or float, optional (default=None)
            Argument to use instead of self.max_samples.

        max_depth : int, optional (default=None)
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc'])

        # Remap output
        n_samples, self.n_features_ = X.shape
        self._n_samples = n_samples
        y = self._validate_y(y)

        if not 0 < self.window_size <= self.n_features_:
            raise ValueError("window_size not valid")

        if not 0 < self.stride <= self.window_size:
            raise ValueError("stride not valid")

        # Check parameters
        if not self.circular_features:
            self.n_windows_ = int(np.ceil(
                (self.n_features_ - self.window_size) / self.stride) + 1)
        else:
            # it is independent of window_size
            self.n_windows_ = len(range(0, self.n_features_, self.stride))

        if self.n_estimators is None:
            self.n_estimators = self.n_estimators_window * self.n_windows_
        else:
            self.n_estimators_window = int(self.n_estimators / self.n_windows_)
            self.n_estimators = self.n_estimators_window * self.n_windows_

        # _check_estimator(self.base_estimator)  # ?
        self._validate_estimator()

        if max_depth is not None:
            self.base_estimator_.max_depth = max_depth

        # Validate max_samples
        if max_samples is None:
            max_samples = self.max_samples
        elif not isinstance(max_samples, (numbers.Integral, np.integer)):
            max_samples = int(max_samples * X.shape[0])

        if not 0 < max_samples <= X.shape[0]:
            raise ValueError("max_samples must be in (0, n_samples]")

        # Store validated integer row sampling value
        self._max_samples = max_samples

        # Validate max_features
        if isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.window_size)

        if (self.bootstrap_features and max_features <= 0) or \
                not 0 < max_features <= self.window_size:
            raise ValueError("max_features must be in (0, window_size] if not"
                             " bootstrap_features.")

        # Store validated integer feature sampling value
        self._max_features = max_features

        # Other checks
        if not self.bootstrap and self.oob_score and \
                self._max_samples >= n_samples:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True or max_samples < n_samples")

        # if self.warm_start and self.oob_score:
        #     raise ValueError("Out of bag estimate only available"
        #                      " if warm_start=False")

        # if hasattr(self, "oob_score_") and self.warm_start:
        #     del self.oob_score_

        # if not self.warm_start or len(self.estimators_) == 0:
        # if len(self.estimators_) == 0:  # TODO think about warm_start or adding estimators
        # Free allocated memory, if any
        self.estimators_ = []
        self.estimators_features_ = []
        self._estimators_samples = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 1:
            raise ValueError('n_estimators=%d must be greater than or equal '
                             'to 1' % (self.n_estimators))

        # using oob_score, take only "code_size" best estimators
        if isinstance(self.code_size, (numbers.Integral, np.integer)):
            self.code_size_ = self.code_size
        elif isinstance(self.code_size, (numbers.Real, float)):  # float
            self.code_size_ = int(self.code_size * self.n_estimators)
        elif self.code_size is None or self.code_size == 'auto':
            self.code_size_ = self.n_estimators
        elif self.code_size == 'sqrt':
            self.code_size_ = int(np.sqrt(self.n_estimators))
        elif self.code_size == 'log2':
            self.code_size_ = int(np.log2(self.n_estimators))
        else:
            raise ValueError("Value for code_size '{}' unrecognized".format(
                self.code_size))

        if self.code_size_ <= 0:
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size_))

        if self.verbose:
            print("You are about to generate {0} estimators for {1} windows, "
                  "for a total of {2} estimators.".format(
                      self.n_estimators_window, self.n_windows_,
                      self.n_estimators))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            n_more_estimators, self.n_jobs)
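        # _partition_estimators splits n_more_estimators across n_jobs:
        # n_estimators[i] is how many estimators job i builds, and
        # starts[i]:starts[i + 1] is that job's slice of the seed arrays.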
        total_n_estimators = sum(n_estimators)

        # Advance random state to state after training
        # the first n_estimators
        # if self.warm_start and len(self.estimators_) > 0:
        #     random_state.randint(MAX_INT, size=len(self.estimators_))
        if self.single_seed_features:
            # different feature seeds within a single window; the same block
            # is then repeated for every shifted window
            seeds_features = np.tile(
                random_state.randint(MAX_INT, size=self.n_estimators_window),
                self.n_windows_)
            seeds_max_features = np.tile(
                random_state.randint(MAX_INT, size=self.n_estimators_window),
                self.n_windows_)
        else:
            seeds_features = random_state.randint(MAX_INT,
                                                  size=n_more_estimators)
            seeds_max_features = random_state.randint(MAX_INT,
                                                      size=n_more_estimators)

        self._seeds_features = seeds_features
        self._seeds_max_features = seeds_max_features
        if self.verbose > 1:
            print("Seeds features: %s" % seeds_features)

        if self.single_seed_samples:
            seeds = np.tile(random_state.randint(MAX_INT, size=1),
                            n_more_estimators)
        else:
            seeds = random_state.randint(MAX_INT, size=n_more_estimators)

        self._seeds = seeds
        if self.verbose > 1:
            print("Seeds samples: %s" % seeds)

        # Window start offsets, sorted so that each offset is repeated
        # n_estimators_window times (once per estimator in that window)
        start_index = iter(
            sorted(self.n_estimators_window *
                   list(range(0, self.n_features_, self.stride))))

        self.estimators_splits_ = []
        all_results = jl.Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            jl.delayed(_parallel_build_estimators)
            (n_estimators[i],
             self,
             X,
             y,
             sample_weight,
             seeds_features[starts[i]:starts[i + 1]],
             seeds[starts[i]:starts[i + 1]],
             seeds_max_features[starts[i]:starts[i + 1]],
             total_n_estimators,
             start_index=list(itertools.islice(start_index, n_estimators[i])),
             verbose=self.verbose,
             circular_features=self.circular_features,
             draw_max_features=self.draw_max_features) for i in range(n_jobs))

        # Reduce
        self.estimators_ += list(
            itertools.chain.from_iterable(t[0] for t in all_results))
        self.estimators_features_ += list(
            itertools.chain.from_iterable(t[1] for t in all_results))
        self._estimators_samples += list(
            itertools.chain.from_iterable(t[2] for t in all_results))
        self.estimators_splits_ += list(
            itertools.chain.from_iterable(t[3] for t in all_results))

        if self.oob_score:
            self._set_oob_score(X, y)

        # sliding window
        # for start in range(0, n_features - self.window_size + 1 + (
        #         n_features - self.window_size) % self.stride, self.stride):
        #     samples, features = [], []
        #     y_binary_splits = []
        #     for p in range(self.n_estimators):
        #         random_state = check_random_state(self.random_state)
        #
        #         # prepare the features
        #         # 1. choose randomly the max features belonging to [0, k-1]
        #         # 1a. how many?
        #         n_features_window = np.random.randint(1, min(
        #             self.max_features, n_features - start - 1))
        #         # 1b. which ones?
        #         features.append(np.random.randint(
        #             start,
        #             min(start + self.window_size, n_features),
        #             n_features_window))
        #
        #         # 2. split y and binarise it
        #         y_binary_splits.append(random_binarizer(y))
        #
        #     estimators_ = jl.Parallel(n_jobs=self.n_jobs)(
        #         jl.delayed(_fit_binary)(
        #             self.base_estimator, X[mask][:, feats], y_binary[mask])
        #         for mask, feats, y_binary in zip(samples, features, y_binary_splits))
        #
        #     self.estimators_.extend(estimators_)
        #     self.estimator_features_.extend(features)
        #     self.estimator_splits_.extend(y_binary_splits)

        return self
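The interplay of single_seed_features and np.tile above is what makes every window reuse the same per-estimator feature seeds. A minimal sketch of the two seeding strategies, assuming only NumPy (the sizes are invented for illustration; n_estimators_window and n_windows mirror the attributes used above):

import numpy as np

MAX_INT = np.iinfo(np.int32).max  # same bound the seeds above are drawn from

def draw_feature_seeds(random_state, n_estimators_window, n_windows,
                       single_seed_features):
    # Tiled: one seed per estimator in a window, repeated for every window,
    # so every window reuses the seed block of window 0.
    if single_seed_features:
        return np.tile(
            random_state.randint(MAX_INT, size=n_estimators_window),
            n_windows)
    # Independent: every estimator draws its own seed.
    return random_state.randint(MAX_INT, size=n_estimators_window * n_windows)

seeds = draw_feature_seeds(np.random.RandomState(0),
                           n_estimators_window=3, n_windows=2,
                           single_seed_features=True)
print(seeds)  # the same 3-seed block appears twice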
Code example #37
0
    def fit(self, X, y):
        """Build a Bagging ensemble of estimators from the training
           set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).


        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc'])

        # Remap output
        n_samples, self.n_features_ = X.shape
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator()

        if isinstance(self.max_samples, (numbers.Integral, np.integer)):
            max_samples = self.max_samples
        else:  # float
            max_samples = int(self.max_samples * X.shape[0])

        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")

        if isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        if self.warm_start and self.oob_score:
            raise ValueError("Out of bag estimate only available"
                             " if warm_start=False")

        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_

        if not self.warm_start or len(self.estimators_) == 0:
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_samples_ = []
            self.estimators_features_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be greater than or equal'
                             ' to len(estimators_)=%d when warm_start==True' %
                             (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
            return self

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            n_more_estimators, self.n_jobs)

        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))

        seeds = random_state.randint(MAX_INT, size=n_more_estimators)

        all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            # TEF: changed following call to balanced procedure:
            delayed(_parallel_build_balanced_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                seeds[starts[i]:starts[i + 1]],
                verbose=self.verbose) for i in range(n_jobs))

        # Reduce
        self.estimators_ += list(
            itertools.chain.from_iterable(t[0] for t in all_results))
        self.estimators_samples_ += list(
            itertools.chain.from_iterable(t[1] for t in all_results))
        self.estimators_features_ += list(
            itertools.chain.from_iterable(t[2] for t in all_results))

        if self.oob_score:
            self._set_oob_score(X, y)

        return self
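The warm-start branch above advances the random state by one draw per already-fitted estimator, so a second fit hands the new estimators exactly the seeds they would have received in a single larger fit. A minimal sketch of that property, assuming only NumPy (the counts 5, 3, and 2 are invented for illustration):

import numpy as np

MAX_INT = np.iinfo(np.int32).max

# A single fit of 5 estimators draws 5 seeds from one stream.
seeds_single_fit = np.random.RandomState(42).randint(MAX_INT, size=5)

# Warm start: re-create the stream, discard one draw per existing estimator,
# then draw seeds for the new estimators only.
rs = np.random.RandomState(42)
rs.randint(MAX_INT, size=3)                # advance past 3 existing estimators
seeds_warm_start = rs.randint(MAX_INT, size=2)

assert (seeds_warm_start == seeds_single_fit[3:]).all()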
Code example #38
0
File: pulearning.py Project: lcreyes/GI
    def fit(self, X, y, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
           set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])

        # Remap output
        n_samples, self.n_features_ = X.shape
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator()

        if isinstance(self.max_samples, (numbers.Integral, np.integer)):
            max_samples = self.max_samples
        else:  # float
            max_samples = int(self.max_samples * X.shape[0])

        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")

        if isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        # Free allocated memory, if any
        self.estimators_ = None

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)
        seeds = random_state.randint(MAX_INT, size=self.n_estimators)

        all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                seeds[starts[i]:starts[i + 1]],
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce
        self.estimators_ = list(itertools.chain.from_iterable(
            t[0] for t in all_results))
        self.estimators_samples_ = list(itertools.chain.from_iterable(
            t[1] for t in all_results))
        self.estimators_features_ = list(itertools.chain.from_iterable(
            t[2] for t in all_results))

        if self.oob_score:
            self._set_oob_score(X, y)

        return self
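All three fit variants end with the same reduce step: each parallel job returns a triple of lists (estimators, their sample indices, their feature indices), and itertools.chain.from_iterable flattens the per-job lists in job order. A minimal sketch of that pattern with dummy job outputs (values invented for illustration):

import itertools

# Pretend two jobs each built two estimators; each job returns a triple:
# (estimators, sample indices per estimator, feature indices per estimator).
all_results = [
    (["est0", "est1"], [[0, 2], [1, 3]], [[0], [1]]),
    (["est2", "est3"], [[0, 1], [2, 3]], [[2], [0]]),
]

estimators = list(itertools.chain.from_iterable(t[0] for t in all_results))
samples = list(itertools.chain.from_iterable(t[1] for t in all_results))
features = list(itertools.chain.from_iterable(t[2] for t in all_results))

print(estimators)  # ['est0', 'est1', 'est2', 'est3']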