Example #1
def _impose_f_order(X):
    """Return X as a Fortran-ordered 2-D array, plus a flag telling the
    caller whether X was transposed to get there without a copy."""
    # important to access flags instead of calling np.isfortran,
    # this catches corner cases.
    if X.flags.c_contiguous:
        return array2d(X.T, copy=False, order='F'), True
    else:
        return array2d(X, copy=False, order='F'), False
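
A quick sanity check (not from the original; this array2d is a hypothetical NumPy stand-in for the removed sklearn.utils.array2d helper): a C-contiguous input comes back Fortran-ordered with the transposed flag set.

import numpy as np

def array2d(X, copy=False, order=None):
    # hypothetical stand-in; the copy flag is ignored in this sketch
    return np.asarray(X, order=order)

X = np.arange(12.0).reshape(3, 4)           # C-contiguous by default
XF, transposed = _impose_f_order(X)
print(XF.flags.f_contiguous, transposed)    # True True
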
Example #2
    def fit(self, inp, y, sample_weight=None):
        self.classes_, y = numpy.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        if self.training_params is None:
            training_params = {}
        else:
            training_params = self.training_params

        # one-hot encode the integer-coded labels as network targets
        y = OneHotEncoder().fit_transform(array2d(y).transpose()).toarray()
        n_features = inp.shape[1]
        random_state = check_random_state(self.random_state)

        if self.hidden_neurons == "a":  # "a": derive the layer size from the data
            hidden_neurons = (n_features + self.n_classes_) // 2  # must be an int
        else:
            hidden_neurons = self.hidden_neurons
        if self.independent_outputs:
            graph = imlgraph(
                (n_features, hidden_neurons, self.n_classes_), biases=self.bias
            )
        else:
            graph = mlgraph(
                (n_features, hidden_neurons, self.n_classes_), biases=self.bias
            )
        self.network_ = ffnet(graph, random_state)
        self.network_.randomweights()
        trainer = getattr(self.network_, "train_" + self.training_fn)  # e.g. train_tnc
        trainer(inp, y, **training_params)
        return self
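
For context, a standalone sketch of the ffnet calls this fit method wraps (the (2, 2, 1) layout and the XOR data are arbitrary illustrations, not from the original):

from ffnet import ffnet, mlgraph

conec = mlgraph((2, 2, 1), biases=True)     # input, hidden, output layer sizes
net = ffnet(conec)
net.randomweights()
inp = [[0, 0], [0, 1], [1, 0], [1, 1]]
trg = [[0], [1], [1], [0]]                  # XOR targets
net.train_tnc(inp, trg, maxfun=500)         # the getattr(..., "train_" + fn) pattern above
print(net([0, 1]))                          # network output after training
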
Example #3
import numpy as np
from sklearn.utils import validation  # old sklearn; provides validation.array2d

def evd_transform(X, components, ncomps=-1):
    X = validation.array2d(X)
    if ncomps == -1:
        X_transformed = np.dot(X, components)
        # X_transformed = np.dot(components.T, X.T)
    else:
        # slice columns, not rows: each column of components is an eigenvector
        # (see the commented-out transposed form above)
        X_transformed = np.dot(X, components[:, :ncomps])
    return X_transformed
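
A toy projection check (identity eigenvectors and made-up data, assuming the old-sklearn validation import above resolves): with ncomps=1 only the first component survives.

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
components = np.eye(2)                         # pretend eigenvector matrix
print(evd_transform(X, components))            # full projection: X unchanged
print(evd_transform(X, components, ncomps=1))  # first component only
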
    def predict_proba(self, inp):
        inp = array2d(inp)
        probs = numpy.zeros((len(inp), self.n_classes_))
        for i, x in enumerate(inp):
            p = self.activate(x)
            if p.min() < 0:
                p = p - p.min()   # shift so no output is negative
            if p.sum() > 0:
                p = p / p.sum()   # rescale to sum to 1
            probs[i] = p
        # final safety net: L1-normalize the rows in place
        preprocessing.normalize(probs, norm="l1", copy=False)
        return probs
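
The shift-and-rescale trick in isolation (made-up activations): negatives are shifted to zero, then the vector is rescaled so it reads as probabilities.

import numpy as np

p = np.array([-0.2, 0.1, 0.7])   # raw network outputs (invented)
if p.min() < 0:
    p = p - p.min()              # now [0.  0.3 0.9]
if p.sum() > 0:
    p = p / p.sum()              # now [0.  0.25 0.75]
print(p.sum())                   # 1.0
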
    def transform(self, X, y=None):
        """Encode the data as a sparse combination of the dictionary atoms.

        Coding method is determined by the object parameter
        `transform_algorithm`.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data

        """
        X = array2d(X)
        n_samples, n_features = X.shape

        code = sparse_encode(
            X, self.components_, algorithm=self.transform_algorithm,
            n_nonzero_coefs=self.transform_n_nonzero_coefs,
            alpha=self.transform_alpha, n_jobs=self.n_jobs)

        if self.split_sign:
            # feature vector is split into a positive and negative side
            n_samples, n_features = code.shape
            split_code = np.empty((n_samples, 2 * n_features))
            split_code[:, :n_features] = np.maximum(code, 0)
            split_code[:, n_features:] = -np.minimum(code, 0)
            code = split_code

        return code
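
What split_sign does, on a made-up 1x2 code: positive and negative parts land in separate halves, doubling the width.

import numpy as np

code = np.array([[0.5, -1.0]])
split = np.hstack([np.maximum(code, 0), -np.minimum(code, 0)])
print(split)                     # [[0.5 0.  0.  1. ]]
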
    def staged_decision_function(self, X):
        X = array2d(X, dtype=DTYPE)
        result = numpy.zeros(len(X))
        for rate, classifier in zip(self.learning_rates, self.classifiers):
            result += rate * classifier.predict(X)
            # yield a copy so callers that store each stage don't all
            # end up pointing at the same final array
            yield result.copy()

    def decision_function(self, X):
        X = array2d(X, dtype=DTYPE)
        result = numpy.zeros(len(X))
        for rate, estimator in zip(self.learning_rates, self.classifiers):
            result += rate * estimator.predict(X)
        return result
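
A toy run of the staged accumulation (stub predictions in place of fitted classifiers): each stage adds rate * prediction to the running sum that staged_decision_function yields.

import numpy as np

rates = [1.0, 0.5]
preds = [np.array([1.0, -1.0]), np.array([1.0, 1.0])]   # invented stage outputs

result = np.zeros(2)
for rate, pred in zip(rates, preds):
    result += rate * pred
    print(result)                # stage 1: [ 1. -1.], stage 2: [ 1.5 -0.5]
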
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
                  n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
                  max_iter=1000, n_jobs=1):
    """Sparse coding

    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::

        X ~= code * dictionary

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        Data matrix

    dictionary: array of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.

    gram: array, shape=(n_components, n_components)
        Precomputed Gram matrix, dictionary * dictionary'

    cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary' * X

    algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'

    n_nonzero_coefs: int, 0.1 * n_features by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case.

    alpha: float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.

    init: array of shape (n_samples, n_components)
        Initialization value of the sparse codes. Only used if
        `algorithm='lasso_cd'`.

    max_iter: int, 1000 by default
        Maximum number of iterations to perform if `algorithm='lasso_cd'`.

    copy_cov: boolean, optional
        Whether to copy the precomputed covariance matrix; if False, it may be
        overwritten.

    n_jobs: int, optional
        Number of parallel jobs to run.

    Returns
    -------
    code: array of shape (n_samples, n_components)
        The sparse codes

    See also
    --------
    sklearn.linear_model.lars_path
    sklearn.linear_model.orthogonal_mp
    sklearn.linear_model.Lasso
    SparseCoder
    """
    dictionary = array2d(dictionary)
    X = array2d(X)
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]

    if gram is None and algorithm != 'threshold':
        gram = np.dot(dictionary, dictionary.T)
    if cov is None:
        copy_cov = False
        cov = np.dot(dictionary, X.T)

    if algorithm in ('lars', 'omp'):
        regularization = n_nonzero_coefs
        if regularization is None:
            regularization = max(n_features // 10, 1)  # integer count of nonzeros
    else:
        regularization = alpha
        if regularization is None:
            regularization = 1.

    if n_jobs == 1 or algorithm == 'threshold':
        return _sparse_encode(X, dictionary, gram, cov=cov,
                              algorithm=algorithm,
                              regularization=regularization, copy_cov=copy_cov,
                              init=init, max_iter=max_iter)

    # Enter parallel code block
    code = np.empty((n_samples, n_components))
    slices = list(gen_even_slices(n_samples, n_jobs))

    code_views = Parallel(n_jobs=n_jobs)(
        delayed(_sparse_encode)(
            X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
            regularization=regularization, copy_cov=copy_cov,
            init=init[this_slice] if init is not None else None,
            max_iter=max_iter)
        for this_slice in slices)
    for this_slice, this_view in zip(slices, code_views):
        code[this_slice] = this_view
    return code
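
sparse_encode is still available as sklearn.decomposition.sparse_encode; a quick usage sketch with random data (shapes and parameter values are arbitrary):

import numpy as np
from sklearn.decomposition import sparse_encode

rng = np.random.RandomState(0)
dictionary = rng.randn(15, 8)    # (n_components, n_features)
dictionary /= np.linalg.norm(dictionary, axis=1, keepdims=True)  # normalized rows
X = rng.randn(5, 8)              # (n_samples, n_features)

code = sparse_encode(X, dictionary, algorithm='omp', n_nonzero_coefs=3)
print(code.shape)                # (5, 15)
print((code != 0).sum(axis=1))   # at most 3 nonzero coefficients per row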