def test_output_dtype(output_type, dtype, out_dtype, shape):
    inp = create_input('numpy', dtype, shape, order="F")
    ary = CumlArray(inp)

    if dtype in unsupported_cudf_dtypes and \
            output_type in ['series', 'dataframe', 'cudf']:
        with pytest.raises(ValueError):
            res = ary.to_output(
                output_type=output_type,
                output_dtype=out_dtype
            )
    elif shape in [(10, 5), (1, 10)] and output_type == 'series':
        with pytest.raises(ValueError):
            res = ary.to_output(
                output_type=output_type,
                output_dtype=out_dtype
            )
    else:
        res = ary.to_output(output_type=output_type, output_dtype=out_dtype)

        if isinstance(res, cudf.DataFrame):
            assert res.values.dtype == out_dtype
        else:
            assert res.dtype == out_dtype
def test_get_set_item(slice, order):
    if order == 'F' and slice != 'both':
        pytest.skip("See issue https://github.com/rapidsai/cuml/issues/2412")

    inp = create_input('numpy', 'float32', (10, 10), order)
    ary = CumlArray(data=inp)

    if isinstance(slice, int):
        assert np.array_equal(inp[slice], ary[slice].to_output('numpy'))
        inp[slice] = 1.0
        ary[slice] = 1.0
    elif slice == 'left':
        assert np.array_equal(inp[5:], ary[5:].to_output('numpy'))
        inp[5:] = 1.0
        ary[5:] = 1.0
    elif slice == 'right':
        assert np.array_equal(inp[:5], ary[:5].to_output('numpy'))
        inp[:5] = 1.0
        ary[:5] = 1.0
    elif slice == 'both':
        assert np.array_equal(inp[:], ary[:].to_output('numpy'))
        inp[:] = 1.0
        ary[:] = 1.0
    else:
        pytest.skip("Logical indexing is not implemented yet")

    assert np.array_equal(inp, ary.to_output('numpy'))
def test_get_set_item(slice, order):
    inp = create_input('numpy', 'float32', (10, 10), order)
    ary = CumlArray(data=inp)

    if isinstance(slice, int):
        assert np.array_equal(inp[slice], ary[slice].to_output('numpy'))
        inp[slice] = 1.0
        ary[slice] = 1.0
    elif slice == 'left':
        assert np.array_equal(inp[5:], ary[5:].to_output('numpy'))
        inp[5:] = 1.0
        ary[5:] = 1.0
    elif slice == 'right':
        assert np.array_equal(inp[:5], ary[:5].to_output('numpy'))
        inp[:5] = 1.0
        ary[:5] = 1.0
    elif slice == 'both':
        assert np.array_equal(inp[:], ary[:].to_output('numpy'))
        inp[:] = 1.0
        ary[:] = 1.0
    else:
        pytest.skip("Logical indexing is not implemented yet")

    assert np.array_equal(inp, ary.to_output('numpy'))
def test_array_init(input_type, dtype, shape, order):
    if input_type == 'series':
        if dtype in unsupported_cudf_dtypes or \
                shape in [(10, 5), (1, 10)]:
            pytest.skip("Unsupported cuDF Series parameter")

    if input_type is not None:
        inp = create_input(input_type, dtype, shape, order)
        ary = CumlArray(data=inp)
    else:
        inp = create_input('cupy', dtype, shape, order)
        ptr = inp.__cuda_array_interface__['data'][0]
        ary = CumlArray(data=ptr, owner=inp, dtype=inp.dtype,
                        shape=inp.shape, order=order)

    if shape == (10, 5):
        assert ary.order == order

    if shape == 10:
        assert ary.shape == (10, )
        assert len(ary) == 10
    elif input_type == 'series':
        # cudf Series make their shape (10,) from (10, 1)
        if shape == (10, 1):
            assert ary.shape == (10, )
    else:
        assert ary.shape == shape

    assert ary.dtype == np.dtype(dtype)

    if input_type == 'numpy':
        assert isinstance(ary._owner, DeviceBuffer)
    elif input_type in ['cupy', 'numba', 'series']:
        assert ary._owner is inp
        inp_copy = deepcopy(cp.asarray(inp))

        # testing owner reference keeps data of ary alive
        del inp

        assert cp.all(cp.asarray(ary._owner) == cp.asarray(inp_copy))
    else:
        assert isinstance(ary._owner, cp.ndarray)
        truth = cp.asnumpy(inp)
        del inp

        assert ary.ptr == ptr
        data = ary.to_output('numpy')
        assert np.array_equal(truth, data)

    return True
def test_output(output_type, dtype, order, shape):
    inp = create_input('numpy', dtype, shape, order)
    ary = CumlArray(inp)

    if dtype in unsupported_cudf_dtypes and \
            output_type in ['series', 'dataframe', 'cudf']:
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    elif shape in [(10, 5), (1, 10)] and output_type == 'series':
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    else:
        res = ary.to_output(output_type)

        # using correct numba ndarray check
        if output_type == 'numba':
            assert cuda.devicearray.is_cuda_ndarray(res)
        elif output_type == 'cudf':
            if shape in [(10, 5), (1, 10)]:
                assert isinstance(res, cudf.DataFrame)
            else:
                assert isinstance(res, cudf.Series)
        else:
            assert isinstance(res, test_output_types[output_type])

        if output_type == 'numpy':
            assert np.all(inp == ary.to_output('numpy'))
        elif output_type == 'cupy':
            assert cp.all(cp.asarray(inp) == ary.to_output('cupy'))
        elif output_type == 'numba':
            assert cp.all(cp.asarray(cuda.to_device(inp)) == cp.asarray(res))
        elif output_type == 'series':
            comp = cudf.Series(np.ravel(inp)) == res
            assert np.all(comp.to_array())
        elif output_type == 'dataframe':
            mat = cuda.to_device(inp)
            if len(mat.shape) == 1:
                mat = mat.reshape(mat.shape[0], 1)
            comp = cudf.DataFrame.from_gpu_matrix(mat)
            comp = comp == res
            assert np.all(comp.as_gpu_matrix().copy_to_host())

        # check for e2e cartesian product:
        if output_type not in ['dataframe', 'cudf']:
            res2 = CumlArray(res)
            res2 = res2.to_output('numpy')
            if output_type == 'series' and shape == (10, 1):
                assert np.all(inp.reshape((1, 10)) == res2)
            else:
                assert np.all(inp == res2)
class TfidfTransformer(Base):
    """
    Transform a count matrix to a normalized tf or tf-idf representation.

    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.

    The goal of using tf-idf instead of the raw frequencies of occurrence of
    a token in a given document is to scale down the impact of tokens that
    occur very frequently in a given corpus and that are hence empirically
    less informative than features that occur in a small fraction of the
    training corpus.

    The formula that is used to compute the tf-idf for a term t of a
    document d in a document set is
    tf-idf(t, d) = tf(t, d) * idf(t),
    and the idf is computed as
    idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where n is
    the total number of documents in the document set and df(t) is the
    document frequency of t; the document frequency is the number of
    documents in the document set that contain the term t. The effect of
    adding "1" to the idf in the equation above is that terms with zero
    idf, i.e., terms that occur in all documents in a training set, will
    not be entirely ignored. (Note that the idf formula above differs from
    the standard textbook notation that defines the idf as
    idf(t) = log [ n / (df(t) + 1) ].)

    If ``smooth_idf=True`` (the default), the constant "1" is added to the
    numerator and denominator of the idf as if an extra document was seen
    containing every term in the collection exactly once, which prevents
    zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.

    Furthermore, the formulas used to compute tf and idf depend on
    parameter settings that correspond to the SMART notation used in IR
    as follows:

    Tf is "n" (natural) by default, "l" (logarithmic) when
    ``sublinear_tf=True``. Idf is "t" when use_idf is given, "n" (none)
    otherwise. Normalization is "c" (cosine) when ``norm='l2'``, "n"
    (none) when ``norm=None``.

    Parameters
    ----------
    norm : {'l1', 'l2'}, default='l2'
        Each output row will have unit norm, either:

        * 'l2': Sum of squares of vector elements is 1. The cosine
          similarity between two vectors is their dot product when l2 norm
          has been applied.
        * 'l1': Sum of absolute values of vector elements is 1.

    use_idf : bool, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : bool, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : bool, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams. If it is None, a new one
        is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'cudf', 'cupy', 'numpy', 'numba'}, default=None
        Variable to control output type of the results and attributes of
        the estimator. If None, it'll inherit the output type set at the
        module level, `cuml.global_settings.output_type`. See
        :ref:`output-data-type-configuration` for more info.
    Attributes
    ----------
    idf_ : array of shape (n_features)
        The inverse document frequency (IDF) vector; only defined if
        ``use_idf`` is True.
    """

    def __init__(self, *, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False, handle=None, verbose=False,
                 output_type=None):
        super().__init__(handle=handle, verbose=verbose,
                         output_type=output_type)
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf

    def _set_doc_stats(self, X):
        """
        We set the following document-level statistics here:
            n_samples
            n_features
            df (document frequency)
        """
        # Should not have a cost if already sparse
        output_dtype = _get_dtype(X)
        X = self._convert_to_csr(X, output_dtype)
        n_samples, n_features = X.shape
        df = _sparse_document_frequency(X)
        df = df.astype(output_dtype, copy=False)
        self.__df = CumlArray(df)
        self.__n_samples = n_samples
        self.__n_features = n_features
        return

    def _set_idf_diag(self):
        """
        Sets the idf_ diagonal sparse array
        """
        # perform idf smoothing if required
        df = self.__df.to_output('cupy') + int(self.smooth_idf)
        n_samples = self.__n_samples + int(self.smooth_idf)

        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        idf = cp.log(n_samples / df) + 1
        self._idf_diag = cp.sparse.dia_matrix(
            (idf, 0),
            shape=(self.__n_features, self.__n_features),
            dtype=df.dtype
        )
        # Free up memory occupied by the document-frequency vector
        del self.__df

    @cuml.internals.api_base_return_any_skipall
    def fit(self, X) -> "TfidfTransformer":
        """Learn the idf vector (global term weights).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            A matrix of term/token counts.
        """
        output_dtype = _get_dtype(X)
        X = self._convert_to_csr(X, output_dtype)

        if self.use_idf:
            self._set_doc_stats(X)
            self._set_idf_diag()

        return self

    @cuml.internals.api_base_return_any_skipall
    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            A matrix of term/token counts.
        copy : bool, default=True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        vectors : array-like of shape (n_samples, n_features)
        """
        if copy:
            X = X.copy()

        dtype = _get_dtype(X)
        X = self._convert_to_csr(X, dtype)
        if X.dtype != dtype:
            X = X.astype(dtype)
        n_samples, n_features = X.shape

        if self.sublinear_tf:
            cp.log(X.data, X.data)
            X.data += 1

        if self.use_idf:
            self._check_is_idf_fitted()

            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" %
                                 (n_features, expected_n_features))

            csr_diag_mul(X, self._idf_diag, inplace=True)

        if self.norm:
            if self.norm == 'l1':
                csr_row_normalize_l1(X, inplace=True)
            elif self.norm == 'l2':
                csr_row_normalize_l2(X, inplace=True)

        return X

    @cuml.internals.api_base_return_any_skipall
    def fit_transform(self, X, copy=True):
        """
        Fit TfidfTransformer to X, then transform X.
        Equivalent to fit(X).transform(X).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            A matrix of term/token counts.
        copy : bool, default=True
            Whether to copy X and operate on the copy or perform in-place
            operations.
        Returns
        -------
        vectors : array-like of shape (n_samples, n_features)
        """
        return self.fit(X).transform(X, copy=copy)

    def _check_is_idf_fitted(self):
        if not hasattr(self, 'idf_'):
            msg = ("This TfidfTransformer instance is not fitted or the "
                   "value of use_idf is not consistent between "
                   ".fit() and .transform().")
            raise NotFittedError(msg)

    def _convert_to_csr(self, X, dtype):
        """Convert array to CSR format if it is neither sparse nor CSR."""
        if not cupyx.scipy.sparse.isspmatrix_csr(X):
            if not cupyx.scipy.sparse.issparse(X):
                X = cupyx.scipy.sparse.csr_matrix(X.astype(dtype))
            else:
                X = X.tocsr()
        return X

    @property
    def idf_(self):
        # if _idf_diag is not set, this will raise an attribute error,
        # which means hasattr(self, "idf_") is False
        return self._idf_diag.data

    @idf_.setter
    def idf_(self, value):
        value = cp.asarray(value, dtype=cp.float32)
        n_features = value.shape[0]
        self._idf_diag = cupyx.scipy.sparse.dia_matrix(
            (value, 0),
            shape=(n_features, n_features),
            dtype=cp.float32
        )

    def get_param_names(self):
        return super().get_param_names() + \
            ["norm", "use_idf", "smooth_idf", "sublinear_tf"]
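# The idf formulas in the docstring above are easy to check by hand. A
# minimal sketch in plain CuPy of the smoothed variant
# idf(t) = log((1 + n) / (1 + df(t))) + 1; `X_counts` is made-up
# illustration data, and df is computed directly here instead of via the
# library's _sparse_document_frequency helper.
import cupy as cp

# 3 documents, 4 terms; the last term appears in no document.
X_counts = cp.asarray([[1, 0, 2, 0],
                       [0, 1, 1, 0],
                       [3, 0, 1, 0]], dtype=cp.float32)
n = X_counts.shape[0]                    # total number of documents
df = cp.count_nonzero(X_counts, axis=0)  # df(t): documents containing t

# smooth_idf=True adds 1 to both counts, so df(t) == 0 cannot divide by
# zero; the unseen term simply gets the largest idf.
idf_smooth = cp.log((1 + n) / (1 + df)) + 1
# tf-idf(t, d) = tf(t, d) * idf(t), broadcast across the document rows
tfidf = X_counts * idf_smooth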
def create_input(input_type, input_dtype, input_shape, input_order):
    rand_ary = cp.ones(input_shape, dtype=input_dtype, order=input_order)
    cuml_ary = CumlArray(rand_ary)
    return cuml_ary.to_output(input_type)
def test_array_init(input_type, dtype, shape, order):
    if input_type == 'series':
        if dtype in unsupported_cudf_dtypes or \
                shape in [(10, 5), (1, 10)]:
            pytest.skip("Unsupported cuDF Series parameter")

    if input_type is not None:
        inp = create_input(input_type, dtype, shape, order)
        ary = CumlArray(data=inp)
        ptr = ary.ptr
    else:
        inp = create_input('cupy', dtype, shape, order)
        ptr = inp.__cuda_array_interface__['data'][0]
        ary = CumlArray(data=ptr, owner=inp, dtype=inp.dtype,
                        shape=inp.shape, order=order)

    if shape == (10, 5):
        assert ary.order == order

    if shape == 10:
        assert ary.shape == (10, )
        assert len(ary) == 10
    elif input_type == 'series':
        # cudf Series make their shape (10,) from (10, 1)
        if shape == (10, 1):
            assert ary.shape == (10, )
    else:
        assert ary.shape == shape

    assert ary.dtype == np.dtype(dtype)

    if input_type == "numpy":
        assert isinstance(ary._owner, cp.ndarray)
        truth = cp.asnumpy(inp)
        del inp

        assert ary.ptr == ptr
        data = ary.to_output('numpy')
        assert np.array_equal(truth, data)
    else:
        found_owner = False

        def get_owner(curr):
            if isinstance(curr, CumlArray):
                return curr._owner
            elif isinstance(curr, cp.ndarray):
                return curr.data.mem._owner
            else:
                return None

        # Make sure the input array is in the ownership chain
        curr_owner = ary
        while curr_owner is not None:
            if curr_owner is inp:
                found_owner = True
                break
            curr_owner = get_owner(curr_owner)

        assert found_owner, "GPU input arrays must be in the owner chain"

        inp_copy = deepcopy(cp.asarray(inp))

        # testing owner reference keeps data of ary alive
        del inp

        # Force GC just in case it lingers
        gc.collect()

        assert cp.all(cp.asarray(ary._owner) == cp.asarray(inp_copy))

    return True
class IncrementalPCA(PCA):
    """
    Based on sklearn.decomposition.IncrementalPCA from scikit-learn 0.23.1

    Incremental principal components analysis (IPCA).

    Linear dimensionality reduction using Singular Value Decomposition of
    the data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space. The input data is
    centered but not scaled for each feature before applying the SVD.

    Depending on the size of the input data, this algorithm can be much
    more memory efficient than a PCA, and allows sparse input.

    This algorithm has constant memory complexity, on the order of
    ``batch_size * n_features``, enabling use of np.memmap files without
    loading the entire file into memory. For sparse matrices, the input
    is converted to dense in batches (in order to be able to subtract the
    mean) which avoids storing the entire dense matrix at any one time.

    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size``
    SVD computations to get the principal components, versus 1 large SVD
    of complexity ``O(n_samples * n_features ** 2)`` for PCA.

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams. If it is None, a new one
        is created.
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
    whiten : bool, optional
        If True, de-correlates the components. This is done by dividing
        them by the corresponding singular values then multiplying by
        sqrt(n_samples). Whitening allows each component to have unit
        variance and removes multi-collinearity. It might be beneficial
        for downstream tasks like LinearRegression where correlated
        features cause problems.
    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.
    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when
        calling ``fit``. If ``batch_size`` is ``None``, then
        ``batch_size`` is inferred from the data and set to
        ``5 * n_features``, to provide a balance between approximation
        accuracy and memory consumption.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'cudf', 'cupy', 'numpy', 'numba'}, default=None
        Variable to control output type of the results and attributes of
        the estimator. If None, it'll inherit the output type set at the
        module level, `cuml.global_output_type`. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.
    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected
        components. If all components are stored, the sum of explained
        variances is equal to 1.0.
    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected
        components.
        The singular values are equal to the 2-norms of the
        ``n_components`` variables in the lower-dimensional space.
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to
        ``partial_fit``.
    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to
        ``partial_fit``.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA
        model from [4]_.
    n_components_ : int
        The estimated number of components. Relevant when
        ``n_components=None``.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset
        on new calls to fit, but increments across ``partial_fit`` calls.
    batch_size_ : int
        Inferred batch size from ``batch_size``.

    Notes
    -----
    Implements the incremental PCA model from [1]_. This model is an
    extension of the Sequential Karhunen-Loeve Transform from [2]_. We
    have specifically abstained from an optimization used by authors of
    both papers, a QR decomposition used in specific situations to reduce
    the algorithmic complexity of the SVD. The source for this technique
    is [3]_. This technique has been omitted because it is advantageous
    only when decomposing a matrix with ``n_samples >= 5/3 * n_features``
    where ``n_samples`` and ``n_features`` are the matrix rows and
    columns, respectively. In addition, it hurts the readability of the
    implemented algorithm. This would be a good opportunity for future
    optimization, if it is deemed necessary.

    References
    ----------
    .. [1] `D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for
        Robust Visual Tracking, International Journal of Computer Vision,
        Volume 77, Issue 1-3, pp. 125-141, May 2008.
        <https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf>`_

    .. [2] `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis
        Extraction and its Application to Images, IEEE Transactions on
        Image Processing, Volume 9, Number 8, pp. 1371-1374, August 2000.
        <https://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf>`_

    .. [3] G. Golub and C. Van Loan. Matrix Computations, Third Edition,
        Chapter 5, Section 5.4.4, pp. 252-253.

    .. [4] `C. Bishop, 1999. "Pattern Recognition and Machine Learning",
        Section 12.2.1, pp. 574
        <http://www.miketipping.com/papers/met-mppca.pdf>`_

    Examples
    --------
    .. code-block:: python

        >>> from cuml.experimental.decomposition import IncrementalPCA
        >>> import cupy as cp
        >>> import cupyx
        >>>
        >>> X = cupyx.scipy.sparse.random(1000, 4, format='csr',
        ...                               density=0.07)
        >>> ipca = IncrementalPCA(n_components=2, batch_size=200)
        >>> ipca.fit(X)
        >>>
        >>> # Components:
        >>> ipca.components_
        array([[-0.02362926,  0.87328851, -0.15971988,  0.45967206],
               [-0.14643883,  0.11414225,  0.97589354,  0.11471273]])
        >>>
        >>> # Singular Values:
        >>> ipca.singular_values_
        array([4.90298662, 4.54498226])
        >>>
        >>> # Explained Variance:
        >>> ipca.explained_variance_
        array([0.02406334, 0.02067754])
        >>>
        >>> # Explained Variance Ratio:
        >>> ipca.explained_variance_ratio_
        array([0.28018011, 0.24075775])
        >>>
        >>> # Mean:
        >>> ipca.mean_
        array([0.03249896, 0.03629852, 0.03268694, 0.03216601])
        >>>
        >>> # Noise Variance:
        >>> ipca.noise_variance_.item()
        0.003474966583315544
    """

    def __init__(self, handle=None, n_components=None, *, whiten=False,
                 copy=True, batch_size=None, verbose=False,
                 output_type=None):

        super(IncrementalPCA, self).__init__(handle=handle,
                                             n_components=n_components,
                                             whiten=whiten,
                                             copy=copy,
                                             verbose=verbose,
                                             output_type=output_type)
        self.batch_size = batch_size
        self._hyperparams = ["n_components", "whiten", "copy", "batch_size"]
        self._cupy_attributes = True
        self._sparse_model = True

    @with_cupy_rmm
    def fit(self, X, y=None):
        """
        Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._set_base_attributes(output_type=X)

        self.n_samples_seen_ = 0
        self._mean_ = .0
        self.var_ = .0

        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            X = _validate_sparse_input(X)
        else:
            X, n_samples, n_features, self.dtype = \
                input_to_cuml_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])

            # NOTE: While we cast the input to a cupy array here, we still
            # respect the `output_type` parameter in the constructor. This
            # is done by PCA, which IncrementalPCA inherits from. PCA's
            # transform and inverse transform convert the output to the
            # required type.
            X = X.to_output(output_type='cupy')

        n_samples, n_features = X.shape

        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        for batch in _gen_batches(n_samples, self.batch_size_,
                                  min_batch_size=self.n_components or 0):
            X_batch = X[batch]
            if cupyx.scipy.sparse.issparse(X_batch):
                X_batch = X_batch.toarray()

            self.partial_fit(X_batch, check_input=False)

        return self

    @with_cupy_rmm
    def partial_fit(self, X, y=None, check_input=True):
        """
        Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        check_input : bool
            Run check_array on X.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if check_input:
            if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches.")

            self._set_output_type(X)

            X, n_samples, n_features, self.dtype = \
                input_to_cuml_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])
            X = X.to_output(output_type='cupy')
        else:
            n_samples, n_features = X.shape

        if not hasattr(self, '_components_'):
            self._components_ = None

        if self.n_components is None:
            if self._components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self._components_.shape[0]
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, "
                             "need more rows than columns for "
                             "IncrementalPCA processing"
                             % (self.n_components, n_features))
        elif not self.n_components <= n_samples:
            raise ValueError("n_components=%r must be less or equal to "
                             "the batch number of samples %d."
                             % (self.n_components, n_samples))
        else:
            self.n_components_ = self.n_components

        if (self._components_ is not None) and \
                (self._components_.shape[0] != self.n_components_):
            raise ValueError("Number of input features has changed from "
                             "%i to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value."
                             % (self._components_.shape[0],
                                self.n_components_))

        if not self._cupy_attributes:
            self._cumlarray_to_cupy_attrs()
            self._cupy_attributes = True

        # This is the first partial_fit
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = 0
            self._mean_ = .0
            self.var_ = .0

        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = \
            _incremental_mean_and_var(
                X, last_mean=self._mean_, last_variance=self.var_,
                last_sample_count=cp.repeat(
                    cp.asarray([self.n_samples_seen_]), X.shape[1]))
        n_total_samples = n_total_samples[0]

        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X = X - col_mean
        else:
            col_batch_mean = cp.mean(X, axis=0)
            X = X - col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = \
                cp.sqrt((self.n_samples_seen_ * n_samples) /
                        n_total_samples) * (self._mean_ - col_batch_mean)
            X = cp.vstack((self._singular_values_.reshape((-1, 1)) *
                           self._components_, X, mean_correction))

        U, S, V = cp.linalg.svd(X, full_matrices=False)
        U, V = _svd_flip(U, V, u_based_decision=False)
        explained_variance = S ** 2 / (n_total_samples - 1)
        explained_variance_ratio = S ** 2 / cp.sum(col_var *
                                                   n_total_samples)

        self.n_samples_seen_ = n_total_samples
        self._components_ = V[:self.n_components_]
        self._singular_values_ = S[:self.n_components_]
        self._mean_ = col_mean
        self.var_ = col_var
        self._explained_variance_ = explained_variance[:self.n_components_]
        self._explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        if self.n_components_ < n_features:
            self._noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self._noise_variance_ = 0.

        if self._cupy_attributes:
            self._cupy_to_cumlarray_attrs()
            self._cupy_attributes = False

        return self

    @with_cupy_rmm
    def transform(self, X, convert_dtype=False):
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components previously
        extracted from a training set, using minibatches of size
        batch_size if X is sparse.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            New data, where n_samples is the number of samples and
            n_features is the number of features.
        convert_dtype : bool, optional (default = False)
            When set to True, the transform method will automatically
            convert the input to the data type which was used to train
            the model.
            This will increase memory used for the method.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            out_type = self._get_output_type(X)

            X = _validate_sparse_input(X)

            n_samples = X.shape[0]
            output = []
            for batch in _gen_batches(n_samples, self.batch_size_,
                                      min_batch_size=self.n_components or 0):
                output.append(super().transform(X[batch]))
            output, _, _, _ = \
                input_to_cuml_array(cp.vstack(output), order='K')

            return output.to_output(out_type)
        else:
            return super().transform(X)

    def get_param_names(self):
        # Skip super() since we don't pass any extra parameters in __init__
        return Base.get_param_names(self) + self._hyperparams

    def _cupy_to_cumlarray_attrs(self):
        self._components_ = CumlArray(self._components_.copy())
        self._mean_ = CumlArray(self._mean_)
        self._noise_variance_ = CumlArray(self._noise_variance_)
        self._singular_values_ = CumlArray(self._singular_values_)
        self._explained_variance_ = CumlArray(
            self._explained_variance_.copy())
        self._explained_variance_ratio_ = \
            CumlArray(self._explained_variance_ratio_)

    def _cumlarray_to_cupy_attrs(self):
        self._components_ = self._components_.to_output("cupy")
        self._mean_ = self._mean_.to_output("cupy")
        self._noise_variance_ = self._noise_variance_.to_output("cupy")
        self._singular_values_ = self._singular_values_.to_output("cupy")
        self._explained_variance_ = \
            self._explained_variance_.to_output("cupy")
        self._explained_variance_ratio_ = \
            self._explained_variance_ratio_.to_output("cupy")
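# fit() and transform() above walk the data through `_gen_batches` slices.
# A minimal sketch of what such a generator does, assuming it behaves like
# sklearn.utils.gen_batches; the name and body below are an illustrative
# assumption, not cuML's exact helper.
def _gen_batches_sketch(n, batch_size, min_batch_size=0):
    start = 0
    for _ in range(int(n // batch_size)):
        end = start + batch_size
        # If what remains after this batch would be smaller than
        # min_batch_size, stop emitting full batches so the tail is
        # folded into one final, larger slice.
        if end + min_batch_size > n:
            continue
        yield slice(start, end)
        start = end
    if start < n:
        yield slice(start, n)

# e.g. list(_gen_batches_sketch(7, 3)) -> [slice(0, 3), slice(3, 6), slice(6, 7)]
# and list(_gen_batches_sketch(7, 3, min_batch_size=2)) -> [slice(0, 3), slice(3, 7)]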
class IncrementalPCA(PCA):
    """
    Based on sklearn.decomposition.IncrementalPCA from scikit-learn 0.23.1

    Incremental principal components analysis (IPCA).

    Linear dimensionality reduction using Singular Value Decomposition of
    the data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space. The input data is
    centered but not scaled for each feature before applying the SVD.

    Depending on the size of the input data, this algorithm can be much
    more memory efficient than a PCA, and allows sparse input.

    This algorithm has constant memory complexity, on the order of
    ``batch_size * n_features``, enabling use of np.memmap files without
    loading the entire file into memory. For sparse matrices, the input
    is converted to dense in batches (in order to be able to subtract the
    mean) which avoids storing the entire dense matrix at any one time.

    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size``
    SVD computations to get the principal components, versus 1 large SVD
    of complexity ``O(n_samples * n_features ** 2)`` for PCA.

    Examples
    --------

    .. code-block:: python

        from cuml.decomposition import IncrementalPCA
        import cupy as cp
        import cupyx

        X = cupyx.scipy.sparse.random(1000, 5, format='csr',
                                      density=0.07)
        ipca = IncrementalPCA(n_components=2, batch_size=200)
        ipca.fit(X)

        print("Components: \n", ipca.components_)
        print("Singular Values: ", ipca.singular_values_)
        print("Explained Variance: ", ipca.explained_variance_)
        print("Explained Variance Ratio: ", ipca.explained_variance_ratio_)
        print("Mean: ", ipca.mean_)
        print("Noise Variance: ", ipca.noise_variance_)

    Output:

    .. code-block:: python

        Components:
        [[ 0.40465797  0.70924681 -0.46980153 -0.32028596 -0.09962083]
         [ 0.3072285  -0.31337166 -0.21010504 -0.25727659  0.83490926]]

        Singular Values: [4.67710479 4.0249979 ]

        Explained Variance: [0.02189721 0.01621682]

        Explained Variance Ratio: [0.2084041  0.15434174]

        Mean: [0.03341744 0.03796517 0.03316038 0.03825872 0.0253353 ]

        Noise Variance: 0.0049539530909571425

    Parameters
    ----------
    handle : cuml.Handle
        If it is None, a new one is created just for this class
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
    whiten : bool, optional
        If True, de-correlates the components. This is done by dividing
        them by the corresponding singular values then multiplying by
        sqrt(n_samples). Whitening allows each component to have unit
        variance and removes multi-collinearity. It might be beneficial
        for downstream tasks like LinearRegression where correlated
        features cause problems.
    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.
    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when
        calling ``fit``. If ``batch_size`` is ``None``, then
        ``batch_size`` is inferred from the data and set to
        ``5 * n_features``, to provide a balance between approximation
        accuracy and memory consumption.
    verbose : int or boolean (default = False)
        Logging level

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.
    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected
        components. If all components are stored, the sum of explained
        variances is equal to 1.0.
    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected
        components. The singular values are equal to the 2-norms of the
        ``n_components`` variables in the lower-dimensional space.
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to
        ``partial_fit``.
    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to
        ``partial_fit``.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA
        model from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf.
    n_components_ : int
        The estimated number of components. Relevant when
        ``n_components=None``.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset
        on new calls to fit, but increments across ``partial_fit`` calls.
    batch_size_ : int
        Inferred batch size from ``batch_size``.

    Notes
    -----
    Implements the incremental PCA model from:
    *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust
    Visual Tracking, International Journal of Computer Vision, Volume 77,
    Issue 1-3, pp. 125-141, May 2008.*
    See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf

    This model is an extension of the Sequential Karhunen-Loeve Transform
    from:
    *A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction
    and its Application to Images, IEEE Transactions on Image Processing,
    Volume 9, Number 8, pp. 1371-1374, August 2000.*
    See https://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf

    We have specifically abstained from an optimization used by authors of
    both papers, a QR decomposition used in specific situations to reduce
    the algorithmic complexity of the SVD. The source for this technique
    is *Matrix Computations, Third Edition, G. Golub and C. Van Loan,
    Chapter 5, Section 5.4.4, pp. 252-253*. This technique has been
    omitted because it is advantageous only when decomposing a matrix
    with ``n_samples`` (rows) >= 5/3 * ``n_features`` (columns), and it
    hurts the readability of the implemented algorithm. This would be a
    good opportunity for future optimization, if it is deemed necessary.

    References
    ----------
    D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust
    Visual Tracking, International Journal of Computer Vision, Volume 77,
    Issue 1-3, pp. 125-141, May 2008.

    G. Golub and C. Van Loan. Matrix Computations, Third Edition,
    Chapter 5, Section 5.4.4, pp. 252-253.
    """

    def __init__(self, handle=None, n_components=None, *, whiten=False,
                 copy=True, batch_size=None, verbose=None,
                 output_type=None):

        super(IncrementalPCA, self).__init__(handle=handle,
                                             n_components=n_components,
                                             whiten=whiten,
                                             copy=copy,
                                             verbose=verbose,
                                             output_type=output_type)
        self.batch_size = batch_size
        self._hyperparams = ["n_components", "whiten", "copy", "batch_size"]
        self._cupy_attributes = True
        self._sparse_model = True

    @with_cupy_rmm
    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._set_base_attributes(output_type=X)

        self.n_samples_seen_ = 0
        self._mean_ = .0
        self.var_ = .0

        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            X = _validate_sparse_input(X)
        else:
            X, n_samples, n_features, self.dtype = \
                input_to_cuml_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])

            # NOTE: While we cast the input to a cupy array here, we still
            # respect the `output_type` parameter in the constructor. This
            # is done by PCA, which IncrementalPCA inherits from. PCA's
            # transform and inverse transform convert the output to the
            # required type.
            X = X.to_output(output_type='cupy')

        n_samples, n_features = X.shape

        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        for batch in _gen_batches(n_samples, self.batch_size_,
                                  min_batch_size=self.n_components or 0):
            X_batch = X[batch]
            if cupyx.scipy.sparse.issparse(X_batch):
                X_batch = X_batch.toarray()

            self.partial_fit(X_batch, check_input=False)

        return self

    @with_cupy_rmm
    def partial_fit(self, X, y=None, check_input=True):
        """Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        check_input : bool
            Run check_array on X.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if check_input:
            if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches.")

            self._set_output_type(X)

            X, n_samples, n_features, self.dtype = \
                input_to_cuml_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])
            X = X.to_output(output_type='cupy')
        else:
            n_samples, n_features = X.shape

        if not hasattr(self, '_components_'):
            self._components_ = None

        if self.n_components is None:
            if self._components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self._components_.shape[0]
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, "
                             "need more rows than columns for "
                             "IncrementalPCA processing"
                             % (self.n_components, n_features))
        elif not self.n_components <= n_samples:
            raise ValueError("n_components=%r must be less or equal to "
                             "the batch number of samples %d."
                             % (self.n_components, n_samples))
        else:
            self.n_components_ = self.n_components

        if (self._components_ is not None) and \
                (self._components_.shape[0] != self.n_components_):
            raise ValueError("Number of input features has changed from "
                             "%i to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value."
                             % (self._components_.shape[0],
                                self.n_components_))

        if not self._cupy_attributes:
            self._cumlarray_to_cupy_attrs()
            self._cupy_attributes = True

        # This is the first partial_fit
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = 0
            self._mean_ = .0
            self.var_ = .0

        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = \
            _incremental_mean_and_var(
                X, last_mean=self._mean_, last_variance=self.var_,
                last_sample_count=cp.repeat(
                    cp.asarray([self.n_samples_seen_]), X.shape[1]))
        n_total_samples = n_total_samples[0]

        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X = X - col_mean
        else:
            col_batch_mean = cp.mean(X, axis=0)
            X = X - col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = \
                cp.sqrt((self.n_samples_seen_ * n_samples) /
                        n_total_samples) * (self._mean_ - col_batch_mean)
            X = cp.vstack((self._singular_values_.reshape((-1, 1)) *
                           self._components_, X, mean_correction))

        U, S, V = cp.linalg.svd(X, full_matrices=False)
        U, V = _svd_flip(U, V, u_based_decision=False)
        explained_variance = S ** 2 / (n_total_samples - 1)
        explained_variance_ratio = S ** 2 / cp.sum(col_var *
                                                   n_total_samples)

        self.n_samples_seen_ = n_total_samples
        self._components_ = V[:self.n_components_]
        self._singular_values_ = S[:self.n_components_]
        self._mean_ = col_mean
        self.var_ = col_var
        self._explained_variance_ = explained_variance[:self.n_components_]
        self._explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        if self.n_components_ < n_features:
            self._noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self._noise_variance_ = 0.

        if self._cupy_attributes:
            self._cupy_to_cumlarray_attrs()
            self._cupy_attributes = False

        return self

    @with_cupy_rmm
    def transform(self, X, convert_dtype=False):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously
        extracted from a training set, using minibatches of size
        batch_size if X is sparse.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            New data, where n_samples is the number of samples and
            n_features is the number of features.
        convert_dtype : bool, optional (default = False)
            When set to True, the transform method will automatically
            convert the input to the data type which was used to train
            the model. This will increase memory used for the method.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            out_type = self._get_output_type(X)

            X = _validate_sparse_input(X)

            n_samples = X.shape[0]
            output = []
            for batch in _gen_batches(n_samples, self.batch_size_,
                                      min_batch_size=self.n_components or 0):
                output.append(super().transform(X[batch]))
            output, _, _, _ = \
                input_to_cuml_array(cp.vstack(output), order='K')

            return output.to_output(out_type)
        else:
            return super().transform(X)

    def get_param_names(self):
        return self._hyperparams

    def _cupy_to_cumlarray_attrs(self):
        self._components_ = CumlArray(self._components_.copy())
        self._mean_ = CumlArray(self._mean_)
        self._noise_variance_ = CumlArray(self._noise_variance_)
        self._singular_values_ = CumlArray(self._singular_values_)
        self._explained_variance_ = CumlArray(
            self._explained_variance_.copy())
        self._explained_variance_ratio_ = \
            CumlArray(self._explained_variance_ratio_)

    def _cumlarray_to_cupy_attrs(self):
        self._components_ = self._components_.to_output("cupy")
        self._mean_ = self._mean_.to_output("cupy")
        self._noise_variance_ = self._noise_variance_.to_output("cupy")
        self._singular_values_ = self._singular_values_.to_output("cupy")
        self._explained_variance_ = \
            self._explained_variance_.to_output("cupy")
        self._explained_variance_ratio_ = \
            self._explained_variance_ratio_.to_output("cupy")
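# partial_fit() above calls _svd_flip(U, V, u_based_decision=False) to
# remove the sign ambiguity of the SVD, so components_ keep a stable
# orientation across batches. A minimal sketch, assuming the helper
# mirrors sklearn.utils.extmath.svd_flip; the name and body below are an
# illustrative assumption, not cuML's implementation.
import cupy as cp

def _svd_flip_sketch(u, v, u_based_decision=False):
    if u_based_decision:
        # Sign comes from the largest-magnitude entry in each column of u.
        max_abs_cols = cp.argmax(cp.abs(u), axis=0)
        signs = cp.sign(u[max_abs_cols, cp.arange(u.shape[1])])
    else:
        # Sign comes from the largest-magnitude entry in each row of v,
        # which is what u_based_decision=False selects in partial_fit.
        max_abs_rows = cp.argmax(cp.abs(v), axis=1)
        signs = cp.sign(v[cp.arange(v.shape[0]), max_abs_rows])
    # Flip u's columns and v's rows by the same signs, so u @ diag(s) @ v
    # is unchanged while the component orientation becomes deterministic.
    u *= signs
    v *= signs[:, cp.newaxis]
    return u, v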