def predict(self, X):
    """Predict using the multi-layer perceptron model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Samples to predict.

    Returns
    -------
    array, shape (n_samples)
        Predicted target values per element in X.
    """
    X = atleast2d_or_csr(X)
    scores = self.decision_function(X)

    if len(scores.shape) == 1 or self.multi_label is True:
        # Binary or multi-label case: squash scores through a sigmoid and
        # threshold at 0.5 to obtain 0/1 indicators.
        scores = logistic_sigmoid(scores)
        # FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # exact equivalent (it is what `np.int` aliased).
        results = (scores > 0.5).astype(int)

        if self.multi_label:
            return self._lbin.inverse_transform(results)
    else:
        # Multi-class case: pick the class with the highest softmax score.
        scores = _softmax(scores)
        results = scores.argmax(axis=1)

    return self.classes_[results]
def partial_fit(self, X, y):
    """Fit the model to the data X and target y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Subset of training data.

    y : numpy array of shape (n_samples)
        Subset of target values.

    Returns
    -------
    self : returns an instance of self.
    """
    X = atleast2d_or_csr(X)

    # Record problem dimensions from this mini-batch.
    self.n_outputs = y.shape[1]
    batch_samples, self.n_features = X.shape

    self._validate_params()

    # Lazily initialize weights and learning-rate bookkeeping on the first
    # call (or after a reset).
    if self.coef_hidden_ is None:
        self._init_fit()
        self._init_param()
    if self.t_ is None or self.eta_ is None:
        self._init_t_eta_()

    # Reuse preallocated buffers for activations and output deltas, then
    # perform one SGD backpropagation step over this batch.
    hidden_act, output_act, output_delta = \
        self._preallocate_memory(batch_samples)
    cost = self._backprop_sgd(X, y, batch_samples,
                              hidden_act, output_act, output_delta)

    if self.verbose:
        print("Iteration %d, cost = %.2f" % (self.t_, cost))
    self.t_ += 1

    return self
def predict(self, X):
    """Predict using the multi-layer perceptron model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)

    Returns
    -------
    array, shape (n_samples)
        Predicted target values per element in X.
    """
    # NOTE(review): the body appears truncated here -- as written the method
    # only validates X and then implicitly returns None, which contradicts
    # the docstring. Confirm the missing remainder (presumably something like
    # `return self.decision_function(X)`) against the original file.
    X = atleast2d_or_csr(X)
def predict_proba(self, X):
    """Return pseudo-probabilities derived from centroid distances.

    Distances from each sample to every centroid are L1-normalized and
    inverted, yielding similarity scores in place of true probabilities.
    """
    X = atleast2d_or_csr(X)

    # Guard: the model must have been fitted before prediction.
    if not hasattr(self, "centroids_"):
        raise AttributeError("Model has not been trained yet.")

    dist = pairwise_distances(X, self.centroids_, metric=self.metric)

    # This is evidently silly, as normalizing the distances does not turn
    # them into probabilities. However, we need this method in
    # tg.classify.TranslationClassifier._predict()
    normalize(dist, norm="l1", copy=False)

    # turn into probability of *similarity*
    return 1.0 - dist
def decision_function(self, X):
    """Compute the raw network output for the samples in X.

    Runs a single forward pass: hidden-layer activation followed by the
    linear output layer.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)

    Returns
    -------
    array, shape (n_samples)
        Predicted target values per element in X.
    """
    X = atleast2d_or_csr(X)

    # Forward pass through the hidden layer.
    hidden_in = safe_sparse_dot(X, self.coef_hidden_) + self.intercept_hidden_
    hidden_out = self.activation_func(hidden_in)

    # Linear output layer.
    output = safe_sparse_dot(hidden_out, self.coef_output_)
    output += self.intercept_output_

    # Collapse a single-output column matrix into a 1-D array.
    if output.shape[1] == 1:
        output = output.ravel()
    return output
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True, mode=None):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the heat
    kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    mode : deprecated
        Old name for ``eigen_solver``; will be removed in 0.15.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If there graph has many components, the first few
    eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg" or mode == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    # Honor the deprecated `mode` alias for eigen_solver.
    if mode is not None:
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    # Check that the matrix given is symmetric; if not, symmetrize it by
    # averaging with its transpose (and warn the caller).
    if ((not sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T) < 1e-10)) or
        (sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T).data < 1e-10))):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # FIX: `np.float` was removed in NumPy 1.24; the builtin `float` is
        # the exact equivalent.  lobpcg needs native floats.
        laplacian = laplacian.astype(float)
        laplacian = _set_diag(laplacian, 1)
        ml = smoothed_aggregation_solver(atleast2d_or_csr(laplacian))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("lobpcg failed to produce a valid embedding.")

    elif eigen_solver == "lobpcg":
        laplacian = laplacian.astype(float)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = laplacian.astype(float)  # lobpcg needs native floats
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError(
                    "lobpcg failed to produce a valid embedding.")

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
def fit(self, X, y):
    """Fit the model to the data X and target y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data, where n_samples in the number of samples
        and n_features is the number of features.

    y : numpy array of shape (n_samples)
        Subset of the target values.

    Returns
    -------
    self
    """
    X = atleast2d_or_csr(X)
    self._validate_params()

    n_samples, self.n_features = X.shape
    self.n_outputs = y.shape[1]

    # (Re)initialize weights unless warm-starting from a previous fit.
    if not self.warm_start:
        self._init_t_eta_()
        self._init_fit()
        self._init_param()
    else:
        if self.t_ is None or self.coef_hidden_ is None:
            self._init_t_eta_()
            self._init_fit()
            self._init_param()

    if self.shuffle:
        X, y = shuffle(X, y, random_state=self.random_state)

    # l-bfgs does not use mini-batches
    if self.algorithm == "l-bfgs":
        batch_size = n_samples
    else:
        # NOTE(review): clipping to a lower bound of 0 permits batch_size=0,
        # which would make the division below raise ZeroDivisionError --
        # confirm whether the lower bound should be 1.
        batch_size = np.clip(self.batch_size, 0, n_samples)

    n_batches = int(n_samples / batch_size)
    batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches))

    # preallocate memory
    a_hidden, a_output, delta_o = self._preallocate_memory(batch_size)

    if self.algorithm == "sgd":
        prev_cost = np.inf

        for i in range(self.max_iter):
            for batch_slice in batch_slices:
                cost = self._backprop_sgd(X[batch_slice], y[batch_slice],
                                          batch_size, a_hidden, a_output,
                                          delta_o)

            if self.verbose:
                print("Iteration %d, cost = %.2f" % (i, cost))
            # Stop early once the cost improvement falls below tol.
            if abs(cost - prev_cost) < self.tol:
                break
            prev_cost = cost
            self.t_ += 1
    elif self.algorithm == "l-bfgs":
        # BUG FIX: the original condition was `elif "l-bfgs":` -- a non-empty
        # string literal that is always truthy, so l-bfgs training ran for
        # ANY algorithm other than "sgd".  Compare against self.algorithm.
        self._backprop_lbfgs(X, y, n_samples, a_hidden, a_output, delta_o)

    return self