Example #1
def train(arduino_serial, is_right_side):
    training_values = collect_points(arduino_serial, is_right_side)
    [M1, M2, M3, M4] = populate_matrices(training_values)
    
    # find inverses using singular value decomposition
    M1inv = linalg.pinv2(M1)
    M2inv = linalg.pinv2(M2)
    M3inv = linalg.pinv2(M3)
    M4inv = linalg.pinv2(M4)
    
    # x and y are assumed to be module-level arrays of the target coordinates
    print(M1inv.shape)
    print(x.shape)

    # find coefficients
    xCoeff1 = M1inv * x
    xCoeff2 = M2inv * x
    xCoeff3 = M3inv * x
    xCoeff4 = M4inv * x
    print(xCoeff1)

    yCoeff1 = M1inv * y
    yCoeff2 = M2inv * y
    yCoeff3 = M3inv * y
    yCoeff4 = M4inv * y
    print(yCoeff1)
    
    return [xCoeff1, xCoeff2, xCoeff3, xCoeff4, yCoeff1, yCoeff2, yCoeff3, yCoeff4]
Example #2
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y.  See PLS for the meaning of the parameters.  It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = pinv2(X, check_finite=False)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # If y_score only has zeros x_weights will only have zeros. In
        # this case add an epsilon to converge to a more acceptable
        # solution
        if np.dot(x_weights.T, x_weights) < eps:
            x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = pinv2(Y, check_finite=False)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached',
                          ConvergenceWarning)
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
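The docstring above compares the loop with the power method on X'Y. A minimal self-contained sketch (made-up data, not scikit-learn's API) showing that power iteration on the cross-product matrix recovers the same leading singular vectors as a full SVD:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 4))
Y = rng.standard_normal((50, 3))
C = X.T @ Y  # cross-product matrix whose leading singular vectors the loop targets

# power iteration: alternately project and normalize, as in the NIPALS inner loop
u = rng.standard_normal(C.shape[0])
for _ in range(500):
    v = C.T @ u
    v /= np.linalg.norm(v)
    u = C @ v
    u /= np.linalg.norm(u)

U, s, Vt = np.linalg.svd(C)
# singular vectors are defined only up to sign
print(np.allclose(np.abs(u), np.abs(U[:, 0]), atol=1e-6))
print(np.allclose(np.abs(v), np.abs(Vt[0]), atol=1e-6))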
Example #3
    def add_fit(self,X):
        n_samples = X.shape[0]

        # previous state: M approximates (H_old^T H_old + I)^-1, beta holds the current output weights
        first = safe_sparse_dot(self.hidden_activations_.T, self.hidden_activations_)
        M = pinv2(first + np.identity(first.shape[0]))
        beta = self.coef_output_
        # hidden activations of the new batch
        H = self._get_hidden_activations(X)
        # recursive (Woodbury-style) update of M and of the output weights
        first = pinv2(np.identity(n_samples) + safe_sparse_dot(safe_sparse_dot(H, M), H.T))
        second = safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(M, H.T), first), H), M)
        M = M - second
        self.coef_output_ = beta + safe_sparse_dot(safe_sparse_dot(M, H.T), (X - safe_sparse_dot(H, beta)))
Example #4
def _compute_eloreta_inv(G, W, n_orient, n_nzero, lambda2, force_equal):
    """Invert weights and compute M."""
    W_inv = np.empty_like(W)
    n_src = W_inv.shape[0]
    if n_orient == 1 or force_equal:
        W_inv[:] = 1. / W
    else:
        for ii in range(n_src):
            # Here we use a single-precision-suitable `rcond` (given our
            # 3x3 matrix size) because the inv could be saved in single
            # precision.
            W_inv[ii] = linalg.pinv2(W[ii], rcond=1e-7)

    # Weight the gain matrix
    W_inv_Gt = np.empty_like(G).T
    for ii in range(n_src):
        sl = slice(n_orient * ii, n_orient * (ii + 1))
        W_inv_Gt[sl, :] = np.dot(W_inv[ii], G[:, sl].T)

    # Compute the inverse, normalizing by the trace
    G_W_inv_Gt = np.dot(G, W_inv_Gt)
    G_W_inv_Gt *= n_nzero / np.trace(G_W_inv_Gt)
    u, s, v = linalg.svd(G_W_inv_Gt)
    s = s / (s ** 2 + lambda2)
    M = np.dot(v.T[:, :n_nzero] * s[:n_nzero], u.T[:n_nzero])
    return M, W_inv
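The final SVD step builds a Tikhonov-regularized inverse, M = V diag(s / (s^2 + lambda2)) U^T; with lambda2 = 0 and a full-rank matrix this reduces to the ordinary pseudo-inverse. A small standalone check on a random matrix, for illustration only:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6))
lambda2 = 0.0
u, s, v = linalg.svd(A)          # note: scipy returns v as V^T
M = np.dot(v.T * (s / (s ** 2 + lambda2)), u.T)
print(np.allclose(M, linalg.pinv(A)))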
Example #5
    def fit(self, X, y):
        if self.activation is None:
            # Useful to quantify the impact of the non-linearity
            self._activate = lambda x: x
        else:
            self._activate = self.activations[self.activation]
        rng = check_random_state(self.random_state)

        # one-of-K coding for output values
        self.classes_ = unique_labels(y)
        Y = label_binarize(y, self.classes_)

        # set hidden layer parameters randomly
        n_features = X.shape[1]
        if self.rank is None:
            if self.density == 1:
                self.weights_ = rng.randn(n_features, self.n_hidden)
            else:
                self.weights_ = sparse_random_matrix(
                    self.n_hidden, n_features, density=self.density,
                    random_state=rng).T
        else:
            # Low rank weight matrix
            self.weights_u_ = rng.randn(n_features, self.rank)
            self.weights_v_ = rng.randn(self.rank, self.n_hidden)
        self.biases_ = rng.randn(self.n_hidden)

        # map the input data through the hidden layer
        H = self.transform(X)

        # fit the linear model on the hidden layer activation
        self.beta_ = np.dot(pinv2(H), Y)
        return self
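The pattern above is the usual extreme-learning-machine fit: a random hidden layer followed by a pseudo-inverse solve for the output weights. A minimal self-contained sketch of the same idea (toy data and names of my own; scipy.linalg.pinv is used since pinv2 has been removed from recent SciPy releases):

import numpy as np
from scipy.linalg import pinv

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 5))            # inputs
Y = (X[:, :1] > 0).astype(float)             # toy binary target as a column

n_hidden = 20
W = rng.standard_normal((5, n_hidden))       # random input-to-hidden weights
b = rng.standard_normal(n_hidden)            # random biases

H = np.tanh(X @ W + b)                       # hidden-layer activations
beta = pinv(H) @ Y                           # least-squares output weights

print(np.mean((H @ beta - Y) ** 2))          # training error of the random-feature fit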
Example #6
    def calculate_arm_q_dot(self, cmd):

        # ctrl_mod decides whether to control translational or rotational velocity
        if cmd.ctrl_mod:
            eef_vel = np.array(
                [0., 0., 0., cmd.eef_vel[5], -cmd.eef_vel[3], cmd.eef_vel[4]])
        else:
            eef_vel = np.array(
                [cmd.eef_vel[2], -cmd.eef_vel[0], cmd.eef_vel[1], 0., 0., 0.])
        # print('EEF Cmd: {} '.format(eef_vel))

        # get position jacobian of eef
        jac_pos = self.sim.data.get_body_jacp(self.eef_link)
        jac_pos = jac_pos.reshape(3, self.sim.model.nv)
        jac_pos = jac_pos[:, 0:7]

        # get rotation jacobian of eef
        jac_rot = self.sim.data.get_body_jacr(self.eef_link)
        jac_rot = jac_rot.reshape(3, self.sim.model.nv)
        jac_rot = jac_rot[:, 0:7]

        jac_full = np.concatenate((jac_pos, jac_rot))

        # calculate pseudo-inverse of jacobian
        jac_inv = pinv2(jac_full)
        q_dot = np.dot(jac_inv, eef_vel)

        return q_dot
Example #7
    def fit(self, X=None, y=None):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : array_like, shape (n_samples, 3)
            An array with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """

        if X is not None:
            K_list = self.calc_scalar_kernel_matrices(X)
        else:
            K_list = self.calc_scalar_kernel_matrices()

        # add diagonal noise (nugget) to each scalar kernel matrix
        K_list = [K + self.nugget * sp.eye(K.shape[0]) for K in K_list]

        Kglob = None
        # outer_iv = [sp.outer(iv, iv.T) for iv in self.ivs] # NO, wrong
        for K, ivs, iv_corr in zip(K_list, self.ivs, self.iv_corr):
            # make the outer product tensor of shape (N_ls, N_ls, 3, 3) and multiply it with the scalar kernel
            K3D = iv_corr * K[:, :, None, None] * rotmat_multi(ivs, ivs)
            # reshape tensor onto a 2D array tiled with 3x3 matrix blocks
            if Kglob is None:
                Kglob = K3D
            else:
                Kglob += K3D
        Kglob = my_tensor_reshape(Kglob)
        # # all channels merged into one covariance matrix
        # # K^{glob}_{ij} = \sum_{k = 1}^{N_{IVs}} w_k D_{k, ij} |v_k^i\rangle \langle v_k^j |

        try:
            inv = LA.pinv2(Kglob)
        except LA.LinAlgError as err:
            print("pinv2 failed: %s. Switching to pinvh" % err)
            try:
                inv = LA.pinvh(Kglob)
            except LA.LinAlgError as err:
                print("pinvh also failed: %s. No inverse computed" % err)
                inv = None

        # alpha is the vector of regression coefficients of GaussianProcess
        alpha = sp.dot(inv, self.y.ravel())

        if not self.low_memory:
            self.inverse = inv
            self.Kglob = Kglob
        self.alpha = sp.array(alpha)
Example #8
    def fit(self, X, y, activation='relu'):
        """Fits the training data to the model based on an activation function.
        
        Parameters:
        -----------
        X : array-like
            The input data to be fit by the model.

        y : array-like
            The targets of the data. This is the target matrix T

        activation: string
            The selected activation function. Options are:
            'relu', 'sigmoid', 'tanh'
        
        Returns:
        --------
        Beta : array
            The learned weights.
        """
        # convert X and y to arrays
        X = np.array(X)
        y = np.array(y)

        # set the activation function for the whole ELM object
        self.activation = activation

        # compute the output weight vectors (beta)
        # This is retrieved using the Moore-Penrose generalized inverse
        self.beta = np.dot(pinv2(self.hidden_nodes(X)), y)
        return self.beta
Example #9
    def compute_transcription_factor_activity(
            self, allow_self_interactions_for_duplicate_prior_columns=True):

        activity, self.prior, non_zero_tfs = process_expression_into_activity(
            self.expression_matrix, self.prior)
        self.fix_self_interacting(
            non_zero_tfs,
            allow_duplicates=allow_self_interactions_for_duplicate_prior_columns
        )

        # Set the activity of non-zero tfs to the pseudoinverse of the prior matrix times the expression
        if len(non_zero_tfs) > 0:
            utils.Debug.vprint(
                "Calculating TFA for {nz} TFs from prior targets".format(
                    nz=len(non_zero_tfs)),
                level=1)
            activity.loc[non_zero_tfs, :] = np.matrix(
                linalg.pinv2(self.prior[non_zero_tfs])) * np.matrix(
                    self.expression_matrix_halftau)
        else:
            utils.Debug.vprint(
                "No prior information for TFs exists. Using expression for TFA exclusively.",
                level=0)

        activity_nas = activity.isna().any(axis=1)
        if activity_nas.sum() > 0:
            lose_tfs = activity_nas.index[activity_nas].tolist()
            utils.Debug.vprint("Dropping TFs with NaN values: {drop}".format(
                drop=" ".join(lose_tfs)))
            activity = activity.dropna(axis=0)

        return activity
Example #10
 def test_simple_rows(self):
     a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
     a_pinv = pinv(a)
     a_pinv2 = pinv2(a)
     a_pinv3 = pinv3(a)
     assert_array_almost_equal(a_pinv,a_pinv2)
     assert_array_almost_equal(a_pinv,a_pinv3)
Example #11
    def test_pinv_array(self):
        from scipy.linalg import pinv2

        tests = []
        tests.append(rand(1, 1, 1))
        tests.append(rand(3, 1, 1))
        tests.append(rand(1, 2, 2))
        tests.append(rand(3, 2, 2))
        tests.append(rand(1, 3, 3))
        tests.append(rand(3, 3, 3))
        A = rand(1, 3, 3)
        A[0, 0, :] = A[0, 1, :]
        tests.append(A)

        tests.append(rand(1, 1, 1) + 1.0j * rand(1, 1, 1))
        tests.append(rand(3, 1, 1) + 1.0j * rand(3, 1, 1))
        tests.append(rand(1, 2, 2) + 1.0j * rand(1, 2, 2))
        tests.append(rand(3, 2, 2) + 1.0j * rand(3, 2, 2))
        tests.append(rand(1, 3, 3) + 1.0j * rand(1, 3, 3))
        tests.append(rand(3, 3, 3) + 1.0j * rand(3, 3, 3))
        A = rand(1, 3, 3) + 1.0j * rand(1, 3, 3)
        A[0, 0, :] = A[0, 1, :]
        tests.append(A)

        for test in tests:
            pinv_test = zeros_like(test)
            for i in range(pinv_test.shape[0]):
                pinv_test[i] = pinv2(test[i])

            pinv_array(test)
            assert_array_almost_equal(test, pinv_test, decimal=4)
Example #12
def _compute_eloreta_inv(G, W, n_orient, n_nzero, lambda2, force_equal):
    """Invert weights and compute M."""
    W_inv = np.empty_like(W)
    n_src = W_inv.shape[0]
    if n_orient == 1 or force_equal:
        W_inv[:] = 1. / W
    else:
        for ii in range(n_src):
            # Here we use a single-precision-suitable `rcond` (given our
            # 3x3 matrix size) because the inv could be saved in single
            # precision.
            W_inv[ii] = linalg.pinv2(W[ii], rcond=1e-7)

    # Weight the gain matrix
    W_inv_Gt = np.empty_like(G).T
    for ii in range(n_src):
        sl = slice(n_orient * ii, n_orient * (ii + 1))
        W_inv_Gt[sl, :] = np.dot(W_inv[ii], G[:, sl].T)

    # Compute the inverse, normalizing by the trace
    G_W_inv_Gt = np.dot(G, W_inv_Gt)
    G_W_inv_Gt *= n_nzero / np.trace(G_W_inv_Gt)
    u, s, v = linalg.svd(G_W_inv_Gt)
    s = s / (s**2 + lambda2)
    M = np.dot(v.T[:, :n_nzero] * s[:n_nzero], u.T[:n_nzero])
    return M, W_inv
Example #13
    def _evaluateNet(self):
        wtRatio=1./3.
        inputs=self.dataset.getField('input')
        targets=self.dataset.getField('target')

        training_start=int(wtRatio*len(inputs))
        washout_inputs=inputs[:training_start]
        training_inputs=inputs[training_start:]
        training_targets=targets[training_start:]
        phis=[]

        self.model.network.reset()

        self.model.washout(washout_inputs)
        phis.append(self.model.washout(training_inputs))

        PHI=concatenate(phis).T
        PHI_INV=pinv2(PHI)
        TARGET=concatenate(training_targets).T

        W=dot(TARGET,PHI_INV)
        self.model.setOutputWeightMatrix(W)

        self.model.activate(washout_inputs)
        outputs=self.model.activate(training_inputs)

        OUTPUT=concatenate(outputs)
        TARGET=TARGET.T

        fitness=self.evalfunc(OUTPUT,TARGET)

        return fitness
Example #14
    def fit(self, X, y):
        if self.activation is None:
            # Useful to quantify the impact of the non-linearity
            self._activate = lambda x: x
        else:
            self._activate = self.activations[self.activation]
        rng = check_random_state(self.random_state)

        # one-of-K coding for output values
        self.classes_ = unique_labels(y)
        Y = label_binarize(y, self.classes_)

        # set hidden layer parameters randomly
        n_features = X.shape[1]
        if self.rank is None:
            if self.density == 1:
                self.weights_ = rng.randn(n_features, self.n_hidden)
            else:
                self.weights_ = sparse_random_matrix(self.n_hidden,
                                                     n_features,
                                                     density=self.density,
                                                     random_state=rng).T
        else:
            # Low rank weight matrix
            self.weights_u_ = rng.randn(n_features, self.rank)
            self.weights_v_ = rng.randn(self.rank, self.n_hidden)
        self.biases_ = rng.randn(self.n_hidden)

        # map the input data through the hidden layer
        H = self.transform(X)

        # fit the linear model on the hidden layer activation
        self.beta_ = np.dot(pinv2(H), Y)
        return self
Example #15
 def test_simple_cols(self):
     a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
     a_pinv = pinv(a)
     a_pinv2 = pinv2(a)
     a_pinv3 = pinv3(a)
     assert_array_almost_equal(a_pinv,a_pinv2)
     assert_array_almost_equal(a_pinv,a_pinv3)
Example #16
def unwhiten(X, comp):
    """
    Inverse process of whitening.
    _comp_ is assumed to be column wise.
    """
    uw = la.pinv2(comp)
    return np.dot(X, uw)
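unwhiten simply multiplies by the pseudo-inverse of the component matrix, so if the forward transform was X_white = X @ comp the round trip is exact for a full-rank comp. A quick check under that assumption, with illustrative data and scipy.linalg.pinv standing in for pinv2:

import numpy as np
from scipy.linalg import pinv

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 6))
comp = rng.standard_normal((6, 6))      # column-wise components, full rank almost surely

X_white = X @ comp                      # assumed forward whitening step
X_back = X_white @ pinv(comp)           # same operation as unwhiten above
print(np.allclose(X_back, X, atol=1e-8))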
Example #17
def _pseudo_inverse_dense(L, rhoss, method='direct'):
    """
    Internal function for computing the pseudo inverse of an Liouvillian using
    dense matrix methods. See pseudo_inverse for details.
    """
    if method == 'direct':
        rho_vec = np.transpose(mat2vec(rhoss.full()))

        tr_mat = tensor([identity(n) for n in L.dims[0][0]])
        tr_vec = np.transpose(mat2vec(tr_mat.full()))

        N = np.prod(L.dims[0][0])
        I = np.identity(N * N)
        P = np.kron(np.transpose(rho_vec), tr_vec)
        Q = I - P
        LIQ = np.linalg.solve(L.full(), Q)
        R = np.dot(Q, LIQ)

        return Qobj(R, dims=L.dims)

    elif method == 'numpy':
        return Qobj(np.linalg.pinv(L.full()), dims=L.dims)

    elif method == 'scipy':
        return Qobj(la.pinv(L.full()), dims=L.dims)

    elif method == 'scipy2':
        return Qobj(la.pinv2(L.full()), dims=L.dims)

    else:
        raise ValueError("Unsupported method '%s'. Use 'direct', 'numpy', "
                         "'scipy' or 'scipy2'" % method)
Example #18
def baseline(y, deg=None, max_it=None, tol=None):
    """
    Computes the baseline of a given data.

    Iteratively performs a polynomial fitting in the data to detect its
    baseline. At every iteration, the fitting weights on the regions with
    peaks are reduced to identify the baseline only.

    Parameters
    ----------
    y : ndarray
        Data to detect the baseline.
    deg : int
        Degree of the polynomial that will estimate the data baseline. A low
        degree may fail to detect all the baseline present, while a high
        degree may make the data too oscillatory, especially at the edges.
    max_it : int
        Maximum number of iterations to perform.
    tol : float
        Tolerance to use when comparing the difference between the current
        fit coefficients and the ones from the last iteration. The iteration
        procedure will stop when the difference between them is lower than
        *tol*.

    Returns
    -------
    baseline : ndarray
        Array with the baseline amplitude for every original point in *y*
    """

    # for not repeating ourselves in `envelope`
    if deg is None:
        deg = 3
    if max_it is None:
        max_it = 100
    if tol is None:
        tol = 1e-3

    order = deg + 1
    coeffs = np.ones(order)

    # try to avoid numerical issues
    cond = math.pow(y.max(), 1. / order)
    x = np.linspace(0., cond, y.size)
    base = y.copy()

    vander = np.vander(x, order)
    vander_pinv = la.pinv2(vander)

    for _ in range(max_it):
        coeffs_new = np.dot(vander_pinv, y)

        if la.norm(coeffs_new - coeffs) / la.norm(coeffs) < tol:
            break

        coeffs = coeffs_new
        base = np.dot(vander, coeffs)
        y = np.minimum(y, base)

    return base
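A usage sketch for the baseline routine above on synthetic data (a linear drift plus one Gaussian peak); it assumes the function and its imports (math, numpy, scipy.linalg as la) are available, and the numbers are only illustrative:

import numpy as np

x = np.linspace(0., 10., 500)
drift = 0.5 * x + 2.0                              # slowly varying background
peak = 3.0 * np.exp(-(x - 5.0) ** 2 / 0.1)         # narrow peak sitting on it
signal = drift + peak

est = baseline(signal, deg=1)                      # estimated background
corrected = signal - est                           # peak with the drift removed
print(float(np.abs(est - drift).mean()))           # rough agreement with the true drift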
Example #19
	def compute_G(self, D):
		"""
		Computes matrix G given the selected model

		@param D Prototypes matrix [n_prototypes, n_features].

		@return Matrix G [n_prototypes, n_prototypes].
		"""

		# Computing matrix G according to the selected model
		if (self.model == "SBM"):

			# Computing similarity matrix and inverting
			G = psimilarity(A=D, kernel=self.kernel, gamma=self.gamma,\
				norm=self.norm, icov_mtx=self.icov_mtx)
			G = pinv2(G)
			#G = np.linalg.pinv(G)

		elif (self.model == "AAKR"):

			# Using identity matrix as inverse similarity matrix
			G = np.eye(D.shape[0])

		# Return G
		return G
Example #20
 def test_simple_complex(self):
     a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) +
          1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
Example #21
 def test_simple_complex(self):
     a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
          + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
Example #22
    def test_pinv_array(self):
        from scipy.linalg import pinv2

        tests = []
        tests.append(np.random.rand(1, 1, 1))
        tests.append(np.random.rand(3, 1, 1))
        tests.append(np.random.rand(1, 2, 2))
        tests.append(np.random.rand(3, 2, 2))
        tests.append(np.random.rand(1, 3, 3))
        tests.append(np.random.rand(3, 3, 3))
        A = np.random.rand(1, 3, 3)
        A[0, 0, :] = A[0, 1, :]
        tests.append(A)

        tests.append(np.random.rand(1, 1, 1) + 1.0j*np.random.rand(1, 1, 1))
        tests.append(np.random.rand(3, 1, 1) + 1.0j*np.random.rand(3, 1, 1))
        tests.append(np.random.rand(1, 2, 2) + 1.0j*np.random.rand(1, 2, 2))
        tests.append(np.random.rand(3, 2, 2) + 1.0j*np.random.rand(3, 2, 2))
        tests.append(np.random.rand(1, 3, 3) + 1.0j*np.random.rand(1, 3, 3))
        tests.append(np.random.rand(3, 3, 3) + 1.0j*np.random.rand(3, 3, 3))
        A = np.random.rand(1, 3, 3) + 1.0j*np.random.rand(1, 3, 3)
        A[0, 0, :] = A[0, 1, :]
        tests.append(A)

        for test in tests:
            pinv_test = np.zeros_like(test)
            for i in range(pinv_test.shape[0]):
                pinv_test[i] = pinv2(test[i])

            pinv_array(test)
            assert_array_almost_equal(test, pinv_test, decimal=4)
Example #23
    def train(self, trajs, silent=False):
        trans_obs = [
            self._transform_observations(traj.obs[:]) for traj in trajs
        ]
        X = np.concatenate([obs[:-1, :] for obs in trans_obs]).T
        Y = np.concatenate([obs[1:, :] for obs in trans_obs]).T
        U = np.concatenate([traj.ctrls[:-1, :] for traj in trajs]).T

        n = X.shape[0]  # state dimension
        m = U.shape[0]  # control dimension

        XU = np.concatenate((X, U), axis=0)  # stack X and U together
        if self.method == "lstsq":  # Least Squares Solution
            AB = np.dot(Y, sla.pinv2(XU))
            A = AB[:n, :n]
            B = AB[:n, n:]
        elif self.method == "lasso":  # Call lasso regression on coefficients
            print("Call Lasso")
            clf = Lasso(alpha=self.lasso_alpha)
            clf.fit(XU.T, Y.T)
            AB = clf.coef_
            A = AB[:n, :n]
            B = AB[:n, n:]
        elif self.method == "stable":  # Compute stable A, and B
            print("Compute Stable Koopman")
            # call function
            A, _, _, _, B, _ = stabilize_discrete(X, U, Y)

        self.A, self.B = A, B
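The "lstsq" branch is plain least-squares system identification: stacking states and controls gives Y = [A B] [X; U], so [A B] = Y pinv([X; U]). A small check on a known noiseless linear system (made-up matrices, scipy.linalg.pinv in place of pinv2):

import numpy as np
from scipy.linalg import pinv

A_true = np.array([[0.9, 0.1], [0.0, 0.8]])
B_true = np.array([[0.0], [0.5]])

rng = np.random.default_rng(0)
x = np.zeros(2)
X_cols, U_cols, Y_cols = [], [], []
for _ in range(200):
    u = rng.standard_normal(1)
    x_next = A_true @ x + B_true @ u
    X_cols.append(x); U_cols.append(u); Y_cols.append(x_next)
    x = x_next

X = np.array(X_cols).T; U = np.array(U_cols).T; Y = np.array(Y_cols).T
AB = Y @ pinv(np.concatenate((X, U), axis=0))
print(np.allclose(AB[:, :2], A_true, atol=1e-6), np.allclose(AB[:, 2:], B_true, atol=1e-6))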
Example #24
def _pseudo_inverse_dense(L, rhoss, method='direct', **pseudo_args):
    """
    Internal function for computing the pseudo inverse of an Liouvillian using
    dense matrix methods. See pseudo_inverse for details.
    """
    if method == 'direct':
        rho_vec = np.transpose(mat2vec(rhoss.full()))

        tr_mat = tensor([identity(n) for n in L.dims[0][0]])
        tr_vec = np.transpose(mat2vec(tr_mat.full()))

        N = np.prod(L.dims[0][0])
        I = np.identity(N * N)
        P = np.kron(np.transpose(rho_vec), tr_vec)
        Q = I - P
        LIQ = np.linalg.solve(L.full(), Q)
        R = np.dot(Q, LIQ)

        return Qobj(R, dims=L.dims)

    elif method == 'numpy':
        return Qobj(np.linalg.pinv(L.full()), dims=L.dims)

    elif method == 'scipy':
        return Qobj(la.pinv(L.full()), dims=L.dims)

    elif method == 'scipy2':
        return Qobj(la.pinv2(L.full()), dims=L.dims)

    else:
        raise ValueError("Unsupported method '%s'. Use 'direct', 'numpy', "
                         "'scipy' or 'scipy2'" % method)
Example #25
def solve_dXdE(Espan, nsteps, Xini, Jlam, dVdElam, S):
    '''
    Parameters
    Espan: df, 1st and 2nd columns are integration interval, enzyme in rows
    nsteps: int, number of integration steps
    Xini: array, initial values of X
    Jlam: lambdified function, Jacobian matrix
    dVdElam: lambdified function, dVdE
    S: df, stoichiometric matrix, metabolite in rows, reaction in columns

    Returns
    Eout: df, enzyme expression range, enzyme in rows, columns are the same as in Xout
    Xout: df, metabolite concentration range, metabolite in rows, columns are the same as in Eout (initial input metabolite not included)
    '''

    import numpy as np
    import pandas as pd
    from scipy.linalg import eigvals, pinv2
    from constants import eigThreshold

    # prepare initial X, E
    Espan = np.matrix(Espan)

    dE = (Espan[:, 1] - Espan[:, 0]) / nsteps

    X = np.matrix(Xini[:, np.newaxis])
    E = Espan[:, 0]

    # prepare initial Xout, Eout
    Xout = pd.DataFrame(index=S.index, columns=range(nsteps + 1))
    Eout = pd.DataFrame(index=S.columns, columns=range(nsteps + 1))

    Xout.iloc[:, 0] = X
    Eout.iloc[:, 0] = E

    for i in range(1, nsteps + 1):

        XE = np.array(np.concatenate((X, E)))

        # update Jacobian matrix and screen
        J = np.matrix(Jlam(*XE)).astype(float)

        if np.any(eigvals(J).real >= eigThreshold): break

        # update X, E and screen
        dVdE = np.matrix(dVdElam(*XE)).astype(float)

        dX = -pinv2(J) * np.matrix(S) * dVdE * np.matrix(dE)

        X = X + dX
        E = E + dE

        if X.min() <= 0: break

        # update Xout, Eout
        Xout.iloc[:, i] = X
        Eout.iloc[:, i] = E

    return Eout, Xout
Example #26
def calculate_control_speed(V, Baxis_mat, config, F_matrix, wheels_angles, M0e, Tb0, dt):
	J = get_jacobian(Baxis_mat, config[3:8], F_matrix, M0e, Tb0)
	control_speeds = sc.pinv2(J, pinv_cutt_off_threshold) @ V
	config, wheels_angles = NextState(config, wheels_angles, control_speeds.reshape((9,1)), dt, 0, F_matrix)
	# test if any joint exceeds limits
	violations = testJointsLimits(config)
	# if any joint is found to be exceeding limit, the associated jacobian column will be zeroed out and the twist will be recalculated
	recalculate_twist = True if any(violations) else False
	for i in range(3,len(violations)):
		if violations[i] == True:
			J[0:6, i + 1] = np.zeros((6,))

	if recalculate_twist:
		control_speeds = sc.pinv2(J, pinv_cutt_off_threshold) @ V
	# this is to balance the effect of multiplying the Chassis jacobian by the factor
	control_speeds[0:4] = control_speeds[0:4]*(wheels_influence_factor)
	return control_speeds
Example #27
 def test_simple(self):
     a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a, a_pinv),
                               [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a, a_pinv),
                               [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
Example #28
File: ML.py Project: pcolo/regret
def neurons(x,y,nb_neurons):
    n=x.shape[1]
    # random generation of the neurons parameters
    w=st.norm.rvs(size=(n, nb_neurons)) 
    b=st.norm.rvs(size=(1,nb_neurons))
    h=H(w,b,x) # activation matrix computation
    beta_chapeau=dot(la.pinv2(h),y) # Penrose-Moore inversion
    return w,b,beta_chapeau
Example #29
 def test_check_finite(self):
     a = array([[1,2,3],[4,5,6.],[7,8,10]])
     a_pinv = pinv(a, check_finite=False)
     assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
     a_pinv = pinv2(a, check_finite=False)
     assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
     a_pinv = pinv3(a, check_finite=False)
     assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
Example #30
    def _fit_regression(self, y):
        """ Fit regression using pseudo-inverse or supplied regressor"""
        if self.regressor is None:
            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
        else:
            self.regressor.fit(self.hidden_activations_, y)

        self.fitted_ = True
Example #31
 def test_check_finite(self):
     a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
     a_pinv = pinv(a, check_finite=False)
     assert_array_almost_equal(dot(a, a_pinv),
                               [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     a_pinv = pinv2(a, check_finite=False)
     assert_array_almost_equal(dot(a, a_pinv),
                               [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
Example #32
 def test_simple_real(self):
     a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
     a_pinv = pinv3(a)
     assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
Example #33
def test_pinvh_nonpositive():
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_almost_equal(a_pinv, a_pinvh)
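pinvh assumes a symmetric (Hermitian) matrix and works from an eigendecomposition, but on symmetric input it must agree with the general SVD-based pseudo-inverse, which is what this test asserts. A similar standalone check using scipy.linalg.pinv (the current replacement for pinv2):

import numpy as np
from scipy.linalg import pinv, pinvh

a = np.arange(1, 10, dtype=float).reshape(3, 3)
a = a @ a.T                     # symmetric, positive semi-definite, rank 2
print(np.allclose(pinv(a), pinvh(a), atol=1e-6))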
Example #34
 def fit(self,X,y):
     self.hl = HiddenLayer(self.k, kernel=self.kernel, p=self.p, compute_widths=self.compute_widths, 
                           set_centers=self.set_centers, verbose=self.verbose) 
     # Computes hidden layer activations.
     self.hidden_ = self.hl.fit_transform(X)
     # Computes output layer weights. 
     if self.verbose: print("Solving output weights.")
     self.w_ = np.dot(linalg.pinv2(self.hidden_),y) 
     return self 
Example #35
    def __init__(self, cov, lllim, dlogl, nobj):
        self.cov = cov # enforce_posdef(cov)
        self.nspec = len(cov)
        self.lllim = lllim
        self.loglllim = np.log10(self.lllim)
        self.dlogl = dlogl
        self.nobj = nobj

        self.precision, self.cov_rank = pinv2(self.cov, return_rank=True, rcond=1.0e-3)
Example #36
 def test_nonpositive(self):
     a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
     a = np.dot(a, a.T)
     u, s, vt = np.linalg.svd(a)
     s[0] *= -1
     a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
     a_pinv = pinv2(a)
     a_pinvh = pinvh(a)
     assert_array_almost_equal(a_pinv, a_pinvh)
Example #37
def zero_freq_noise(liouvillian, jump_liouvillian, sys_dim, stationary_state, dv_pops, Gamma_L, Gamma_R):
    J_1 = cs.differentiate_jump_matrix(jump_liouvillian)
    Q = np.eye(sys_dim**2) - np.outer(stationary_state, dv_pops)
    R0 = np.dot(Q, np.dot(la.pinv2(-liouvillian), Q)) #analytic_R0(Gamma_L, Gamma_R) # 
    
    noise = - cs.trace_density_vector(np.dot(cs.differentiate_jump_matrix(J_1), stationary_state), dv_pops) \
                        - 2. * cs.trace_density_vector(np.dot(np.dot(np.dot(J_1, R0), J_1), stationary_state), dv_pops)
    
    return noise
Example #38
    def __init__(self, obs_ens, observation, obs_err_cov):
        """Prepare the update."""
        Y           = mean0(obs_ens)
        obs_cov     = obs_err_cov*(N-1) + Y.T@Y
        obs_pert    = randn(N, len(observation)) @ sqrt(obs_err_cov)
        innovations = observation - (obs_ens + obs_pert)

        # (pre-) Kalman gain * Innovations
        self.KGdY = innovations @ sla.pinv2(obs_cov) @ Y.T
Example #39
def fastica(X,
            nSources=None,
            algorithm="parallel fp",
            decorrelation="mdum",
            nonlinearity="logcosh",
            alpha=1.0,
            maxIterations=500,
            tolerance=1e-05,
            Winit=None,
            scaled=True):
    algorithm_funcs = {'parallel fp': ica_par_fp, 'deflation': ica_def}
    orthog_funcs = {'mdum': decorrelation_mdum, 'witer': decorrelation_witer}

    if nonlinearity == 'logcosh':
        g = lc
        gprime = lcp
    elif nonlinearity == 'exp':
        g = gauss
        gprime = gaussp
    elif nonlinearity == 'skew':
        g = skew
        gprime = skewp
    else:
        g = cube
        gprime = cubep

    nmix, nsamp = X.shape

    if nSources is None:
        nSources = nmix
    if Winit is None:
        Winit = randn(nSources, nSources)

    # preprocessing (centering/whitening/pca)
    rowmeansX, X = rowcenter(X)
    Kw, Kd = whiteningmatrix(X, nSources)
    X = dot(Kw, X)

    #kwargs = {'tolerance': tolerance, 'g': g, 'gprime': gprime, 'orthog': orthog_funcs[decorrelation], 'alpha': alpha,
    #         'maxIterations': maxIterations, 'Winit': Winit}
    func = algorithm_funcs[algorithm]

    # run ICA
    W = func(X)  #, **kwargs)

    # construct the sources - means are not restored
    S = dot(W, X)

    # mixing matrix
    A = pinv2(dot(W, Kw))

    if scaled == True:
        S = S / S.std(axis=-1)[:, newaxis]
        A = A * S.std(axis=-1)[newaxis, :]

    return A, W, S
Example #40
    def compute_transcription_factor_activity(
            self, allow_self_interactions_for_duplicate_prior_columns=True):
        # Find TFs that have non-zero columns in the priors matrix
        non_zero_tfs = pd.Index(
            self.prior.columns[(self.prior != 0).any(axis=0)])
        # Delete tfs that have neither prior information nor expression
        delete_tfs = self.prior.columns.difference(
            self.expression_matrix.index).difference(non_zero_tfs)

        # Raise warnings
        if len(delete_tfs) > 0:
            message = "{num} TFs are removed from activity (no expression or prior exists)".format(
                num=len(delete_tfs))
            utils.Debug.vprint(message, level=0)
            self.prior = self.prior.drop(delete_tfs, axis=1)

        # Create activity dataframe with values set by default to the transcription factor's expression
        # Create an empty dataframe [K x G]
        activity = pd.DataFrame(0.0,
                                index=self.prior.columns,
                                columns=self.expression_matrix.columns)

        # Populate with expression values as a default
        add_default_activity = self.prior.columns.intersection(
            self.expression_matrix.index)
        activity.loc[add_default_activity, :] = self.expression_matrix.loc[
            add_default_activity, :]

        # Find all non-zero TFs that are duplicates of any other non-zero tfs
        is_duplicated = self.prior[non_zero_tfs].transpose().duplicated(
            keep=False)

        # Find non-zero TFs that are also present in target gene list
        self_interacting_tfs = non_zero_tfs.intersection(self.prior.index)

        if is_duplicated.sum() > 0:
            duplicates = is_duplicated[is_duplicated].index.tolist()

            # If this flag is set to true, don't count duplicates as self-interacting when setting the diag to zero
            if allow_self_interactions_for_duplicate_prior_columns:
                self_interacting_tfs = self_interacting_tfs.difference(
                    duplicates)

        # Set the diagonal of the matrix subset of self-interacting tfs to zero
        subset = self.prior.loc[self_interacting_tfs,
                                self_interacting_tfs].values
        np.fill_diagonal(subset, 0)
        self.prior.at[self_interacting_tfs, self_interacting_tfs] = subset

        # Set the activity of non-zero tfs to the pseudoinverse of the prior matrix times the expression
        if len(non_zero_tfs) > 0:
            activity.loc[non_zero_tfs, :] = np.matrix(
                linalg.pinv2(self.prior[non_zero_tfs])) * np.matrix(
                    self.expression_matrix_halftau)

        return activity
Example #41
 def fit(self, X, y):
     self.X = X
     self.y = y
     self.n, self.p = self.X.shape
     self.R = self.R_ij(self.X)
     #print self.R
     self.RI = pinv2(self.R)
     # self.RI = inv(self.R)
     #print self.RI
     self.b = self.get_b()
Example #42
 def _compute_cov(self):
     '''Compute covariance
     '''
     somefixed = (self.par_fix is not None) and any(isfinite(self.par_fix))
     H = np.asmatrix(self._hessian(self._fitfun, self.par, self.data))
     self.H = H
     try:
         if somefixed:
             allfixed = all(isfinite(self.par_fix))
             if allfixed:
                 self.par_cov[:, :] = 0
             else:
                 pcov = -pinv2(H[self.i_notfixed, :][..., self.i_notfixed])
                 for row, ix in enumerate(list(self.i_notfixed)):
                     self.par_cov[ix, self.i_notfixed] = pcov[row, :]
         else:
             self.par_cov = -pinv2(H)
     except:
         self.par_cov[:, :] = nan
Example #43
 def _calcBatchUpdate(self, fitnesses):
     invSigma = inv(self.sigma)
     samples = self.allSamples[-self.batchSize:]
     phi = zeros((self.batchSize, self.numParams+1))
     phi[:, :self.xdim] = self._logDerivsX(samples, self.x, invSigma)
     phi[:, self.xdim:-1] = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
     phi[:, -1] = 1
     
     update = dot(pinv2(phi), fitnesses)[:-1]
     return update
Example #44
    def fit(self):
        self.R = self.R_ij(self.X)
        if np.linalg.matrix_rank(self.R) < self.R.shape[1]:
            wait = 1.
        try:
            self.RI = pinv2(self.R)
        except:
            wait = 1.

        self.b = self.get_b()
Example #45
    def __init__(self, obs_ens, observations, obs_err_cov):
        """Prepare the update."""
        Y, _ = center(obs_ens, rescale=True)
        obs_cov = obs_err_cov * (N - 1) + Y.T @ Y
        obs_pert = rnd.randn(N, len(observations)) @ sqrt(obs_err_cov)
        innovations = observations - (obs_ens + obs_pert)

        # (pre-) Kalman gain * Innovations
        # Also called the X5 matrix by Evensen'2003.
        self.KGdY = innovations @ sla.pinv2(obs_cov) @ Y.T
Example #46
    def fit(self):
        self.R = self.R_ij(self.X)
        if np.linalg.matrix_rank(self.R) < self.R.shape[1]:
            wait = 1.
        try:
            self.RI = pinv2(self.R)
        except:
            wait = 1.

        self.b = self.get_b()
Example #47
 def __slowSolve__(x, m):
     n = len(x)
     p = m + 1
     r = np.zeros(p)
     nx = np.min((p, n))
     x = np.correlate(x, x, 'full')
     r[:nx] = x[n - 1:n+m]
     a = np.dot(sla.pinv2(sla.toeplitz(r[:-1])), -r[1:])
     gain = np.sqrt(r[0] + np.sum(a * r[1:]))
     return a, gain
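__slowSolve__ is the autocorrelation (Yule-Walker) method for linear-prediction coefficients: build the autocorrelation Toeplitz system and solve it with a pseudo-inverse. A self-contained sketch of the same algebra (a hypothetical lpc helper, scipy.linalg.pinv in place of pinv2) that roughly recovers the coefficients of a known AR(2) process:

import numpy as np
from scipy.linalg import pinv, toeplitz

def lpc(x, m):
    n = len(x)
    r = np.correlate(x, x, 'full')[n - 1:n + m]     # autocorrelation lags 0..m
    a = pinv(toeplitz(r[:-1])) @ (-r[1:])           # Yule-Walker solve
    gain = np.sqrt(r[0] + np.sum(a * r[1:]))
    return a, gain

# AR(2) signal: x[t] = 0.75 x[t-1] - 0.5 x[t-2] + noise
rng = np.random.default_rng(1)
x = np.zeros(5000)
for t in range(2, len(x)):
    x[t] = 0.75 * x[t - 1] - 0.5 * x[t - 2] + rng.standard_normal()

a, gain = lpc(x, 2)
print(a)    # approximately [-0.75, 0.5] (prediction-error convention flips the sign)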
Example #48
 def _fit_regression(self, y):
     """
     fit regression using internal linear regression
     or supplied regressor
     """
     if (self.regressor is None):
         self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
     else:
         self.regressor.fit(self.hidden_activations_, y)
     self.fitted_ = True
Example #49
    def fit2(self, X, y):
        """
        Fit the model using X, y as training data.

        Using Woodbury formula

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels in classification, real numbers in
            regression)

        Returns
        -------
        self : object

            Returns an instance of self.
        """
        # fit random hidden layer and compute the hidden layer activations
        #self.H = self.hidden_layer.fit_transform(X)
        H = self._create_random_layer().fit_transform(X)
        y = as_float_array(y, copy=True)

        if self.beta is None:
            # Then, this is the first time the model is fitted
            assert len(X) >= self.n_hidden, ValueError(
                "The first time the model is fitted, X must have "
                "at least equal number of samples than n_hidden value!")
            # TODO: handle cases of singular matrices (maybe with a try clause)
            self.P = pinv2(safe_sparse_dot(H.T, H))
            self.beta = multiple_safe_sparse_dot(self.P, H.T, y)
        else:
            M = np.eye(len(H)) + multiple_safe_sparse_dot(H, self.P, H.T)
            self.P -= multiple_safe_sparse_dot(self.P, H.T, pinv2(M), H,
                                               self.P)
            e = y - safe_sparse_dot(H, self.beta)
            self.beta += multiple_safe_sparse_dot(self.P, H.T, e)

        return self
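The else-branch is the recursive least-squares (Woodbury) update used by online ELMs: P tracks (H^T H)^-1, so a new batch only requires inverting a small matrix of the batch size. A standalone check (random data, illustrative names) that the recursion matches refitting on all data at once:

import numpy as np
from scipy.linalg import pinv

rng = np.random.default_rng(0)
H1 = rng.standard_normal((40, 10)); y1 = rng.standard_normal((40, 2))   # first batch
H2 = rng.standard_normal((15, 10)); y2 = rng.standard_normal((15, 2))   # second batch

# first fit
P = pinv(H1.T @ H1)
beta = P @ H1.T @ y1

# Woodbury update for the second batch
M = np.eye(len(H2)) + H2 @ P @ H2.T
P = P - P @ H2.T @ pinv(M) @ H2 @ P
beta = beta + P @ H2.T @ (y2 - H2 @ beta)

# reference: ordinary least squares on all rows
H = np.vstack([H1, H2]); y = np.vstack([y1, y2])
print(np.allclose(beta, pinv(H.T @ H) @ H.T @ y, atol=1e-8))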
Example #50
def baseline(y, deg=3, max_it=100, tol=1e-3):
    """Computes the baseline of a given data.

    Iteratively performs a polynomial fitting in the data to detect its
    baseline. At every iteration, the fitting weights on the regions with
    peaks are reduced to identify the baseline only.

    Parameters
    ----------
    y : ndarray
        Data to detect the baseline.
    deg : int
        Degree of the polynomial that will estimate the data baseline. A low
        degree may fail to detect all the baseline present, while a high
        degree may make the data too oscillatory, especially at the edges.
    max_it : int
        Maximum number of iterations to perform.
    tol : float
        Tolerance to use when comparing the difference between the current
        fit coefficient and the ones from the last iteration. The iteration
        procedure will stop when the difference between them is lower than
        *tol*.

    Returns
    -------
    ndarray
        Array with the baseline amplitude for every original point in *y*

    Information:
    This function is stolen from https://bitbucket.org/lucashnegri/peakutils
    Documentation: https://pythonhosted.org/PeakUtils/reference.html
    (MIT License)
    """
    order = deg + 1
    coeffs = np.ones(order)

    # try to avoid numerical issues
    cond = math.pow(y.max(), 1. / order)
    x = np.linspace(0., cond, y.size)
    base = y.copy()

    vander = np.vander(x, order)
    vander_pinv = LA.pinv2(vander)

    for _ in range(max_it):
        coeffs_new = np.dot(vander_pinv, y)

        if LA.norm(coeffs_new - coeffs) / LA.norm(coeffs) < tol:
            break

        coeffs = coeffs_new
        base = np.dot(vander, coeffs)
        y = np.minimum(y, base)

    return base
Example #51
def derivativeSquareOfGeodesicOnSPD(x,y):
  if len(x.shape)!=1:
    sq = sqrtm(x)
    invsq = pinv2(sq)
    F = np.dot(np.dot(invsq, y), invsq)
    return 2*np.dot(np.dot(sq, logm(F)), sq)
  else:
    sq = x**0.5
    invsq = 1.0 / sq
    F = invsq * y * invsq
    return 2*sq*np.log(F)*sq
Example #52
def geodesicDistanceOnSPD(x, y):
  if len(x.shape)!=1:
    sq = sqrtm(x)
    invsq = pinv2(sq)
    F = np.dot(np.dot(invsq, y), invsq)
    return np.linalg.norm(logm(F))
  else:
    sq = x**0.5
    invsq = 1.0 / sq
    F = invsq * y * invsq
    return np.linalg.norm(np.log(F))
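geodesicDistanceOnSPD is the affine-invariant distance d(x, y) = ||logm(x^{-1/2} y x^{-1/2})||_F. For commuting (for instance diagonal) SPD matrices it reduces to the norm of the element-wise log-ratio, which gives an easy sanity check (a self-contained rewrite, not the helper above):

import numpy as np
from scipy.linalg import sqrtm, logm, inv

def spd_dist(x, y):
    isq = inv(sqrtm(x))
    return np.linalg.norm(logm(isq @ y @ isq))

x = np.diag([1.0, 4.0])
y = np.diag([2.0, 8.0])
scalar = np.linalg.norm(np.log(np.diag(y) / np.diag(x)))
print(np.isclose(spd_dist(x, y), scalar))       # both equal sqrt(2) * log(2)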
Example #53
def calcMeanOnSPD(p, q):
  if len(p.shape)!=1:
    sq = sqrtm(p)
    invsq = pinv2(sq)
    F = sqrtm(np.dot(np.dot(sq, q), sq))
    return np.dot(np.dot(invsq,F),invsq)
  else:
    sq = p**0.5
    invsq = 1.0 / sq
    F = (sq * q * sq) **0.5
    return invsq * F * invsq
Example #54
 def _compute_cov(self):
     """Compute covariance
     """
     somefixed = (self.par_fix is not None) and any(isfinite(self.par_fix))
     # H1 = numpy.asmatrix(self.dist.hessian_nnlf(self.par, self.data))
     H = numpy.asmatrix(self.dist.hessian_nlogps(self.par, self.data))
     self.H = H
     try:
         if somefixed:
             allfixed = all(isfinite(self.par_fix))
             if allfixed:
                 self.par_cov[:, :] = 0
             else:
                 pcov = -pinv2(H[self.i_notfixed, :][..., self.i_notfixed])
                 for row, ix in enumerate(list(self.i_notfixed)):
                     self.par_cov[ix, self.i_notfixed] = pcov[row, :]
         else:
             self.par_cov = -pinv2(H)
     except:
         self.par_cov[:, :] = nan
Example #55
def laplacian_psinv(self):
    l_l = []
    # laplacian matrix
    for n in range(self.number_table):
        if self.graphList[n] is not None:
            if self.graphList[n].laplacian_psinv is None:
                # get the graph laplacian
                l = self.graphList[n].graph.laplacian(weights=self.graphList[n].graph.es["weight"], normalized=False)
                # pseudoinverse
                self.graphList[n].laplacian_psinv = lpsinv = linalg.pinv2(l)
Example #56
    def _fit_3step(self, X, Y):
        self.hl = HiddenLayer(self.h) 
        self.hl.fit(X) 

        D = self.hl.transform(X) 
        
        # compute w 
        print(D.shape)
        print(Y.shape)
        self.w_ = np.dot(linalg.pinv2(D), Y) 
        return self
Example #57
    def _fit_regression(self, y):
        """
        fit regression using pseudo-inverse
        or supplied regressor
        """
        if (self.regressor is None):
            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
        else:
            self.regressor.fit(self.hidden_activations_, y)

        self.fitted_ = True