예제 #1
0
 def get_v(self, X):
     """Build a scaled Gram matrix C = (1/K) * X X^T, return an orthonormal
     basis V obtained from its QR factorization, and r, the count of
     strictly positive eigenvalues of C.

     NOTE(review): the Q factor of a QR decomposition is not in general
     the eigenvector matrix of C -- confirm this basis is what callers expect.
     """
     gram = dot(X, transpose(X))
     C = dot((1 / (self.K)), gram)
     # Orthonormal factor of C serves as the returned basis.
     V, _ = qr(C)
     # Count positive eigenvalues; the descending sort does not change the count.
     spectrum = np.sort(eigvals(C))[::-1]
     positive = np.where(spectrum > 0)[0]
     r = len(positive)
     return V, r
def find_roots(p, scaling=10):
    """Find roots of a polynomial with coefficients `p` by
    computing eigenvalues of the associated companion matrix.

    Overflow can be avoided during the eigenvalue calculation
    by preconditioning the companion matrix with a similarity
    transformation defined by scaling factor `scaling` > 1.
    This transformation will render the eigenvectors in a new
    basis, but the eigenvalues will remain unchanged.

    Empirically, `scaling=10` appears to enable successful
    root-finding on polynomials up to degree 192.

    :param p: polynomial coefficients passed to `companion_matrix`
    :param scaling: base of the diagonal preconditioner (> 1)
    :return: array of eigenvalues of the preconditioned companion matrix
    """
    scaling = numpy.float64(scaling)
    M = companion_matrix(p)
    N = len(M)
    # Diagonal similarity transform D^-1 M D: eigenvalues are unchanged,
    # but entries are rescaled so large powers do not overflow.
    # Plain ndarrays + numpy.dot replace the deprecated numpy.matrix type.
    D = numpy.diag(scaling ** numpy.arange(N))
    Dinv = numpy.diag((1 / scaling) ** numpy.arange(N))
    Mstar = numpy.dot(numpy.dot(Dinv, M), D)

    return linalg.eigvals(Mstar)
예제 #3
0
    def mapMStep(self, dist, posterior, data, mix_pi=None, dist_ind=None):

        """ Empirical Bayesian M-step for a linear Gaussian with a Gaussian prior over B.
            See Bishop page 169 for details.

            Updates `dist.beta`, `dist.sigma`, `dist.predicted` and
            `dist.currentPosterior` in place; may also update `self.gamma`
            and `self.alpha[dist_ind]` when `self.fixed == 0`.

            dist      -- LinearGaussianDistribution updated in place
            posterior -- per-sample responsibilities, used as weights
            data      -- DataSet or any iterable yielding the data matrix
            mix_pi    -- unused here (kept for interface compatibility)
            dist_ind  -- index of this component into self.alpha
        """

        assert isinstance(dist, LinearGaussianDistribution)

        # Accept either a DataSet wrapper or a raw iterable matrix.
        if isinstance(data, DataSet):
            dt = data.internalData
        elif hasattr(data, "__iter__"):
            dt = data
        else:
            raise TypeError, "Unknown/Invalid input to MStep."

        # First column of data set of the matrix
        y = dt[:, 0]
        # Matrix column of 1's concatenated with rest of columns of data set of the matrix
        #x = np.concatenate((np.array([np.ones(len(dt))]).T, dt[:,1:]), axis=1)
        x = dt[:, 1:]

        # Posterior-weighted response; its mean acts as the intercept below.
        yaux = np.array(np.multiply(y, np.matrix(posterior).T))
        mean = np.mean(yaux)

        #eigen values of X^t.X/sigma^2
        xaux = np.array(np.multiply(x, np.matrix(posterior).T))
        XXs = np.dot(xaux.T, x) / np.power(dist.sigma[0], 2)

        if (self.fixed == 0):
            lambdas = la.eigvals(XXs)
            # estimate gamma: sum of lambda_i / (lambda_i + alpha), the
            # effective number of well-determined parameters (Bishop eq. 3.91)
            self.gamma = np.sum(np.divide(lambdas, lambdas + self.alpha[dist_ind]))

        # Beta estimation: MAP weights solve (alpha*I + X^T X / sigma^2) beta = X^T y / sigma^2
        beta_numerator = np.dot(xaux.T, y) / np.power(dist.sigma[0], 2)
        beta_denominator = self.alpha[dist_ind] * np.identity(len(x[0])) + XXs
        try:
            betashort = np.dot(np.linalg.inv(beta_denominator), beta_numerator)
            # Prepend the intercept (weighted mean of y) to the slope weights.
            dist.beta = np.concatenate(([mean], betashort), axis=1)
        except la.LinAlgError:
            raise EmptyComponent, "Empty Component: Singular Matrix"

        # Sigma estimation: posterior-weighted residual variance
        dist.predicted = mean + np.dot(x, betashort)
        y_x_betat = np.subtract(y, dist.predicted)
        dist.predicted = np.multiply(dist.predicted, posterior)

        sigma_numerator = np.dot(np.multiply(y_x_betat, posterior), y_x_betat)
        if (self.fixed == 0):
            # Degrees of freedom corrected by the effective parameter count.
            sigma_denominator = posterior.sum() - self.gamma
        else:
            sigma_denominator = posterior.sum() - 1

        try:
            dist.sigma[0] = np.sqrt(sigma_numerator / sigma_denominator)
        except FloatingPointError:
            # Guard against a degenerate (zero/negative) denominator.
            dist.sigma[0] = 0.0001

        # alpha re-estimation: gamma / ||beta||^2 (evidence approximation update)
        if (self.fixed == 0):
            self.alpha[dist_ind] = self.gamma / np.dot(dist.beta, dist.beta.T)

        dist.currentPosterior = posterior
예제 #4
0
def is_pos_def(x):
    """Return True iff every eigenvalue of the matrix `x` is strictly positive."""
    eigenvalues = eigvals(x)
    return np.all(np.greater(eigenvalues, 0))
예제 #5
0
    def mapMStep(self, dist, posterior, data, mix_pi=None, dist_ind=None):
        """ Empirical Bayesian M-step for a linear Gaussian with a Gaussian prior over B.
            See Bishop page 169 for details.

            Updates `dist.beta`, `dist.sigma`, `dist.predicted` and
            `dist.currentPosterior` in place; may also update `self.gamma`
            and `self.alpha[dist_ind]` when `self.fixed == 0`.

            dist      -- LinearGaussianDistribution updated in place
            posterior -- per-sample responsibilities, used as weights
            data      -- DataSet or any iterable yielding the data matrix
            mix_pi    -- unused here (kept for interface compatibility)
            dist_ind  -- index of this component into self.alpha
        """

        assert isinstance(dist, LinearGaussianDistribution)

        # Accept either a DataSet wrapper or a raw iterable matrix.
        if isinstance(data, DataSet):
            dt = data.internalData
        elif hasattr(data, "__iter__"):
            dt = data
        else:
            raise TypeError, "Unknown/Invalid input to MStep."

        # First column of data set of the matrix
        y = dt[:, 0]
        # Matrix column of 1's concatenated with rest of columns of data set of the matrix
        #x = np.concatenate((np.array([np.ones(len(dt))]).T, dt[:,1:]), axis=1)
        x = dt[:, 1:]

        # Posterior-weighted response; its mean acts as the intercept below.
        yaux = np.array(np.multiply(y, np.matrix(posterior).T))
        mean = np.mean(yaux)

        #eigen values of X^t.X/sigma^2
        xaux = np.array(np.multiply(x, np.matrix(posterior).T))
        XXs = np.dot(xaux.T, x) / np.power(dist.sigma[0], 2)

        if (self.fixed == 0):
            lambdas = la.eigvals(XXs)
            # estimate gamma: sum of lambda_i / (lambda_i + alpha), the
            # effective number of well-determined parameters (Bishop eq. 3.91)
            self.gamma = np.sum(
                np.divide(lambdas, lambdas + self.alpha[dist_ind]))

        # Beta estimation: MAP weights solve (alpha*I + X^T X / sigma^2) beta = X^T y / sigma^2
        beta_numerator = np.dot(xaux.T, y) / np.power(dist.sigma[0], 2)
        beta_denominator = self.alpha[dist_ind] * np.identity(len(x[0])) + XXs
        try:
            betashort = np.dot(np.linalg.inv(beta_denominator), beta_numerator)
            # Prepend the intercept (weighted mean of y) to the slope weights.
            dist.beta = np.concatenate(([mean], betashort), axis=1)
        except la.LinAlgError:
            raise EmptyComponent, "Empty Component: Singular Matrix"

        # Sigma estimation: posterior-weighted residual variance
        dist.predicted = mean + np.dot(x, betashort)
        y_x_betat = np.subtract(y, dist.predicted)
        dist.predicted = np.multiply(dist.predicted, posterior)

        sigma_numerator = np.dot(np.multiply(y_x_betat, posterior), y_x_betat)
        if (self.fixed == 0):
            # Degrees of freedom corrected by the effective parameter count.
            sigma_denominator = posterior.sum() - self.gamma
        else:
            sigma_denominator = posterior.sum() - 1

        try:
            dist.sigma[0] = np.sqrt(sigma_numerator / sigma_denominator)
        except FloatingPointError:
            # Guard against a degenerate (zero/negative) denominator.
            dist.sigma[0] = 0.0001

        # alpha re-estimation: gamma / ||beta||^2 (evidence approximation update)
        if (self.fixed == 0):
            self.alpha[dist_ind] = self.gamma / np.dot(dist.beta, dist.beta.T)

        dist.currentPosterior = posterior