def testFullRank(self):
    """Exercise every LeastSquares solver variant on a well-conditioned,
    full-rank random problem, comparing against numpy's reference solution.

    Three rounds are run: fresh construction from each input kind, updating
    each solver in place with the same kind of inputs it was built from, and
    updating each solver in place with the opposite kind of inputs
    (design matrix vs. normal equations).
    """
    dimension = 10
    nData = 500

    def makeProblem():
        # Random full-rank (nData x dimension) design matrix and data vector,
        # plus the normal equations and numpy's reference results.
        design = numpy.random.randn(dimension, nData).transpose()
        data = numpy.random.randn(nData)
        fisher = numpy.dot(design.transpose(), design)
        rhs = numpy.dot(design.transpose(), data)
        # rcond=None selects the machine-precision cutoff and avoids numpy's
        # FutureWarning about the old lstsq default (the previous bare call
        # relied on that deprecated default); for this well-conditioned
        # full-rank problem the solution is unaffected.
        solution, _residues, rank, sv = numpy.linalg.lstsq(design, data,
                                                           rcond=None)
        cov = numpy.linalg.inv(fisher)
        return design, data, fisher, rhs, solution, rank, sv, cov

    design, data, fisher, rhs, solution, rank, sv, cov = makeProblem()
    s_svd = LeastSquares.fromDesignMatrix(design, data, LeastSquares.DIRECT_SVD)
    s_design_eigen = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_EIGENSYSTEM)
    s_design_cholesky = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_CHOLESKY)
    s_normal_eigen = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_EIGENSYSTEM)
    s_normal_cholesky = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_CHOLESKY)
    self.check(s_svd, solution, rank, fisher, cov, sv)
    self.check(s_design_eigen, solution, rank, fisher, cov, sv)
    self.check(s_design_cholesky, solution, rank, fisher, cov, sv)
    self.check(s_normal_eigen, solution, rank, fisher, cov, sv)
    self.check(s_normal_cholesky, solution, rank, fisher, cov, sv)
    # test updating solver in-place with the same kind of inputs
    design, data, fisher, rhs, solution, rank, sv, cov = makeProblem()
    s_svd.setDesignMatrix(design, data)
    s_design_eigen.setDesignMatrix(design, data)
    s_design_cholesky.setDesignMatrix(design, data)
    s_normal_eigen.setNormalEquations(fisher, rhs)
    s_normal_cholesky.setNormalEquations(fisher, rhs)
    self.check(s_svd, solution, rank, fisher, cov, sv)
    self.check(s_design_eigen, solution, rank, fisher, cov, sv)
    self.check(s_design_cholesky, solution, rank, fisher, cov, sv)
    self.check(s_normal_eigen, solution, rank, fisher, cov, sv)
    self.check(s_normal_cholesky, solution, rank, fisher, cov, sv)
    # test updating solver in-place with the opposite kind of inputs
    design, data, fisher, rhs, solution, rank, sv, cov = makeProblem()
    s_normal_eigen.setDesignMatrix(design, data)
    s_normal_cholesky.setDesignMatrix(design, data)
    s_design_eigen.setNormalEquations(fisher, rhs)
    s_design_cholesky.setNormalEquations(fisher, rhs)
    self.check(s_design_eigen, solution, rank, fisher, cov, sv)
    self.check(s_design_cholesky, solution, rank, fisher, cov, sv)
    self.check(s_normal_eigen, solution, rank, fisher, cov, sv)
    self.check(s_normal_cholesky, solution, rank, fisher, cov, sv)
def testFullRank(self):
    """Check all LeastSquares solver variants against numpy on a
    well-conditioned, full-rank random problem.

    After the initial construction-and-check pass, each solver is refreshed
    in place twice: once with the same kind of inputs it was built from, and
    once with the opposite kind (design matrix vs. normal equations).
    """
    dimension = 10
    nData = 500

    def newProblem():
        # Draw a fresh random full-rank problem and compute numpy's
        # reference solution, rank, singular values, and covariance.
        matrix = np.random.randn(dimension, nData).transpose()
        values = np.random.randn(nData)
        normal = np.dot(matrix.transpose(), matrix)
        projected = np.dot(matrix.transpose(), values)
        answer, _, rnk, singular = np.linalg.lstsq(matrix, values, rcond=None)
        covariance = np.linalg.inv(normal)
        return matrix, values, normal, projected, answer, rnk, singular, covariance

    design, data, fisher, rhs, solution, rank, sv, cov = newProblem()
    s_svd = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.DIRECT_SVD)
    s_design_eigen = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_EIGENSYSTEM)
    s_design_cholesky = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_CHOLESKY)
    s_normal_eigen = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_EIGENSYSTEM)
    s_normal_cholesky = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_CHOLESKY)
    allSolvers = (s_svd, s_design_eigen, s_design_cholesky,
                  s_normal_eigen, s_normal_cholesky)
    for solver in allSolvers:
        self.check(solver, solution, rank, fisher, cov, sv)
    # test updating solver in-place with the same kind of inputs
    design, data, fisher, rhs, solution, rank, sv, cov = newProblem()
    for solver in (s_svd, s_design_eigen, s_design_cholesky):
        solver.setDesignMatrix(design, data)
    for solver in (s_normal_eigen, s_normal_cholesky):
        solver.setNormalEquations(fisher, rhs)
    for solver in allSolvers:
        self.check(solver, solution, rank, fisher, cov, sv)
    # test updating solver in-place with the opposite kind of inputs
    design, data, fisher, rhs, solution, rank, sv, cov = newProblem()
    for solver in (s_normal_eigen, s_normal_cholesky):
        solver.setDesignMatrix(design, data)
    for solver in (s_design_eigen, s_design_cholesky):
        solver.setNormalEquations(fisher, rhs)
    for solver in (s_design_eigen, s_design_cholesky,
                   s_normal_eigen, s_normal_cholesky):
        self.check(solver, solution, rank, fisher, cov, sv)
def testSingular(self):
    """Verify solver behaviour on a rank-deficient problem.

    A design matrix is synthesized with a controlled singular-value
    spectrum (smallest value exactly zero, next-smallest tiny), checked
    against numpy at a near-machine-precision cutoff, and then re-solved
    with larger thresholds that must discard the two smallest modes and
    yield mutually consistent solutions.
    """
    dimension = 10
    nData = 100
    # Strictly positive spectrum sorted descending, then the two smallest
    # entries forced to zero and ~1e-4 of the largest, respectively.
    spectrum = numpy.sort((numpy.random.randn(dimension) + 1.0)**2 + 1.0)[::-1]
    spectrum[-1] = 0.0
    spectrum[-2] = spectrum[0] * 1E-4
    # The SVD of a random matrix only supplies a pair of orthogonal
    # factors; its singular values are discarded in favour of our
    # controlled spectrum so the matrix's stability is under our control.
    leftVecs, _, rightVecsT = numpy.linalg.svd(
        numpy.random.randn(dimension, nData), full_matrices=False)
    design = numpy.dot(leftVecs * spectrum, rightVecsT).transpose()
    data = numpy.random.randn(nData)
    fisher = numpy.dot(design.transpose(), design)
    rhs = numpy.dot(design.transpose(), data)
    threshold = 10 * sys.float_info.epsilon
    solution, _, rank, sv = numpy.linalg.lstsq(design, data, rcond=threshold)
    self.assertClose(spectrum, sv)
    cov = numpy.linalg.pinv(fisher, rcond=threshold)
    s_svd = LeastSquares.fromDesignMatrix(design, data, LeastSquares.DIRECT_SVD)
    s_design_eigen = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_EIGENSYSTEM)
    s_normal_eigen = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_EIGENSYSTEM)
    for solver in (s_svd, s_design_eigen, s_normal_eigen):
        self.check(solver, solution, rank, fisher, cov, sv)
    # Raising the thresholds should discard the two smallest singular
    # values (the normal-equation thresholds are squared relative to the
    # design-matrix one, hence 1E-6 vs 1E-3 — TODO confirm against the
    # LeastSquares docs).
    s_svd.setThreshold(1E-3)
    s_design_eigen.setThreshold(1E-6)
    s_normal_eigen.setThreshold(1E-6)
    for solver in (s_svd, s_design_eigen, s_normal_eigen):
        self.assertEqual(solver.getRank(), dimension - 2)
    # Just check that solutions are different from before, but consistent with each other;
    # I can't figure out how get numpy.lstsq to deal with the thresholds appropriately to
    # test against that.
    for solver in (s_svd, s_design_eigen, s_normal_eigen):
        self.assertNotClose(solver.getSolution(), solution)
    self.assertClose(s_svd.getSolution(), s_design_eigen.getSolution())
    self.assertClose(s_svd.getSolution(), s_normal_eigen.getSolution())
def testSingular(self):
    """Exercise the SVD and eigensystem solvers on a rank-deficient system
    with a controlled singular-value spectrum.

    The solvers are first validated against numpy at a near-machine-epsilon
    cutoff; the rank-cutoff thresholds are then raised so that two modes
    are dropped, and the resulting solutions are checked to differ from the
    reference while agreeing with one another.
    """
    dimension = 10
    nData = 100
    svIn = np.sort((np.random.randn(dimension) + 1.0)**2 + 1.0)[::-1]
    # Make the matrix rank-deficient: last singular value exactly zero,
    # the one before it four orders of magnitude below the largest.
    svIn[-1] = 0.0
    svIn[-2] = svIn[0] * 1E-4
    # Just use SVD to get a pair of orthogonal matrices; we'll use our own
    # singular values so we can control the stability of the matrix.
    u, _, vt = np.linalg.svd(np.random.randn(dimension, nData),
                             full_matrices=False)
    design = np.dot(u * svIn, vt).transpose()
    data = np.random.randn(nData)
    fisher = np.dot(design.transpose(), design)
    rhs = np.dot(design.transpose(), data)
    threshold = 10 * sys.float_info.epsilon
    solution, _, rank, sv = np.linalg.lstsq(design, data, rcond=threshold)
    self._assertClose(svIn, sv)
    # Pseudo-inverse with the same cutoff gives the reference covariance.
    cov = np.linalg.pinv(fisher, rcond=threshold)
    s_svd = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.DIRECT_SVD)
    s_design_eigen = LeastSquares.fromDesignMatrix(
        design, data, LeastSquares.NORMAL_EIGENSYSTEM)
    s_normal_eigen = LeastSquares.fromNormalEquations(
        fisher, rhs, LeastSquares.NORMAL_EIGENSYSTEM)
    self.check(s_svd, solution, rank, fisher, cov, sv)
    self.check(s_design_eigen, solution, rank, fisher, cov, sv)
    self.check(s_normal_eigen, solution, rank, fisher, cov, sv)
    # With larger thresholds both small singular values fall below the
    # cutoff, so the effective rank drops by two.
    s_svd.setThreshold(1E-3)
    s_design_eigen.setThreshold(1E-6)
    s_normal_eigen.setThreshold(1E-6)
    expectedRank = dimension - 2
    self.assertEqual(s_svd.getRank(), expectedRank)
    self.assertEqual(s_design_eigen.getRank(), expectedRank)
    self.assertEqual(s_normal_eigen.getRank(), expectedRank)
    # Just check that solutions are different from before, but consistent with each other;
    # I can't figure out how get np.lstsq to deal with the thresholds appropriately to
    # test against that.
    self._assertNotClose(s_svd.getSolution(), solution)
    self._assertNotClose(s_design_eigen.getSolution(), solution)
    self._assertNotClose(s_normal_eigen.getSolution(), solution)
    self._assertClose(s_svd.getSolution(), s_design_eigen.getSolution())
    self._assertClose(s_svd.getSolution(), s_normal_eigen.getSolution())