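# The tests below rely on these imports. LinearModel and LinearFit come from the
# package under test; its exact import path is not shown in this excerpt.
import unittest
from math import sqrt

import numpy as np
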
class LinearFitTestCase(unittest.TestCase):

    """
    Test the LinearFit class and its methods
    
    """

    def setUp(self):

        # Generate a simple dataset.
        # Noise values are drawn from N(0,1), but are truncated.

        self.nObservations = 10
        self.nParameters = 2
        self.time = np.arange(self.nObservations, dtype=np.double)
        self.noise = np.array([-0.72, -1.02, 0.52, 0.24, -0.17, -1.78, 0.56, 0.33, 0.19, 1.03])
        self.observations = 3.0 + 2.0 * self.time ** 2 + self.noise
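        # The regressors below match the true model y = 3 + 2*t^2 (a constant and t^2),
        # so the fitted coefficients should be compatible with (3.0, 2.0) within their error bars.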
        regressorList = [np.ones_like(self.time), self.time ** 2]
        regressorNames = ["1", "t^2"]
        self.linearModel = LinearModel(regressorList, regressorNames)
        self.linearFit = self.linearModel.fitData(self.observations)

    def tearDown(self):
        pass

    def testLinearFit(self):

        self.assertTrue(isinstance(self.linearFit, LinearFit))

    def testObservations(self):

        # Since the linear model was specified without a covariance matrix, the
        # decorrelated observations should simply be the original observations.

        self.assertTrue(isinstance(self.linearFit.observations(weighted=True), np.ndarray))
        self.assertEqual(self.linearFit.observations(weighted=True).shape, (self.nObservations,))
        self.assertTrue(np.all(self.observations == self.linearFit.observations(weighted=True)))
        self.assertTrue(np.all(self.observations == self.linearFit.observations(weighted=False)))

    def testRegressionCoefficients(self):

        expectedCoeff = np.array([2.47811858, 2.01543444])
        self.assertTrue(isinstance(self.linearFit.regressionCoefficients(), np.ndarray))
        self.assertEqual(self.linearFit.regressionCoefficients().shape, (self.nParameters,))
        self.assertTrue(np.allclose(self.linearFit.regressionCoefficients(), expectedCoeff, rtol=1.0e-6, atol=1.0e-08))

    def testResiduals(self):

        expectedResiduals = np.array(
            [
                -0.19811858,
                -0.51355301,
                0.98014368,
                0.6229715,
                0.10493045,
                -1.64397947,
                0.52624173,
                0.09559406,
                -0.27592247,
                0.30169212,
            ]
        )
        self.assertTrue(isinstance(self.linearFit.residuals(weighted=False), np.ndarray))
        self.assertEqual(self.linearFit.residuals(weighted=False).shape, (self.nObservations,))
        self.assertTrue(
            np.allclose(self.linearFit.residuals(weighted=False), expectedResiduals, rtol=1.0e-6, atol=1.0e-08)
        )

    def testPredictions(self):

        expectedPredictions = np.array(
            [
                2.47811858,
                4.49355301,
                10.53985632,
                20.6170285,
                34.72506955,
                52.86397947,
                75.03375827,
                101.23440594,
                131.46592247,
                165.72830788,
            ]
        )
        self.assertTrue(isinstance(self.linearFit.predictions(weighted=False), np.ndarray))
        self.assertEqual(self.linearFit.predictions(weighted=False).shape, (self.nObservations,))
        self.assertTrue(
            np.allclose(self.linearFit.predictions(weighted=False), expectedPredictions, rtol=1.0e-6, atol=1.0e-08)
        )

    def testCovarianceMatrix(self):

        expectedCovMatrix = np.array([[1.28084978e-01, -2.38076167e-03], [-2.38076167e-03, 8.35354974e-05]])
        self.assertTrue(isinstance(self.linearFit.covarianceMatrix(), np.ndarray))
        self.assertEqual(self.linearFit.covarianceMatrix().shape, (self.nParameters, self.nParameters))
        self.assertTrue(np.allclose(self.linearFit.covarianceMatrix(), expectedCovMatrix, rtol=1.0e-6, atol=1.0e-08))

    def testErrorBars(self):

        expectedErrors = np.array([0.35788962, 0.00913978])
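        # These are the square roots of the diagonal of the covariance matrix
        # tested in testCovarianceMatrix above.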
        self.assertTrue(isinstance(self.linearFit.errorBars(), np.ndarray))
        self.assertEqual(self.linearFit.errorBars().shape, (self.nParameters,))
        self.assertTrue(np.allclose(self.linearFit.errorBars(), expectedErrors, rtol=1.0e-6, atol=1.0e-08))

    def testConfidenceIntervals(self):

        alpha = 0.05
        expectedLower = np.array([1.65282364, 1.99435808])
        expectedUpper = np.array([3.30341351, 2.0365108])
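        # The bounds equal the fitted coefficients -/+ t * errorBars, with t ~ 2.306
        # the 97.5% Student-t quantile for n - p = 8 degrees of freedom.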
        self.assertEqual(len(self.linearFit.confidenceIntervals(alpha)), 2)
        lower, upper = self.linearFit.confidenceIntervals(alpha)
        self.assertTrue(isinstance(lower, np.ndarray))
        self.assertTrue(isinstance(upper, np.ndarray))
        self.assertEqual(len(lower), self.nParameters)
        self.assertEqual(len(upper), self.nParameters)
        self.assertTrue(np.allclose(lower, expectedLower, rtol=1.0e-6, atol=1.0e-08))
        self.assertTrue(np.allclose(upper, expectedUpper, rtol=1.0e-6, atol=1.0e-08))

    def testT_values(self):

        expectedTvalues = np.array([6.92425385, 220.51235807])
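        # The t-values are the regression coefficients divided by their error bars.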
        self.assertTrue(isinstance(self.linearFit.t_values(), np.ndarray))
        self.assertEqual(self.linearFit.t_values().shape, (self.nParameters,))
        self.assertTrue(np.allclose(self.linearFit.t_values(), expectedTvalues, rtol=1.0e-6, atol=1.0e-08))

    def testSumOfSquaredResiduals(self):

        expectedSumSqResiduals = 4.81866162957
        self.assertAlmostEqual(self.linearFit.sumSqResiduals(weighted=False), expectedSumSqResiduals, places=7)

    def testResidualVariance(self):

        expectedResidualVariance = 0.60233270369599989
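        # Equals sumSqResiduals / (nObservations - nParameters), i.e. ~4.8187 / 8.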
        self.assertAlmostEqual(self.linearFit.residualVariance(weighted=False), expectedResidualVariance, places=7)

    def testCorrelationMatrix(self):

        expectedCorrelationMatrix = np.array([[1.0, -0.72783225], [-0.72783225, 1.0]])
        self.assertTrue(
            np.allclose(self.linearFit.correlationMatrix(), expectedCorrelationMatrix, rtol=1.0e-6, atol=1.0e-08)
        )

    def testCoefficientOfDetermination(self):

        # Test using a model with an intercept

        expectedCoefficientOfDetermination = 0.99983550516908659
        self.assertAlmostEqual(
            self.linearFit.coefficientOfDetermination(weighted=False), expectedCoefficientOfDetermination, places=7
        )

        # Test using a model without an intercept

        linearModel = LinearModel([self.time ** 2], ["t^2"])
        linearFit = linearModel.fitData(self.observations)
        expectedCoefficientOfDetermination = 0.99948312767720937
        self.assertAlmostEqual(
            linearFit.coefficientOfDetermination(weighted=False), expectedCoefficientOfDetermination, places=7
        )

    def testBICvalue(self):

        expectedBICvalue = -0.39313345804942657
        self.assertAlmostEqual(self.linearFit.BICvalue(), expectedBICvalue, places=7)

    def testAICvalue(self):

        expectedAICvalue = 2.6991112629684357
        self.assertAlmostEqual(self.linearFit.AICvalue(), expectedAICvalue, places=7)

    def testThypothesisTest(self):

        regressorList = [np.ones_like(self.time), self.time ** 2, self.time ** 3]
        regressorNames = ["1", "t^2", "t^3"]
        linearModel = LinearModel(regressorList, regressorNames)
        linearFit = linearModel.fitData(self.observations)
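        # The true model contains only the "1" and "t^2" regressors; "t^3" is spurious,
        # so the null hypothesis (coefficient equal to zero) should be rejected only
        # for the first two regressors.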

        alpha = 0.05  # significance level
        expectedOutput = np.array([True, True, False])
        nullRejected = linearFit.regressorTtest(alpha)

        self.assertTrue(isinstance(nullRejected, np.ndarray))
        self.assertTrue(nullRejected.dtype == np.bool_)
        self.assertEqual(len(nullRejected), len(regressorList))
        self.assertTrue(np.all(nullRejected == expectedOutput))

    def testFstatistic(self):

        # Test on a model with more than one regressor

        expectedFstatistic = 48625.747064132738
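        # Consistent with the textbook relation F = [R^2 / (p - 1)] / [(1 - R^2) / (n - p)]
        # for the intercept model above (R^2 ~ 0.9998355, n = 10, p = 2).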
        self.assertAlmostEqual(self.linearFit.Fstatistic(weighted=False), expectedFstatistic, places=7)

        # Test on a model with only one regressor

        expectedFstatistic = 17403.423925909559
        linearModel = LinearModel([np.square(self.time)], ["t^2"])
        linearFit = linearModel.fitData(self.observations)
        self.assertAlmostEqual(linearFit.Fstatistic(weighted=False), expectedFstatistic, places=7)

    def testFstatisticTest(self):

        # with the tested model identical to the true model

        alpha = 0.05
        expectedOutput = True  # null hypothesis rejected
        self.assertTrue(self.linearFit.FstatisticTest(alpha) == expectedOutput)

        # with a fit on pure noise

        alpha = 0.05
        expectedOutput = False  # null hypothesis could not be rejected
        linearFit = self.linearModel.fitData(self.noise)
        self.assertTrue(linearFit.FstatisticTest(alpha) == expectedOutput)

    def testEvaluate(self):

        expectedOutput = np.array(
            [
                52.86397947,
                42.8147219,
                33.8820485,
                26.06595927,
                19.36645422,
                13.78353335,
                9.31719665,
                5.96744412,
                3.73427577,
                2.6176916,
                2.6176916,
                3.73427577,
                5.96744412,
                9.31719665,
                13.78353335,
                19.36645422,
                26.06595927,
                33.8820485,
                42.8147219,
                52.86397947,
            ]
        )

        xnew = np.linspace(-5.0, +5.0, 20)
        newRegressorList = [np.ones_like(xnew), xnew ** 2]
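        # The quadratic fit is evaluated on a grid symmetric about zero,
        # which is why the expected output is mirror-symmetric.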
        output = self.linearFit.evaluate(newRegressorList)
        self.assertTrue(np.allclose(output, expectedOutput, rtol=1.0e-6, atol=1.0e-08))

    def testFitData(self):

        # Presumably from a LinearModel test case originally; the regressors are rebuilt
        # locally so the test also runs within this class.
        regressorList = [np.ones_like(self.time), self.time ** 2]
        linearModel = LinearModel(regressorList, ["1", "t^2"])
        self.assertTrue(isinstance(linearModel.fitData(self.observations), LinearFit))
class WeightedLinearFitTestCase(unittest.TestCase):

    """
    Test the LinearFit class with a specified covariance matrix for the
    observations. Test only those methods affected by the weights.
    
    """

    def setUp(self):

        # Generate a simple noiseless dataset

        self.nObservations = 10
        self.nParameters = 2
        time = np.arange(self.nObservations, dtype=np.double)
        self.observations1 = 1.0 + 0.3 * time
        self.observations2 = 1.0 + 0.3 * time
        self.regressorList = [np.ones_like(time), time]
        self.regressorNames = ["1", "t"]
        self.designMatrix = np.column_stack(self.regressorList)

        # First covariance matrix: different weights, no correlations

        covMatrixObserv1 = np.diag(0.1 + 0.1 * np.arange(self.nObservations))

        # Second covariance matrix: different weights and correlations
        # Correlation coefficient: 0.6

        covMatrixObserv2 = np.diag(0.1 + 0.1 * np.arange(self.nObservations))
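        # Fill in the off-diagonal elements so that every pair of observations
        # has a correlation coefficient of 0.6.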
        for i in range(self.nObservations):
            for j in range(self.nObservations):
                if i >= j:
                    continue
                covMatrixObserv2[i, j] = 0.6 * sqrt(covMatrixObserv2[i, i] * covMatrixObserv2[j, j])
                covMatrixObserv2[j, i] = covMatrixObserv2[i, j]

        # Add noise according to the different covariance matrices

        noise1 = np.array(
            [
                -0.26944792,
                -0.56542802,
                0.62106263,
                -0.03113657,
                0.98090236,
                0.02678669,
                1.2237701,
                -0.50112787,
                -0.47742454,
                1.16351356,
            ]
        )
        noise2 = np.array(
            [
                0.24484502,
                -0.22979797,
                0.40639882,
                0.12137103,
                0.20694025,
                0.68952746,
                -0.30300402,
                -0.11136982,
                0.3549814,
                0.20528704,
            ]
        )

        self.observations1 += noise1
        self.observations2 += noise2

        # Instantiate the (weighted) linear models and their linear fits

        self.linearModel1 = LinearModel(self.regressorList, self.regressorNames, covMatrixObserv1)
        self.linearModel2 = LinearModel(self.regressorList, self.regressorNames, covMatrixObserv2)
        self.linearFit1 = self.linearModel1.fitData(self.observations1)
        self.linearFit2 = self.linearModel2.fitData(self.observations2)

    def tearDown(self):
        pass

    def testDecorrelatedObservations(self):
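        # With a non-diagonal covariance matrix the weighted observations differ from the
        # raw ones: presumably the observations are decorrelated (whitened) using the
        # covariance matrix passed to LinearModel.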

        expectedObservations = np.array(
            [
                3.9365456,
                0.03889641,
                1.73885587,
                0.65979961,
                0.82899163,
                1.73402234,
                -0.1799988,
                0.37289876,
                1.25537791,
                1.05209144,
            ]
        )

        self.assertTrue(isinstance(self.linearFit2.observations(weighted=True), np.ndarray))
        self.assertEqual(self.linearFit2.observations(weighted=True).shape, (self.nObservations,))
        self.assertTrue(
            np.allclose(expectedObservations, self.linearFit2.observations(weighted=True), rtol=1.0e-6, atol=1.0e-08)
        )

    def testWeightedRegressionCoefficients(self):

        # A test with a diagonal covariance matrix

        expectedWeightedCoeff = np.array([0.76576446, 0.40030724])
        self.assertTrue(
            np.allclose(self.linearFit1.regressionCoefficients(), expectedWeightedCoeff, rtol=1.0e-6, atol=1.0e-08)
        )

        # A test with a non-diagonal covariance matrix

        expectedWeightedCoeff = np.array([1.1623421, 0.3040605])
        self.assertTrue(
            np.allclose(self.linearFit2.regressionCoefficients(), expectedWeightedCoeff, rtol=1.0e-6, atol=1.0e-08)
        )

    def testResiduals(self):

        # A test with a diagonal covariance matrix

        expectedResiduals = np.array(
            [
                -0.03521238,
                -0.43149972,
                0.65468369,
                -0.09782275,
                0.81390894,
                -0.24051397,
                0.85616220,
                -0.96904301,
                -1.04564692,
                0.49498394,
            ]
        )
        self.assertTrue(
            np.allclose(self.linearFit1.residuals(weighted=False), expectedResiduals, rtol=1.0e-6, atol=1.0e-08)
        )

        # A test with a non-diagonal covariance matrix

        expectedResiduals = np.array(
            [
                0.082502896,
                -0.396200554,
                0.235935777,
                -0.053152473,
                0.028356288,
                0.506883039,
                -0.489708901,
                -0.302135160,
                0.160155600,
                0.006400781,
            ]
        )
        self.assertTrue(
            np.allclose(self.linearFit2.residuals(weighted=False), expectedResiduals, rtol=1.0e-6, atol=1.0e-08)
        )