Example #1
def test_derivatives(self):
        print "Testing derivatives w.r.t. data ... "
        sys.stdout.flush()

        P = []
        for k in range(10):
            myp = 2.0 * np.random.rand(1)[0] + .5
            mys = 3.0 * np.random.rand(1)[0] + 1.0
            p = Distributions.ExponentialPower({'p': myp, 's': mys})
            P.append(p)

        p = Distributions.ProductOfExponentialPowerDistributions({'P': P})

        dat = p.sample(100)
        h = 1e-7
        tol = 1e-4
        Y0 = dat.X.copy()

        df = p.dldx(dat)
        df2 = 0.0 * df
        for i in xrange(dat.size(0)):
            y = Y0.copy()

            y[i, :] = y[i, :] + h
            df2[i, :] = (p.loglik(Data(y)) - p.loglik(dat)) / h

        prot = {}
        prot['message'] = 'Difference in derivative of log-likelihood for ExponentialPower greater than ' + str(tol)
        prot['max difference'] = np.max(np.abs((df - df2).flatten()))
        prot['mean difference'] = np.mean(np.abs((df - df2).flatten()))

        self.assertTrue(
            np.max(np.abs(df - df2)) < tol, Auxiliary.prettyPrintDict(prot))
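The pattern above, an analytic dldx checked against a one-sided finite difference of loglik, works for any differentiable density. A minimal self-contained sketch of the same check on a standard normal (logpdf and grad_logpdf below are illustrative names, not part of the library):

import numpy as np

def logpdf(x):
    # standard normal log-density
    return -0.5 * x ** 2 - 0.5 * np.log(2 * np.pi)

def grad_logpdf(x):
    # analytic derivative d/dx log p(x)
    return -x

x = np.random.randn(100)
h = 1e-7
fd = (logpdf(x + h) - logpdf(x)) / h  # one-sided finite difference
assert np.max(np.abs(fd - grad_logpdf(x))) < 1e-4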
Example #2
    def test_derivatives(self):
        print "Testing derivative for p-spherically symmetric distribution with radial gamma"
        sys.stdout.flush()
        myu = 3.0 * np.random.rand(1)[0] + 1.0
        mys = 3.0 * np.random.rand(1)[0] + 1.0
        myp = 2 * np.random.rand(1)[0] + .5
        n = 4
        p = Distributions.LpSphericallySymmetric({
            'p': myp,
            'n': n,
            'rp': Distributions.Gamma({'s': mys, 'u': myu})
        })
        dat = p.sample(50)
        df = p.dldx(dat)
        h = 1e-8
        df2 = np.array(dat.X * np.Inf)
        for k in range(n):
            y = np.array(dat.X)
            y[k, :] += h
            df2[k, :] = (p.loglik(Data(y)) - p.loglik(dat)) / h
        self.assertFalse(np.max(np.abs(df-df2).flatten()) > self.llTol,\
           'Difference ' + str(np.max(np.abs(df-df2).flatten())) + ' in derivative of log-likelihood for p-spherically symmetric greater than ' + str(self.llTol))

        print "[Ok]"
Example #3
    def test_RadialFactorizationVsLpNestedNonlinearICA(self):
        print "Testing Radial Factorization vs. Lp-nested ICA..."
        sys.stdout.flush()
        p = np.random.rand() + 1.0
        psource = Distributions.LpSphericallySymmetric({
            'p': p,
            'rp': Distributions.Gamma({
                'u': 2.0 * np.random.rand() + 1.0,
                's': 5.0 * np.random.rand() + 1.0
            })
        })

        F = NonlinearTransformFactory.RadialFactorization(psource)
        dat = psource.sample(10)

        L = Auxiliary.LpNestedFunction('(0,0:2)', np.array([p]))
        psource2 = Distributions.LpNestedSymmetric({
            'f': L,
            'n': 2.0,
            'rp': psource.param['rp'].copy()
        })
        F2 = NonlinearTransformFactory.LpNestedNonLinearICA(psource2)

        tol = 1e-6

        self.assertTrue(np.max(np.abs(F.logDetJacobian(dat) - F2.logDetJacobian(dat))) < tol,\
                        'log-determinants of Lp-nestedICA and Radial Factorization are not equal!')
Example #4
    def test_estimate(self):
        print "Testing parameter estimation of Gamma distribution ..."
        sys.stdout.flush()
        myp = 2.0 * np.random.rand(1)[0] + .5
        mys = 10.0 * np.random.rand(1)[0]
        p1 = Distributions.ExponentialPower({'p': myp, 's': mys})

        dat = p1.sample(50000)

        myp = 2.0 * np.random.rand(1)[0] + .5

        mys = 10.0 * np.random.rand(1)[0]
        p2 = Distributions.ExponentialPower({'p': myp, 's': mys})

        p2.estimate(dat)

        prot = {}
        prot['message'] = 'Difference in parameters for Exponential Power distribution greater than threshold'
        prot['s-threshold'] = self.TolParamS
        prot['p-threshold'] = self.TolParamP
        prot['true model'] = p1
        prot['estimated model'] = p2
        self.assertTrue(np.abs(p2.param['p'] - p1.param['p']) < self.TolParamP or np.abs(p2.param['s'] - p1.param['s']) < self.TolParamS,\
                        Auxiliary.prettyPrintDict(prot))
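As an external sanity check, the same recover-the-parameters experiment can be run against scipy's generalized normal, whose shape parameter beta plays the role of p here. A sketch, assuming a scipy version that ships stats.gennorm and with illustrative tolerances:

import numpy as np
from scipy import stats

beta_true, scale_true = 1.3, 2.0
x = stats.gennorm.rvs(beta_true, scale=scale_true, size=50000)
beta_hat, loc_hat, scale_hat = stats.gennorm.fit(x, floc=0)  # location fixed at 0
assert abs(beta_hat - beta_true) < 0.1
assert abs(scale_hat - scale_true) < 0.1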
Example #5
    def test_MarginalHistogramEqualization(self):
        print "Testing MarginalHistogramEqualization ..."
        sys.stdout.flush()

        psource = Distributions.ISA(
            n=10,
            P=[Distributions.MixtureOfGaussians(K=5) for k in range(10)],
            S=[(k, ) for k in range(10)])
        ptarget = Distributions.ISA(
            n=10,
            P=[Distributions.Gaussian(n=1) for k in range(10)],
            S=[(k, ) for k in range(10)])

        F = NonlinearTransformFactory.MarginalHistogramEqualization(
            psource, ptarget)

        dat = psource.sample(20000)
        ld = F.logDetJacobian(dat)
        ld = np.mean(np.abs(ld)) / dat.size(0) / np.log(2)

        all_source = psource.all(dat)
        all_target = ptarget.all(F * dat)

        tol = 1e-2
        prot = {}
        prot['message'] = 'Difference in logdet corrected ALL > ' + str(tol)
        prot["1/n/log(2) * <|det J|> "] = ld
        prot["ALL(TARGET)"] = all_target
        prot["ALL(SOURCE)"] = all_source
        prot["ALL(TARGET) + 1/n/log(2) * <|det J|> - ALL(SOURCE)"] = all_target + ld - all_source

        # the snippet ends without its assertion; by analogy with the other
        # tests here it should check the logdet-corrected ALL difference:
        self.assertTrue(np.abs(all_target + ld - all_source) < tol,
                        Auxiliary.prettyPrintDict(prot))
Example #6
 def test_derivatives(self):
     print "Testing derivative for p-nested symmetric distribution with radial gamma"
     sys.stdout.flush()
     myu = 10 * np.random.rand(1)[0]
     mys = 10 * np.random.rand(1)[0]
     n = 10
     L = Auxiliary.LpNestedFunction('(0,0,(1,1:4),4,(1,5:8),8:10)')
     p = Distributions.LpNestedSymmetric({
         'f': L,
         'n': n,
         'rp': Distributions.Gamma({'s': mys, 'u': myu})
     })
     dat = p.sample(50)
     df = p.dldx(dat)
     h = 1e-8
     df2 = np.array(dat.X * np.Inf)
     for k in range(n):
         y = np.array(dat.X)
         y[k, :] += h
         df2[k, :] = (p.loglik(Data(y)) - p.loglik(dat)) / h
     self.assertFalse(np.max(np.abs(df-df2).flatten()) > self.llTol,\
         'Difference in derivative of log-likelihood for p-nested symmetric greater than ' + str(self.llTol))
Example #7
 def test_loglik(self):
     print 'Testing log-likelihood of p-spherically symmetric distribution with radial gamma'
     sys.stdout.flush()
     for k in range(5):
         print '\t--> test case ' + str(k)
         dat = io.loadmat(self.matpath + '/TestPSphericallySymmetric' +
                          str(k) + '.mat',
                          struct_as_record=True)
         truell = np.squeeze(dat['ll'])
         p = Distributions.LpSphericallySymmetric({
             'p': dat['p'],
             'n': dat['n'],
             'rp': Distributions.Gamma({'s': dat['s'], 'u': dat['u']})
         })
         dat = Data(dat['X'])
         ll = p.loglik(dat)
         for i in range(len(ll)):
             self.assertFalse(np.abs(ll[i]-truell[i]) > self.Tol,\
                'Log-likelihood for p-spherically symmetric with radial gamma deviates from test case')
Example #8
    def test_DeterminantOfRadialFactorization(self):
        print "Testing Determimant of Radial Factorization ..."
        sys.stdout.flush()
        p = np.random.rand() + 1.0
        psource = Distributions.LpSphericallySymmetric({
            'p': p,
            'rp': Distributions.Gamma({
                'u': 2.0 * np.random.rand() + 1.0,
                's': 5.0 * np.random.rand() + 1.0
            })
        })

        # L = Auxiliary.LpNestedFunction('(0,0:2)',np.array([p]))
        # psource2 = Distributions.LpNestedSymmetric({'f':L,'n':2.0,'rp':psource.param['rp'].copy()})
        # F2 = NonlinearTransformFactory.LpNestedNonLinearICA(psource2)

        dat = psource.sample(100)
        F = NonlinearTransformFactory.RadialFactorization(psource)

        n, m = dat.size()
        h = 1e-7
        logdetJ = F.logDetJacobian(dat)
        for i in range(m):
            J = np.zeros((n, n))
            for j in range(n):
                tmp = dat[:, i]
                tmp2 = tmp.copy()
                tmp2.X[j, :] = tmp2.X[j, :] + h
                J[:, j] = ((F * tmp2).X - (F * tmp).X)[:, 0] / h
            self.assertFalse( np.abs(np.log(linalg.det(J)) - logdetJ[i]) > self.DetTol,\
                              'Determinant of Jacobian deviates by more than ' + str(self.DetTol) + '!')
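The loop above assembles the Jacobian of the transform column by column from finite differences and compares its log-determinant against logDetJacobian. The same idea on a transform with a known answer (elementwise exp, where log|det J| = sum(x)), as a standalone sketch:

import numpy as np

def F(x):
    return np.exp(x)  # elementwise, so J = diag(exp(x))

n = 4
x = np.random.randn(n)
h = 1e-7
J = np.zeros((n, n))
for j in range(n):
    e = np.zeros(n)
    e[j] = h
    J[:, j] = (F(x + e) - F(x)) / h  # j-th column of the Jacobian
logdet_num = np.linalg.slogdet(J)[1]
assert abs(logdet_num - np.sum(x)) < 1e-5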
Example #9
 def test_loglik(self):
     p1 = Distributions.Kumaraswamy({'a': 2.0, 'b': 3.0})
     p2 = Distributions.Kumaraswamy({'a': 1.0, 'b': 1.0})
     nsamples = 1000000
     data = p2.sample(nsamples)
     logZ = logsumexp(p1.loglik(data) - p2.loglik(data) - np.log(nsamples))
     print "Estimated partition function: ", np.exp(logZ)
     self.assertTrue(
         np.abs(np.exp(logZ) - 1.0) < 0.1 * self.TolParam,
          'Difference in estimated partition function (1.0) greater than ' +
         str(0.1 * self.TolParam))
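The logsumexp line is an importance-sampling estimate of the partition function: with x_i drawn from p2, the average of p1(x_i)/p2(x_i) estimates the normalizer of p1, which should be 1 for a properly normalized density. The same estimator with two scipy distributions (note the proposal must have heavier tails than the target for the weights to behave):

import numpy as np
from scipy import stats
from scipy.special import logsumexp  # scipy.misc.logsumexp in older scipy

n = 100000
x = stats.expon.rvs(scale=3.0, size=n)  # heavy-tailed proposal
logw = stats.gamma.logpdf(x, 2.0) - stats.expon.logpdf(x, scale=3.0)
logZ = logsumexp(logw - np.log(n))      # log of the Monte Carlo average
assert abs(np.exp(logZ) - 1.0) < 0.05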
Example #10
    def test_estimate(self):
        print "Testing parameter estimation of LogNormal distribution ..."
        sys.stdout.flush()
        myu = 10 * np.random.rand(1)[0]
        mys = 10 * np.random.rand(1)[0]
        p = Distributions.LogNormal({'mu': myu, 's': mys})
        dat = p.sample(1000000)
        p = Distributions.LogNormal()
        p.estimate(dat)

        self.assertFalse( np.abs(p.param['mu'] - myu) > self.TolParam,\
            'Difference in location parameter for LogNormal distribution greater than ' + str(self.TolParam))
        self.assertFalse( np.abs(p.param['s'] - mys) > self.TolParam,\
            'Difference in scale parameter for LogNormal distribution greater than ' + str(self.TolParam))
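For the log-normal, the maximum-likelihood estimates have a closed form: mu and s are the mean and standard deviation of log(x). That makes estimate() easy to cross-check outside the library, assuming the same (mu, s) parameterization:

import numpy as np

mu, s = 1.7, 0.8
x = np.random.lognormal(mean=mu, sigma=s, size=1000000)
mu_hat = np.mean(np.log(x))  # closed-form MLE of the location
s_hat = np.std(np.log(x))    # closed-form MLE of the scale
assert abs(mu_hat - mu) < 1e-2
assert abs(s_hat - s) < 1e-2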
Example #11
    def test_estimate(self):
        print "Testing parameter estimation of Dirichlet distribution ..."
        sys.stdout.flush()
        myalpha = 10.0 * np.random.rand(10)
        p = Distributions.Dirichlet({'alpha': myalpha})
        dat = p.sample(50000)
        p = Distributions.Dirichlet({'alpha': np.random.rand(10)})
        p.estimate(dat)
        alpha = p.param['alpha']

        self.assertTrue(
            np.max(np.abs(alpha - myalpha)) < self.TolParam,
            'Difference in alpha parameter for Dirichlet distribution greater than '
            + str(self.TolParam))
Example #12
    def test_loglik(self):
        p1 = Distributions.Gamma({'u': 2.0, 's': 3.0})
        p2 = Distributions.Gamma({'u': 1.0, 's': 1.0})
        nsamples = 1000000
        data = p2.sample(nsamples)
        logZ = logsumexp(p1.loglik(data) - p2.loglik(data) - np.log(nsamples))
        print "Estimated partition function: ", np.exp(logZ)

        print "Testing log-likelihood of Gamma distribution ... "
        sys.stdout.flush()
        p = Distributions.Gamma({'u': 2.0, 's': 3.0})
        l = p.loglik(self.X)
        for k in range(len(self.LL)):
            self.assertFalse(np.abs(l[k] - self.LL[k]) > self.Tol,\
               'Difference in log-likelihood for Gamma greater than ' + str(self.Tol))
Example #13
    def test_dldtheta(self):
        OK = np.zeros(5)
        for i in range(5):
            self.a = 1.0 * rand()
            self.b = self.a + 10.0 * rand()
            self.mu = (self.a + 10 * rand()) / 2.
            self.s = 2.0 * rand() + 1.0
            self.p = Distributions.TruncatedGaussian({
                'a': self.a,
                'b': self.b,
                'mu': self.mu,
                'sigma': self.s
            })
            p = self.p.copy()
            p.primary = ['mu', 'sigma']
            dat = p.sample(100)

            def f(arr):
                p.array2primary(arr)
                return np.sum(p.loglik(dat))

            def df(arr):
                p.array2primary(arr)
                return np.sum(p.dldtheta(dat), axis=1)

            arr0 = p.primary2array()
            arr0 = abs(np.random.randn(len(arr0)))
            err = optimize.check_grad(f, df, arr0)
            if err < 1e-02:
                OK[i] = 1
        M = np.max(OK)
        self.assertTrue(
            M > 0.5, 'Gradient error %.4g is greater than %.4g' % (err, 1e-02))
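scipy.optimize.check_grad compares an analytic gradient against a finite-difference approximation of f and returns the 2-norm of the difference, which is the quantity being thresholded above. A minimal standalone example of the call:

import numpy as np
from scipy import optimize

def f(theta):
    return np.sum(theta ** 2)

def df(theta):
    return 2 * theta  # analytic gradient of f

err = optimize.check_grad(f, df, np.array([1.0, -2.0, 0.5]))
assert err < 1e-5  # ||analytic - finite difference||_2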
Example #14
    def test_dldtheta(self):
        a = 1.0 * rand()
        b = a
        while b <= a:
            b = 10.0 * rand()
        mu = rand() + 1.0
        s = 10.0 * rand() + 1.0

        p = Distributions.TruncatedExponentialPower({
            'a': a,
            'b': b,
            'p': mu,
            's': s
        })
        p.primary = ['p', 's']
        dat = p.sample(100)

        def f(arr):
            p.array2primary(arr)
            return np.sum(p.loglik(dat))

        def df(arr):
            p.array2primary(arr)
            return np.sum(p.dldtheta(dat), axis=1)

        arr0 = p.primary2array()
        arr0 = abs(np.random.randn(len(arr0)))
        err = optimize.check_grad(f, df, arr0)
        print "Error in gradient: ", err
        self.assertTrue(
            err < 1e-02,
            'Gradient error %.4g is greater than %.4g' % (err, 1e-02))
Example #15
 def test_estimate(self):
     print "Testing parameter estimation of Gamma distribution ..."
     sys.stdout.flush()
     myu = 10 * np.random.rand(1)[0]
     mys = 10 * np.random.rand(1)[0]
     p = Distributions.Gamma({'u': myu, 's': mys})
     dat = p.sample(1000000)
     p = Distributions.Gamma()
     p.estimate(dat)
     self.assertFalse(
         np.abs(p.param['u'] - myu) > self.TolParam,
         'Difference in Shape parameter for Gamma distribution greater than '
         + str(self.TolParam))
     self.assertFalse(
         np.abs(p.param['s'] - mys) > self.TolParam,
         'Difference in Scale parameter for Gamma distribution greater than '
         + str(self.TolParam))
Example #16
 def test_estimate(self):
     print "Testing parameter estimation of Kumaraswamy distribution ..."
     sys.stdout.flush()
     myu = 10 * rand()
     mys = 10 * rand()
     myB = 10 * rand()
     p = Distributions.Kumaraswamy({'a': myu, 'b': mys, 'B': myB})
     dat = p.sample(50000)
     p = Distributions.Kumaraswamy(B=myB)
     p.estimate(dat)
     self.assertFalse(
         np.abs(p.param['a'] - myu) > self.TolParam,
         'Difference in Shape parameter for Kumaraswamy distribution greater than '
         + str(self.TolParam))
     self.assertFalse(
         np.abs(p.param['b'] - mys) > self.TolParam,
         'Difference in Scale parameter for Kumaraswamy distribution greater than '
         + str(self.TolParam))
Example #17
 def test_loglik(self):
     print "Testing log-likelihood of Dirichlet distribution ... "
     sys.stdout.flush()
     p = Distributions.Dirichlet({'alpha': self.alpha})
     l = p.loglik(self.X)
     for k in range(len(self.LL)):
         self.assertTrue(
             np.abs(l[k] - self.LL[k]) < self.Tol,
             'Difference in log-likelihood for Dirichlet greater than ' +
             str(self.Tol))
Example #18
    def test_LogDetRadialTransform(self):
        print "Testing logdet of radial transformation ... "
        sys.stdout.flush()
        p = np.random.rand() * 3. + .5
        # source distribution
        psource = Distributions.LpSphericallySymmetric({'p': p})
        # target distribution
        ptarget = Distributions.LpSphericallySymmetric({
            'p': p,
            'rp': Distributions.Gamma({
                'u': np.random.rand() * 3.0,
                's': np.random.rand() * 2.0
            })
        })
        # create Filter
        F = NonlinearTransformFactory.RadialTransformation(psource, ptarget)
        # sample data from source distribution
        dat = psource.sample(100)

        # apply filter to data
        dat2 = F * dat
        logDetJ = F.logDetJacobian(dat)
        logDetJ2 = 0 * logDetJ

        h = 1e-8

        tmp = Data(dat.X.copy())
        tmp.X[0, :] += h
        W1 = ((F * tmp).X - dat2.X) / h

        tmp = Data(dat.X.copy())
        tmp.X[1, :] += h
        W2 = ((F * tmp).X - dat2.X) / h
        for i in range(dat.numex()):

            logDetJ2[i] = np.log(
                np.abs(W1[0, i] * W2[1, i] - W1[1, i] * W2[0, i]))

        self.assertFalse(np.max(np.abs(logDetJ - logDetJ2)) > self.detTol,\
                         'Log determinant of radial transformation deviates by more than ' + str(self.detTol) + '!')
Example #19
 def setUp(self):
     self.a = 1.0 * rand()
     self.b = self.a + 10.0 * rand()
     self.mu = (self.a + 10 * rand()) / 2.
     self.s = 2.0 * rand() + 1.0
     self.p = Distributions.TruncatedGaussian({
         'a': self.a,
         'b': self.b,
         'mu': self.mu,
         'sigma': self.s
     })
Example #20
    def test_RadialFactorization(self):
        print "Testing Radial Factorization ..."
        sys.stdout.flush()
        p = np.random.rand() + 1.0
        n = 5
        psource = Distributions.LpSphericallySymmetric({
            'n': n,
            'p': p,
            'rp': Distributions.Gamma({
                'u': 2.0 * np.random.rand() + 1.0,
                's': 5.0 * np.random.rand() + 1.0
            })
        })
        ptarget = Distributions.LpGeneralizedNormal({
            'n': n,
            'p': p,
            's': (special.gamma(1.0 / p) / special.gamma(3.0 / p))**(p / 2.0)
        })

        F = NonlinearTransformFactory.RadialFactorization(psource)

        dat = psource.sample(10000)
        ld = F.logDetJacobian(dat)
        ld = np.mean(np.abs(ld)) / dat.size(0) / np.log(2)

        all_source = psource.all(dat)
        all_target = ptarget.all(F * dat)

        tol = 1e-2
        prot = {}
        prot['message'] = 'Difference in logdet corrected ALL > ' + str(tol)
        prot["1/n/log(2) * <|det J|> "] = ld
        prot["ALL(TARGET)"] = all_target
        prot["ALL(SOURCE)"] = all_source
        prot["ALL(TARGET) + 1/n/log(2) * <|det J|> - ALL(SOURCE)"] = all_target + ld - all_source

        # the snippet ends without its assertion; by analogy with the other
        # tests here it should check the logdet-corrected ALL difference:
        self.assertTrue(np.abs(all_target + ld - all_source) < tol,
                        Auxiliary.prettyPrintDict(prot))
Example #21
 def test_derivatives(self):
     print "Testing derivatives w.r.t. data ... "
     sys.stdout.flush()
     p = Distributions.MixtureOfGaussians({'K': 5})
     dat = p.sample(100)
     h = 1e-7
     tol = 1e-6
     y = np.array(dat.X) + h
     df = p.dldx(dat)
     df2 = (p.loglik(Data(y)) - p.loglik(dat)) / h
     self.assertFalse(np.max(np.abs(df-df2)) > tol,\
         'Difference ' +str(np.max(np.abs(df-df2))) +' in derivative of log-likelihood for MixtureOfGaussians greater than ' + str(tol))
Example #22
    def test_cdf(self):
        print "Testing consistency of cdf and ppf"
        sys.stdout.flush()
        myu = 10 * rand()
        mys = 10 * rand()
        myB = 10 * rand()
        p = Distributions.Kumaraswamy({'a': myu, 'b': mys, 'B': myB})
        u = rand(10)
        u2 = p.cdf(p.ppf(u))

        self.assertFalse(
            sum(np.abs(u - u2)) > self.TolParam,
            'Difference u - cdf(ppf(u)) greater than %.4g' % (self.TolParam, ))
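cdf composed with ppf should be the identity on (0, 1) for any continuous distribution. The Kumaraswamy itself is not in scipy, but the same round-trip check against the closely related Beta distribution reads:

import numpy as np
from scipy import stats

u = np.random.rand(10)
u2 = stats.beta.cdf(stats.beta.ppf(u, 2.0, 3.0), 2.0, 3.0)
assert np.max(np.abs(u - u2)) < 1e-10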
Example #23
    def test_logdeterminantInCombinationWithLinearFilters(self):
        print "Testing Log-Determinant of Nonlinear Lp-nested ICA in combination with linear filters..."
        sys.stdout.flush()
        L = Auxiliary.LpNestedFunction()
        p = Distributions.LpNestedSymmetric({'f': L})
        dat = p.sample(10)
        Flin1 = LinearTransformFactory.oRND(dat)
        Flin2 = LinearTransform(
            np.random.randn(dat.size(0), dat.size(0)) +
            0.1 * np.eye(dat.size(0)))
        Fnl = NonlinearTransformFactory.LpNestedNonLinearICA(p)

        Fd = {}
        Fd['NL'] = Fnl

        Fd['L1*L2'] = Flin1 * Flin2
        Fd['L1*NL'] = Flin1 * Fnl
        Fd['NL*L1'] = Fnl * Flin1

        Fd['Nl*L1*L2'] = Fnl * Flin1 * Flin2
        Fd['Nl*(L1*L2)'] = Fnl * (Flin1 * Flin2)
        Fd['(Nl*L1)*L2'] = (Fnl * Flin1) * Flin2

        Fd['L1*Nl*L2'] = Flin1 * Fnl * Flin2
        Fd['L1*(Nl*L2)'] = Flin1 * (Fnl * Flin2)
        Fd['(L1*Nl)*L2'] = (Flin1 * Fnl) * Flin2

        Fd['L2*L1*Nl'] = Flin2 * Flin1 * Fnl
        Fd['L2*(L1*Nl)'] = Flin2 * (Flin1 * Fnl)
        Fd['(L2*L1)*Nl'] = (Flin2 * Flin1) * Fnl

        for (tk, F) in Fd.items():
            print "\t ... testing " + tk
            sys.stdout.flush()
            n, m = dat.size()
            h = 5 * 1e-7

            logdetJ = F.logDetJacobian(dat)
            for i in range(m):
                J = np.zeros((n, n))
                for j in range(n):
                    tmp = dat[:, i]
                    tmp2 = tmp.copy()
                    tmp2.X[j, :] = tmp2.X[j, :] + h

                    J[:, j] = ((F * tmp2).X - (F * tmp).X)[:, 0] / h
                Q, R = linalg.qr(J)
                # QR does not guarantee a positive diagonal in R, so take the
                # absolute value before the log to get log|det J|
                logdet2 = np.sum(np.log(np.abs(np.diag(R))))
                self.assertFalse(np.abs(logdet2 - logdetJ[i]) > self.DetTol,\
                    'Determinant of Jacobian deviates by %.4g which is more than %.4g' % (np.abs(logdet2 - logdetJ[i]), self.DetTol))
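A more direct route to the same quantity, without the QR detour, is numpy's slogdet, which returns the sign and log|det| separately and avoids overflow for large matrices:

import numpy as np

J = np.random.randn(5, 5)
sign, logabsdet = np.linalg.slogdet(J)
assert abs(logabsdet - np.log(np.abs(np.linalg.det(J)))) < 1e-10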
Example #24
 def test_derivatives(self):
     print "Testing derivatives w.r.t. data ... "
     sys.stdout.flush()
     myu = 3.0 * np.random.rand(1)[0] + 1.0
     mys = 3.0 * np.random.rand(1)[0] + 1.0
     p = Distributions.Gamma({'u': myu, 's': mys})
     dat = p.sample(100)
     h = 1e-7
     tol = 1e-4
     y = np.array(dat.X) + h
     df = p.dldx(dat)
     df2 = (p.loglik(Data(y)) - p.loglik(dat)) / h
     self.assertFalse(np.max(np.abs(df-df2)) > tol,\
                       'Difference ' + str(np.max(np.abs(df-df2))) + ' in derivative of log-likelihood for Gamma greater than ' + str(tol))
Example #25
 def test_estimate(self):
     print 'Testing parameter estimation for p-generalized normal distribution'
     sys.stdout.flush()
     for k in range(5):
         print '\t--> test case ' + str(k)
         dat = io.loadmat(self.matpath + '/TestPGeneralizedNormal' +
                          str(k) + '.mat',
                          struct_as_record=True)
         trueparam = {'s': 2 * dat['s'], 'p': dat['p'], 'n': dat['n']}
         p = Distributions.LpGeneralizedNormal({'n': dat['n']})
         dat = Data(dat['X'])
         p.estimate(dat)
         for ke in trueparam.keys():
             self.assertFalse( np.abs(trueparam[ke] -  p.param[ke]) > self.TolParam[ke],\
                 'Estimated parameter ' + ke + ' deviates by more than ' + str(self.TolParam[ke]) + '!')
Example #26
    def test_dldtheta(self):
        p = Distributions.Gamma({'u': 2.0, 's': 3.0})
        p.primary = ['u', 's']
        dat = p.sample(1000)

        def f(arr):
            p.array2primary(arr)
            return np.sum(p.loglik(dat))

        def df(arr):
            p.array2primary(arr)
            return np.sum(p.dldtheta(dat), axis=1)

        arr0 = p.primary2array()
        arr0 = abs(np.random.randn(len(arr0)))
        err = optimize.check_grad(f, df, arr0)
        print "Error in graident: ", err
        self.assertTrue(err < 1e-02)
Example #27
 def test_loglik(self):
     print 'Testing log-likelihood of p-generalized normal distribution'
     print __file__
     sys.stdout.flush()
     for k in range(5):
         print '\t--> test case ' + str(k)
         dat = io.loadmat(self.matpath + '/TestPGeneralizedNormal' +
                          str(k) + '.mat',
                          struct_as_record=True)
         truell = dat['ll']
         p = Distributions.LpGeneralizedNormal({
             's': 2 * dat['s'],
             'p': dat['p'],
             'n': dat['n']
         })
         dat = Data(dat['X'])
         ll = p.loglik(dat)
         for i in range(ll.shape[0]):
             self.assertFalse( np.any(np.abs(ll[i]-np.squeeze(truell[0,i])) > self.Tol),\
                 'Log-likelihood for p-generalized normal deviates from test case')
Example #28
    def test_pdfloglikconsistency(self):
        print "Testing consistency of pdf and loglik  ... "
        sys.stdout.flush()

        p = Distributions.MixtureOfLogNormals({'K': 5})
        dat = p.sample(100)

        tol = 1e-6
        ll = p.loglik(dat)
        pdf = np.log(p.pdf(dat))

        prot = {}
        prot['message'] = 'Difference in log(p(x)) and loglik(x) for MixtureOfLogNormals greater than ' + str(tol)
        prot['max diff'] = np.max(np.abs(pdf - ll))
        prot['mean diff'] = np.mean(np.abs(pdf - ll))

        self.assertFalse(
            np.max(np.abs(ll - pdf)) > tol, Auxiliary.prettyPrintDict(prot))
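The invariant being tested, log(pdf(x)) == loglik(x) up to floating point, holds for any consistent density implementation; with scipy it reads:

import numpy as np
from scipy import stats

x = stats.norm.rvs(size=100)
assert np.allclose(np.log(stats.norm.pdf(x)), stats.norm.logpdf(x))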
Example #29
    def test_derivatives(self):
        print "Testing derivatives w.r.t. data ... "
        sys.stdout.flush()
        p = Distributions.MixtureOfLogNormals({'K': 5})
        dat = p.sample(100)
        h = 1e-8
        tol = 1e-4
        y = np.array(dat.X) + h
        df = p.dldx(dat)
        df2 = (p.loglik(Data(y)) - p.loglik(dat)) / h

        prot = {}
        prot['message'] = 'Difference in derivative of log-likelihood for MixtureOfLogNormals greater than ' + str(tol)
        prot['max diff'] = np.max(np.abs(df - df2))
        prot['mean diff'] = np.mean(np.abs(df - df2))

        self.assertFalse(
            np.mean(np.abs(df - df2)) > tol, Auxiliary.prettyPrintDict(prot))
Example #30
    def test_estimate(self):
        print 'Testing parameter estimation for p-spherically symmetric distribution with radial gamma'
        sys.stdout.flush()
        for k in range(5):
            print '\t--> test case ' + str(k)
            sys.stdout.flush()
            dat = io.loadmat(self.matpath + '/TestPSphericallySymmetric' +
                             str(k) + '.mat',
                             struct_as_record=True)
            trueparam = {'s': dat['s'], 'p': dat['p'], 'u': dat['u']}
            p = Distributions.LpSphericallySymmetric({'n': dat['n']})
            dat = Data(dat['X'])
            p.estimate(dat, prange=(.1, 4.0))

            self.assertFalse(np.abs(trueparam['p'] -  p.param['p']) > self.TolParam['p'],\
               'Estimated parameter p deviates by more than ' + str(self.TolParam['p']) + '!')
            self.assertFalse(np.abs(trueparam['u'] -  p.param['rp'].param['u']) > self.TolParam['u'],\
               'Estimated parameter u deviates by more than ' + str(self.TolParam['u']) + '!')
            self.assertFalse(np.abs(trueparam['s'] -  p.param['rp'].param['s']) > self.TolParam['s'],\
               'Estimated parameter s deviates by more than ' + str(self.TolParam['s']) + '!')