Example #1
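These methods are excerpts from a larger test module, so the module-level imports they rely on are not shown. The sketch below documents what each imported name is expected to provide; the descriptions of the local helpers are assumptions made for illustration, not taken from the source.

import numpy as np
from scipy.optimize import approx_fprime

# Hypothetical local helpers (real import paths are not shown in these excerpts):
#   sampleVd(u, nDoc, alpha, PRNG=...) : draw an nDoc x K matrix of stick fractions
#   summarizeVdToPi(Vd)                : reduce Vd to the sufficient statistic sumLogPi
#   np2flatstr(arr, fmt=...)           : format an array as a one-line string
#   OptimizerRhoOmega                  : module exposing objFunc_constrained and
#                                        find_optimum_multiple_tries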
    def testRecoverGlobalSticksFromGeneratedData(self):
        ''' Verify that the mean of the V_d matrix equals the original vector u
        '''
        print ''
        gamma = 1.0
        for K in [1, 10, 107]:
            for alpha in [0.95, 0.5]:
                for nDoc in [10000]:
                    print '================== K %d | alpha %.2f | nDoc %d' \
                          % (K, alpha, nDoc)

                    for seed in [111, 222, 333]:

                        PRNG = np.random.RandomState(seed)
                        u_true = np.linspace(0.01, 0.99, K)
                        Vd = sampleVd(u_true, nDoc, alpha, PRNG=PRNG)

                        assert Vd.shape[0] == nDoc
                        assert Vd.shape[1] == K
                        assert Vd.ndim == 2
                        meanVd = np.mean(Vd, axis=0)
                        print '    u   1:10 ', np2flatstr(u_true)
                        print ' E[v_d] 1:10 ', np2flatstr(meanVd)
                        if K > 10:
                            print '    u   -10: ', np2flatstr(u_true[-10:])
                            print ' E[v_d] -10: ', np2flatstr(meanVd[-10:])
                        assert np.allclose(u_true, meanVd, atol=0.02)
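sampleVd itself is not shown in these excerpts. A minimal hypothetical stand-in that is consistent with the mean-recovery assertion above (E[V_d] close to u) draws each per-document stick fraction from a Beta distribution whose mean is the corresponding global stick; the exact parameterization used by the real helper may differ.

def sampleVd_sketch(u, nDoc, alpha, PRNG=np.random):
    ''' Hypothetical stand-in for sampleVd.

    Draws Vd[d, k] ~ Beta(alpha * u[k], alpha * (1 - u[k])),
    which has mean u[k], matching the check in the test above.
    '''
    K = u.size
    Vd = np.zeros((nDoc, K))
    for k in range(K):
        Vd[:, k] = PRNG.beta(alpha * u[k], alpha * (1 - u[k]), size=nDoc)
    return Vd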
Example #2
    def testHasSaneOutput__objFunc_constrained(self, hmmKappa=10.0):
        ''' Verify objective and gradient vector have correct type and size
        '''
        for K in [1, 2, 10]:
            for alpha in [0.1, 0.9]:
                for seed in [333, 777, 888]:

                    PRNG = np.random.RandomState(seed)
                    u = np.linspace(0.1, 0.9, K)
                    Vd = sampleVd(u, K + 1, alpha, PRNG=PRNG)
                    sumLogPi = summarizeVdToPi(Vd)

                    # Randomly initialize rho and omega
                    rho = PRNG.rand(K)
                    omega = K * PRNG.rand(K)
                    rhoomega = np.hstack([rho, omega])

                    kwargs = dict(alpha=alpha,
                                  gamma=1,
                                  nDoc=K + 1,
                                  kappa=hmmKappa,
                                  sumLogPi=sumLogPi)

                    # Compute the objective function; its gradient is
                    # returned only when approx_grad=0 (exact case)
                    for approx_grad in [0, 1]:
                        if approx_grad:
                            f = OptimizerRhoOmega.objFunc_constrained(
                                rhoomega, approx_grad=1, **kwargs)
                            fapprox = f

                        else:
                            f, g = OptimizerRhoOmega.objFunc_constrained(
                                rhoomega, approx_grad=0, **kwargs)
                            fexact = f
                        assert isinstance(f, np.float64)
                        assert np.isfinite(f)

                        if not approx_grad:
                            assert g.ndim == 1
                            assert g.size == 2 * K
                            assert np.all(np.isfinite(g))

                    print fexact
                    print fapprox
                    print ''
                    assert np.allclose(fexact, fapprox)
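The branch on approx_grad above relies on a return-value convention: the objective returns only f when approx_grad is truthy, and the pair (f, gradient) otherwise. A tiny hypothetical objective illustrating that same convention (not the real objFunc_constrained):

import numpy as np

def objFunc_sketch(x, approx_grad=0):
    # Same calling convention assumed by the tests: f alone when the caller
    # will approximate the gradient numerically, (f, g) otherwise.
    f = np.sum(x ** 2)
    if approx_grad:
        return f
    return f, 2.0 * x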
Example #3
    def testGradientExactAndApproxAgree__objFunc_constrained(
            self, hmmKappa=100):
        ''' Verify computed gradient is similar for exact and approx methods
        '''
        print ''
        for K in [1, 2, 10]:
            for gamma in [1.0, 2.0, 6.28]:
                for alpha in [0.1, 0.9, 1.5]:
                    for seed in [333, 777, 888]:

                        PRNG = np.random.RandomState(seed)
                        u = np.linspace(0.1, 0.9, K)
                        Vd = sampleVd(u, K + 1, alpha, PRNG=PRNG)
                        sumLogPi = summarizeVdToPi(Vd)

                        # Randomly initialize rho and omega
                        rho = PRNG.rand(K)
                        omega = K * PRNG.rand(K)
                        rhoomega = np.hstack([rho, omega])

                        kwargs = dict(alpha=alpha,
                                      gamma=gamma,
                                      nDoc=K + 1,
                                      kappa=hmmKappa,
                                      sumLogPi=sumLogPi)

                        # Exact gradient
                        f, g = OptimizerRhoOmega.objFunc_constrained(
                            rhoomega, approx_grad=0, **kwargs)

                        # Approx gradient
                        oFunc_cons = OptimizerRhoOmega.objFunc_constrained

                        def objFunc(x):
                            return oFunc_cons(x, approx_grad=1, **kwargs)

                        epsvec = np.hstack(
                            [1e-8 * np.ones(K), 1e-8 * np.ones(K)])
                        gapprox = approx_fprime(rhoomega, objFunc, epsvec)

                        print np2flatstr(g)
                        print np2flatstr(gapprox)
                        print ''
                        assert np.allclose(g, gapprox, atol=0, rtol=0.001)
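The exact-vs-approximate comparison above is a standard finite-difference gradient check. A self-contained sketch of the same pattern on a toy objective with a known gradient (all names here are illustrative):

import numpy as np
from scipy.optimize import approx_fprime

def toy_objective(x):
    # Smooth objective with a closed-form gradient: f(x) = sum(x^2) + sum(sin(x))
    return np.sum(x ** 2) + np.sum(np.sin(x))

def toy_gradient(x):
    return 2.0 * x + np.cos(x)

x0 = np.linspace(-1.0, 1.0, 5)
g_exact = toy_gradient(x0)
g_approx = approx_fprime(x0, toy_objective, 1e-8 * np.ones(x0.size))
assert np.allclose(g_exact, g_approx, atol=0, rtol=0.001)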
Example #4
    def testHasSaneOutput__objFunc_constrained(self):
        ''' Verify objective and gradient vector have correct type and size
        '''
        for K in [1, 2, 10, 101]:
            for seed in [33, 77, 888]:
                for alpha in [0.1, 0.9]:
                    for nDoc in [1, 50, 5000]:
                        PRNG = np.random.RandomState(seed)
                        u = np.linspace(0.1, 0.9, K)
                        Vd = sampleVd(u, nDoc, alpha, PRNG=PRNG)
                        sumLogPi = summarizeVdToPi(Vd)
                        rho = PRNG.rand(K)
                        omega = nDoc * PRNG.rand(K)
                        for approx_grad in [0, 1]:
                            rhoomega = np.hstack([rho, omega])
                            kwargs = dict(alpha=alpha,
                                          gamma=1,
                                          nDoc=nDoc,
                                          sumLogPi=sumLogPi)
                            if approx_grad:
                                f = OptimizerRhoOmega.objFunc_constrained(
                                    rhoomega, approx_grad=1, **kwargs)
                                g = np.ones(2 * K)
                                fapprox = f

                            else:
                                f, g = OptimizerRhoOmega.objFunc_constrained(
                                    rhoomega, approx_grad=0, **kwargs)
                                fexact = f
                            assert isinstance(f, np.float64)
                            assert g.ndim == 1
                            assert g.size == 2 * K
                            assert np.isfinite(f)
                            assert np.all(np.isfinite(g))
                        print fexact
                        print fapprox
                        print ''
Example #5
    def testRecoverRhoThatGeneratedData__find_optimum(self):
        ''' Verify find_optimum's result is indistinguishable from the analytic optimum
        '''
        print ''
        gamma = 1.0
        for K in [93, 107, 85]:
            for alpha in [0.9999]:
                for nDoc in [10000]:
                    print '============== K %d | alpha %.2f | nDoc %d' \
                          % (K, alpha, nDoc)

                    for seed in [111, 222, 333]:

                        PRNG = np.random.RandomState(seed)
                        u_true = np.linspace(0.01, 0.99, K)
                        Vd = sampleVd(u_true, nDoc, alpha, PRNG=PRNG)
                        sumLogPi = summarizeVdToPi(Vd)

                        initrho = PRNG.rand(K)
                        initomega = 100 * PRNG.rand(K)
                        scale = 1.0  # float(1+nDoc)/K
                        kwargs = dict(alpha=alpha,
                                      gamma=gamma,
                                      nDoc=nDoc,
                                      scaleVector=np.hstack([
                                          np.ones(K),
                                          float(scale) * np.ones(K)
                                      ]),
                                      sumLogPi=sumLogPi)
                        rho_est, omega_est, f_est, Info = \
                            OptimizerRhoOmega.find_optimum_multiple_tries(
                                initrho=initrho,
                                initomega=initomega,
                                **kwargs)
                        assert np.all(np.isfinite(rho_est))
                        assert np.all(np.isfinite(omega_est))
                        assert np.isfinite(f_est)
                        print Info['msg']

                        rho_orig = u_true
                        omega_orig = (1 + gamma) * np.ones(K)
                        ro_orig = np.hstack([rho_orig, omega_orig])
                        rho_hot, omega_hot, f_hot, Ihot = \
                            OptimizerRhoOmega.find_optimum_multiple_tries(
                                initrho=rho_orig,
                                initomega=omega_orig,
                                **kwargs)

                        f_orig, _ = OptimizerRhoOmega.objFunc_constrained(
                            ro_orig, **kwargs)
                        print '  f_orig %.7f' % (f_orig)
                        print '  f_hot  %.7f' % (f_hot)
                        print '  f_est  %.7f' % (f_est)

                        print '  rho_orig', np2flatstr(rho_orig, fmt='%9.6f')
                        print '  rho_hot ', np2flatstr(rho_hot, fmt='%9.6f')
                        print '  rho_est ', np2flatstr(rho_est, fmt='%9.6f')

                        assert f_hot <= f_orig
                        assert np.allclose(f_est, f_hot, rtol=0.01)
                        assert np.allclose(rho_est,
                                           rho_hot,
                                           atol=0.02,
                                           rtol=1e-5)
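The test above compares a cold start (random initrho/initomega) against a warm start at the parameters that generated the data, and requires both runs to reach essentially the same optimum. A generic sketch of that pattern with scipy.optimize.minimize on a toy convex problem (names and values are illustrative, not the project's optimizer):

import numpy as np
from scipy.optimize import minimize

target = np.linspace(0.1, 0.9, 5)

def toy_loss(x):
    # Convex toy objective whose minimizer is exactly `target`
    return np.sum((x - target) ** 2)

PRNG = np.random.RandomState(0)
cold = minimize(toy_loss, x0=PRNG.rand(5), method='L-BFGS-B')   # random init
warm = minimize(toy_loss, x0=target, method='L-BFGS-B')         # warm start

assert warm.fun <= toy_loss(target) + 1e-10    # warm start never makes things worse
assert np.allclose(cold.x, warm.x, atol=0.02)  # both reach the same optimum
assert np.allclose(cold.fun, warm.fun, rtol=0.01, atol=1e-8)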
Example #6
    def testGradientExactAndApproxAgree__sumLogPiRemVec(self):
        ''' Verify computed gradient is similar for exact and approx methods
        '''
        print ''
        for K in [1, 2, 10, 54]:
            for alpha in [0.1, 0.95]:
                for gamma in [1., 9.45]:
                    for nDoc in [1, 100, 1000]:

                        print '============= K %d | nDoc %d | alpha %.2f' \
                              % (K, nDoc, alpha)

                        for seed in [111, 222, 333]:
                            PRNG = np.random.RandomState(seed)
                            u = np.linspace(0.01, 0.99, K)
                            Vd = sampleVd(u, nDoc, alpha, PRNG=PRNG)
                            sumLogPi = summarizeVdToPi(Vd)
                            sumLogPiRemVec = np.zeros(K)
                            sumLogPiActiveVec = np.zeros(K)
                            sumLogPiActiveVec[:] = sumLogPi[:-1]
                            sumLogPiRemVec[-1] = sumLogPi[-1]

                            rho = PRNG.rand(K)
                            omega = 100 * PRNG.rand(K)
                            rhoomega = np.hstack([rho, omega])
                            kwargs = dict(alpha=alpha,
                                          gamma=gamma,
                                          nDoc=nDoc,
                                          sumLogPiActiveVec=sumLogPiActiveVec,
                                          sumLogPiRemVec=sumLogPiRemVec)

                            # Exact gradient
                            f, g = OptimizerRhoOmega.objFunc_constrained(
                                rhoomega, approx_grad=0, **kwargs)

                            # Approx gradient
                            oFunc_cons = OptimizerRhoOmega.objFunc_constrained
                            def objFunc(x):
                                return oFunc_cons(x, approx_grad=1, **kwargs)
                            epsvec = np.hstack(
                                [1e-8 * np.ones(K), 1e-8 * np.ones(K)])
                            gapprox = approx_fprime(rhoomega, objFunc, epsvec)

                            print '      rho 1:10 ', np2flatstr(rho)
                            print '     grad 1:10 ', np2flatstr(g[:K],
                                                                fmt='% .6e')
                            print ' autograd 1:10 ', np2flatstr(gapprox[:K],
                                                                fmt='% .6e')
                            if K > 10:
                                print '     rho K-10:K ', np2flatstr(rho[-10:])
                                print '    grad K-10:K ', np2flatstr(
                                    g[K - 10:K], fmt='% .6e')
                                print 'autograd K-10:K ', np2flatstr(
                                    gapprox[K - 10:K], fmt='% .6e')
                            rtol_rho = 0.01
                            atol_rho = 1e-6
                            rtol_omega = 0.05
                            atol_omega = 0.01
                            # Note: small omega derivatives cause lots of
                            # numerical problems, so we use a high atol there
                            assert np.allclose(g[:K],
                                               gapprox[:K],
                                               atol=atol_rho,
                                               rtol=rtol_rho)
                            oGradOK = np.allclose(g[K:],
                                                  gapprox[K:],
                                                  atol=atol_omega,
                                                  rtol=rtol_omega)
                            if not oGradOK:
                                print 'VIOLATION DETECTED!'
                                print 'grad_approx DOES NOT EQUAL grad_exact'

                                absDiff = np.abs(g[K:] - gapprox[K:])
                                tolDiff = (atol_omega +
                                           rtol_omega * np.abs(gapprox[K:]) -
                                           absDiff)
                                worstIDs = np.argsort(tolDiff)
                                print 'Top 5 worst mismatches'
                                print np2flatstr(g[K + worstIDs[:5]],
                                                 fmt='% .6f')
                                print np2flatstr(gapprox[K + worstIDs[:5]],
                                                 fmt='% .6f')
                            assert oGradOK
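The mismatch report above ranks entries of the omega gradient by how much slack they have under np.allclose's elementwise criterion, |a - b| <= atol + rtol * |b|; negative slack means the entry violates the tolerance. A tiny self-contained illustration of that ranking (values chosen only for the example):

import numpy as np

atol, rtol = 0.01, 0.05
a = np.array([1.000, 2.000, 3.500])
b = np.array([1.001, 2.000, 3.000])

slack = (atol + rtol * np.abs(b)) - np.abs(a - b)
worst_first = np.argsort(slack)            # most-violating entries first

assert worst_first[0] == 2                 # the third entry is the worst offender
assert slack[worst_first[0]] < 0           # and it actually violates the tolerance
assert not np.allclose(a, b, atol=atol, rtol=rtol)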