Example #1
from scikits.learn.machine.em import EM, GMM, GM

def cluster(data, k, mode='full'):
    d = data.shape[1]
    gm = GM(d, k, mode)
    gmm = GMM(gm)
    em = EM()
    em.train(data, gmm, maxiter=20)
    return gm, gmm.bic(data)
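A possible driver for this helper (illustrative, not from the source): since cluster() returns the trained model together with its BIC, and the convention in these examples (see Example #13) is that a larger BIC is better, one can scan candidate k values and keep the winner. pick_k and kmax are hypothetical names.

import numpy as N

def pick_k(data, kmax=6):
    # Train one mixture per candidate k and keep the model whose BIC is
    # largest, mirroring the selection loop in Example #13.
    results = [cluster(data, k) for k in range(1, kmax + 1)]
    bics = N.array([bic for _, bic in results])
    return results[int(N.argmax(bics))][0]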
Example #2
# RegularizedEM is assumed importable from the same package as EM and GMM.
from scikits.learn.machine.em import GM, GMM, RegularizedEM

def cluster(data, k, mode='full'):
    d = data.shape[1]
    gm = GM(d, k, mode)
    gmm = GMM(gm, 'random')
    # pcnt and pval are regularization settings defined at module level in
    # the original script (not shown in this snippet).
    em = RegularizedEM(pcnt=pcnt, pval=pval)
    em.train(data, gmm, maxiter=20)
    return gm, gmm.bic(data)
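pcnt and pval are referenced but never defined in this snippet; in the original script they are module-level settings for RegularizedEM. A hedged sketch of supplying them (the values below are illustrative assumptions, not values from the source):

# Assumed regularization settings; tune them to the data at hand.
pcnt = 0.05   # assumption: prior count as a fraction of the number of frames
pval = 0.1    # assumption: prior value mixed into the covariance estimates

w, mu, va = GM.gen_param(2, 3, 'diag', spread=1.5)   # as in Example #6
data = GM.fromvalues(w, mu, va).sample(500)
best_gm, bic = cluster(data, 3, mode='diag')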
Example #3
from scikits.learn.machine.em import EM, GMM, GM

def cluster(data, k):
    d = data.shape[1]
    gm = GM(d, k)
    gmm = GMM(gm)
    em = EM()
    em.train(data, gmm, maxiter=20)
    return gm, gmm.bic(data)
Example #4
# RegularizedEM is assumed importable from the same package as EM and GMM.
from scikits.learn.machine.em import GM, GMM, RegularizedEM

def cluster(data, k, mode='full'):
    d = data.shape[1]
    gm = GM(d, k, mode)
    gmm = GMM(gm, 'random')
    # pcnt and pval are regularization settings defined at module level in
    # the original script (not shown in this snippet).
    em = RegularizedEM(pcnt=pcnt, pval=pval)
    em.train(data, gmm, maxiter=20)
    return gm, gmm.bic(data)
Example #5
from scikits.learn.machine.em import EM, GMM, GM

def cluster(data, k, mode='full'):
    d = data.shape[1]
    gm = GM(d, k, mode)
    gmm = GMM(gm)
    em = EM()
    em.train(data, gmm, maxiter=20)
    return gm
Example #6
    # GM, GMM, the stdlib copy module and the KM_ITER constant are assumed
    # to be imported/defined at module level in the original test file.
    def _create_model(self, d, k, mode, nframes, emiter):
        #+++++++++++++++++++++++++++++++++++++++++++++++++
        # Generate a model with k components, d dimensions
        #+++++++++++++++++++++++++++++++++++++++++++++++++
        w, mu, va = GM.gen_param(d, k, mode, spread=1.5)
        gm = GM.fromvalues(w, mu, va)
        # Sample nframes frames from the model
        data = gm.sample(nframes)

        #++++++++++++++++++++++++++++++++++++++++++
        # Approximate the models with classical EM
        #++++++++++++++++++++++++++++++++++++++++++
        # Init the model
        lgm = GM(d, k, mode)
        gmm = GMM(lgm, 'kmean')
        gmm.init(data, niter=KM_ITER)

        self.gm0 = copy.copy(gmm.gm)
        # The actual EM, with likelihood computation
        for i in range(emiter):
            g, tgd = gmm.compute_responsabilities(data)
            gmm.update_em(data, g)

        self.data = data
        self.gm = lgm
Example #7
def old_em(data, w, mu, va, niter):
    from scikits.learn.machine.em import EM as OEM, GMM as OGMM, GM as OGM

    k = w.size
    d = mu.shape[1]

    lgm = OGM(d, k)
    gmm = OGMM(lgm)

    gmm.gm.w = w.copy()
    gmm.gm.mu = mu.copy()
    gmm.gm.va = va.copy()

    for i in range(niter):
        g, tgd = gmm.compute_responsabilities(data)
        gmm.update_em(data, g)

    return gmm.gm
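A possible driver for old_em (illustrative, not from the source): generate ground-truth parameters with GM.gen_param as Example #6 does, sample from them, and refine the same parameters for a few EM steps.

from scikits.learn.machine.em import GM

w, mu, va = GM.gen_param(2, 3, 'diag', spread=1.5)
data = GM.fromvalues(w, mu, va).sample(500)
trained = old_em(data, w, mu, va, niter=10)
print(trained.w)    # refined mixing weights
print(trained.mu)   # refined component means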
Example #8
    def _test(self, dataset, log):
        dic = load_dataset(dataset)

        gm = GM.fromvalues(dic['w0'], dic['mu0'], dic['va0'])
        gmm = GMM(gm, 'test')
        EM().train(dic['data'], gmm, log=log)

        assert_array_almost_equal(gmm.gm.w, dic['w'], DEF_DEC)
        assert_array_almost_equal(gmm.gm.mu, dic['mu'], DEF_DEC)
        assert_array_almost_equal(gmm.gm.va, dic['va'], DEF_DEC)
Example #9
    def _test_common(self, d, k, mode):
        dic = load_dataset('%s_%dd_%dk.mat' % (mode, d, k))

        gm = GM.fromvalues(dic['w0'], dic['mu0'], dic['va0'])
        gmm = GMM(gm, 'test')

        a, na = gmm.compute_responsabilities(dic['data'])
        la, nla = gmm.compute_log_responsabilities(dic['data'])

        ta = N.log(a)
        tna = N.log(na)
        if not N.all(N.isfinite(ta)):
            print "precision problem for %s, %dd, %dk, test need fixing" % (mode, d, k)
        else:
            assert_array_almost_equal(ta, la, DEF_DEC)

        if not N.all(N.isfinite(tna)):
            print "precision problem for %s, %dd, %dk, test need fixing" % (mode, d, k)
        else:
            assert_array_almost_equal(tna, nla, DEF_DEC)
Example #10
def generate_dataset(d, k, mode, nframes):
    """Generate a dataset useful for EM and GMM testing.
    
    returns:
        data : ndarray
            data from the true model.
        tgm : GM
            the true model (randomly generated)
        gm0 : GM
            the initial model
        gm : GM
            the trained model
    """
    # Generate a model
    w, mu, va = GM.gen_param(d, k, mode, spread=2.0)
    tgm = GM.fromvalues(w, mu, va)

    # Generate data from the model
    data = tgm.sample(nframes)

    # Run EM on the model, running the initialization separately.
    gmm = GMM(GM(d, k, mode), 'test')
    gmm.init_random(data)
    gm0 = copy.copy(gmm.gm)

    gmm = GMM(copy.copy(gmm.gm), 'test')
    em = EM()
    em.train(data, gmm)

    return data, tgm, gm0, gmm.gm
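A brief usage sketch for generate_dataset (illustrative, not from the source): the three returned models make it easy to compare the true parameters, the random starting point, and the EM result.

data, tgm, gm0, gm = generate_dataset(2, 3, 'diag', 500)
print(tgm.w)   # weights of the true, randomly generated mixture
print(gm0.w)   # weights right after random initialization
print(gm.w)    # weights after EM training; should approach tgm.w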
Example #11
    # GM, GMM and EM are assumed to be imported at module level in the
    # original test file (cf. the import in Example #7).
    def _create_model_and_run_em(self, d, k, mode, nframes):
        #+++++++++++++++++++++++++++++++++++++++++++++++++
        # Generate a model with k components, d dimensions
        #+++++++++++++++++++++++++++++++++++++++++++++++++
        w, mu, va = GM.gen_param(d, k, mode, spread=1.5)
        gm = GM.fromvalues(w, mu, va)
        # Sample nframes frames from the model
        data = gm.sample(nframes)

        #++++++++++++++++++++++++++++++++++++++++++
        # Approximate the models with classical EM
        #++++++++++++++++++++++++++++++++++++++++++
        # Init the model
        lgm = GM(d, k, mode)
        gmm = GMM(lgm, 'kmean')

        em = EM()
        lk = em.train(data, gmm)
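As Example #12 below shows, em.train returns the likelihood values computed during training, so lk can serve as a convergence check. A minimal sketch using pylab, as in Example #13 (illustrative, not part of the original test):

import pylab as P
P.plot(lk)                 # one likelihood value per EM iteration
P.xlabel('EM iteration')
P.ylabel('likelihood')
P.show()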
Example #12
from scikits.learn.machine.em import EM, GMM, GM

k = 2
d = 2
mode = 'diag'
nframes = 1000

#+++++++++++++++++++++++++++++++++++++++++++
# Create an artificial GM model and sample from it
#+++++++++++++++++++++++++++++++++++++++++++
w, mu, va = GM.gen_param(d, k, mode, spread=1.5)
gm = GM.fromvalues(w, mu, va)

# Sample nframes frames from the model
data = gm.sample(nframes)

#++++++++++++++++++++++++
# Learn the model with EM
#++++++++++++++++++++++++

# Create a Model from a Gaussian mixture with kmean initialization
lgm = GM(d, k, mode)
gmm = GMM(lgm, 'kmean')

# The actual EM, with likelihood computation. The threshold is compared to
# the (linearly approximated) derivative of the likelihood
em = EM()
like = em.train(data, gmm, maxiter=30, thresh=1e-8)

# The computed parameters are in gmm.gm, which is the same object as lgm
# (remember, Python does not copy most objects by default). You can, for
# example, plot lgm against gm to compare.
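A minimal comparison sketch along the lines the comment suggests, using only attributes shown elsewhere in these examples (w, mu); plotting helpers are avoided here since their exact API is not shown:

print("true weights   : %s" % gm.w)
print("learned weights: %s" % lgm.w)   # lgm is the same object as gmm.gm
print("true means     : %s" % gm.mu)
print("learned means  : %s" % lgm.mu)

Note that EM may recover the components in a different order, so the rows of lgm.mu can be a permutation of those of gm.mu.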
Example #13
# Continues from Example #12: gm, d, mode, k and nframes are assumed to be
# defined as above.
import numpy as N

# Sample nframes frames from the model
data = gm.sample(nframes)

#++++++++++++++++++++++++
# Learn the model with EM
#++++++++++++++++++++++++

# List of learned mixtures lgm[i] is a mixture with i+1 components
lgm = []
kmax = 6
bics = N.zeros(kmax)
em = EM()
for i in range(kmax):
    lgm.append(GM(d, i+1, mode))

    gmm = GMM(lgm[i], 'kmean')
    em.train(data, gmm, maxiter=30, thresh=1e-10)
    bics[i] = gmm.bic(data)

print "Original model has %d clusters, bics says %d" % (k, N.argmax(bics)+1)

#+++++++++++++++
# Draw the model
#+++++++++++++++
import pylab as P

for k in range(kmax):
    P.subplot(3, 2, k+1)
    # level would feed a confidence-ellipse plot in the full example
    level = 0.9
    P.plot(data[:, 0], data[:, 1], '.', label='_nolegend_')
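    # Hypothetical continuation (the original example is truncated here):
    # mark the learned component means on each panel and add a title.
    P.plot(lgm[k].mu[:, 0], lgm[k].mu[:, 1], 'r*')
    P.title('%d components' % (k + 1))
P.show()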