Example #1
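The examples below are Python 2 snippets and omit their imports. A minimal preamble they appear to assume (a sketch -- the exact module paths for the model and processor classes depend on the host toolkit and are not shown in these fragments):

from itertools import izip, count
from numpy import array
import numpy as N
# GaussianModelBase, SimpleGaussianModel, GaussianMixtureModel, GmmMgr, Hmm,
# AdaptingGmmClassifier, AdaptingGmmClassProcessor, SimpleGaussianTrainer,
# FunctionProcessor, DebugPrint, make_target, and make_gmm come from the
# host toolkit's own modules.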
def test1():
    print '============== test1 ============'
    dimension = 3
    target0 = make_target(dimension, 2, (0.75, 0.25), ((1, 1, 1), (2, 3, 4)), ((1, 1, 1), (0.5, 0.5, 1)))
    target1 = make_target(dimension, 2, (0.5, 0.5), ((-1, -1, -1), (-2, -3, -4)), ((1, 1, 1), (0.5, 0.5, 1)))
    target2 = make_target(dimension, 2, (0.1, 0.9), ((1, 1, -2), (3, 3, 5)), ((1, 1, 1), (0.5, 0.5, 1)))
    print target0
    print target1
    print target2
    GaussianModelBase.seed(0)
    
    labels = ('A', 'B', 'C')
    ncomps = (1, 2, 2)

    sources = dict((('A', target0), ('B', target1), ('C', target2)))

    GaussianMixtureModel.seed(0)
    gmm_mgr = GmmMgr(ncomps, dimension, GaussianModelBase.DIAGONAL_COVARIANCE)
    c0 = AdaptingGmmClassifier(gmm_mgr, izip(labels, count()))
    print
    print c0

    result = list()
    proc0 = AdaptingGmmClassProcessor(c0, result.append)

    # Prime things a little bit to try to get a good start
    c0.set_relevance(0.001)
    c0.set_num_em_iterations(2)
    for i in xrange(1):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            proc0.process((label, data))

    # Now adapt on more data
    c0.set_relevance(10)
    c0.set_num_em_iterations(2)
    for i in xrange(10):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            proc0.process((label, data))

    print
    print c0
    print
    print len(result)
    # XXX Win32 gets values off in the last 2-3 hex digits.  I'm not sure how to account for this in a
    # logref test, so I'm disabling this printing for now.
    
    # for training_label, scores in result[-10:]:
    #     print training_label, tuple(((label, float_to_readable_string(score)) for score, label in scores))
    correct = tuple(label for label, scores in result)
    guessed = tuple(scores[0][1] for _, scores in result)
    print len(correct), len(guessed)
    ind = [c == g for (c, g) in izip(correct, guessed)]
    print ind.count(True)
    # float() guards against Python 2 integer division truncating the accuracy to 0
    print ind.count(True) / float(len(correct))
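For reference, each entry the processor appends to result is a (training_label, scores) pair whose scores list is ordered best-first as (score, label) tuples; that is what the scores[0][1] indexing above relies on. A toy stand-in for the accuracy bookkeeping, with made-up score values:

result = [('A', [(-1.2, 'A'), (-3.4, 'B')]),
          ('B', [(-0.9, 'B'), (-2.0, 'A')])]
correct = tuple(label for label, scores in result)
guessed = tuple(scores[0][1] for _, scores in result)
print sum(c == g for c, g in zip(correct, guessed)) / float(len(correct))  # prints 1.0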
Example #2
def test1(num_obs, num_passes):
    dimension = 2
    
    # Data generator setup
    target_means = (1,1)
    target_vars = (0.1,0.1)
    generator = SimpleGaussianModel(dimension, SimpleGaussianModel.DIAGONAL_COVARIANCE)
    generator.set_model(target_means, target_vars)

    SimpleGaussianModel.seed(0)
    GaussianMixtureModel.seed(0)

    # Gmm setup
    num_mixtures = 2
    gmm0 = make_gmm(dimension, num_mixtures)
    gmm1 = make_gmm(dimension, num_mixtures)
    mm = GmmMgr((gmm1,))
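    # Note: gmm1 is registered with the manager and will be re-estimated
    # through the one-state HMM below; gmm0 stays outside the manager and is
    # trained directly via gmm0.adapt() at the end, for comparison.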

    # Hmm setup
    hmm0 = Hmm(1, log_domain=True)

    # A transition probability matrix with a p=1/2 exit for the real state.
    # The entry state feeds into the real state with p=1.
    trans = array(((0.0, 1.0, 0.0),
                   (0.0, 0.5, 0.5),
                   (0.0, 0.0, 0.0)))
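    # Row/column 0 is the virtual entry state and the last row/column the
    # virtual exit state (consistent with the 1, 1 entry/exit arguments passed
    # to build_model below); the middle row is the single real state.  Its
    # p=0.5 self-loop gives a geometric dwell time with mean 2 frames.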

    
    hmm0.build_model(mm, (0,), 1, 1, trans)
    print hmm0.to_string(True)
    print gmm0

    # Try some adaptation.  Note that we are feeding the entire data set as one stream
    # to the Hmm adaptation call.
    data = [generator.sample() for i in xrange(num_obs)]
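    # Each pass drives the GmmMgr through its adaptation protocol:
    # INITIALIZING (accumulators cleared), ACCUMULATING (statistics gathered
    # over the sequence), APPLYING (accumulators folded into the models),
    # then back to NOT_ADAPTING.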
    for p in xrange(num_passes):
        mm.set_adaptation_state("INITIALIZING")
        mm.clear_all_accumulators()
        hmm0.begin_adapt("STANDALONE")
        mm.set_adaptation_state("ACCUMULATING")
        hmm0.adapt_one_sequence(data)
        mm.set_adaptation_state("APPLYING")
        hmm0.end_adapt()
        mm.apply_all_accumulators()
        mm.set_adaptation_state("NOT_ADAPTING")
    gmm0.adapt(data, max_iters=num_passes)

    print hmm0.to_string(True)
    print gmm0
Example #3
def test0():
    print '============== test0 ============'
    dimension = 3
    target0 = make_target(dimension, 2, (0.75, 0.25), ((1, 1, 1), (2, 3, 4)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    target1 = make_target(dimension, 2, (0.5, 0.5),
                          ((-1, -1, -1), (-2, -3, -4)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    target2 = make_target(dimension, 2, (0.1, 0.9), ((1, 1, -2), (3, 3, 5)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    print target0
    print target1
    print target2
    GaussianModelBase.seed(0)

    labels = ('A', 'B', 'C')
    ncomps = (1, 2, 2)

    sources = dict((('A', target0), ('B', target1), ('C', target2)))

    GaussianMixtureModel.seed(0)
    gmm_mgr = GmmMgr(ncomps, dimension, GaussianModelBase.DIAGONAL_COVARIANCE)
    c0 = AdaptingGmmClassifier(gmm_mgr, izip(labels, count()))
    print
    print c0

    # Prime things a little bit to try to get a good start
    c0.set_relevance(0.001)
    for i in xrange(1):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            c0.adapt_one_class(label, data)

    # Now adapt on more data
    c0.set_relevance(10)
    for i in xrange(10):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            c0.adapt_one_class(label, data)

    print
    print c0
    print
Example #4
def test0():
    print '============== test0 ============'
    dimension = 3
    target0 = make_target(dimension, 2, (0.75, 0.25), ((1, 1, 1), (2, 3, 4)), ((1, 1, 1), (0.5, 0.5, 1)))
    target1 = make_target(dimension, 2, (0.5, 0.5), ((-1, -1, -1), (-2, -3, -4)), ((1, 1, 1), (0.5, 0.5, 1)))
    target2 = make_target(dimension, 2, (0.1, 0.9), ((1, 1, -2), (3, 3, 5)), ((1, 1, 1), (0.5, 0.5, 1)))
    print target0
    print target1
    print target2
    GaussianModelBase.seed(0)
    
    labels = ('A', 'B', 'C')
    ncomps = (1, 2, 2)

    sources = dict((('A', target0), ('B', target1), ('C', target2)))

    GaussianMixtureModel.seed(0)
    gmm_mgr = GmmMgr(ncomps, dimension, GaussianModelBase.DIAGONAL_COVARIANCE)
    c0 = AdaptingGmmClassifier(gmm_mgr, izip(labels, count()))
    print
    print c0


    # Prime things a little bit to try to get a good start
    c0.set_relevance(0.001)
    for i in xrange(1):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            c0.adapt_one_class(label, data)

    # Now adapt on more data
    c0.set_relevance(10)
    for i in xrange(10):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            c0.adapt_one_class(label, data)

    print
    print c0
    print
Example #5
def test0(num_obs, num_passes):
    dimension = 2
    
    # Data generator setup
    target_means = (1,1)
    target_vars = (0.1,0.1)
    generator = SimpleGaussianModel(dimension, SimpleGaussianModel.DIAGONAL_COVARIANCE)
    generator.set_model(target_means, target_vars)

    SimpleGaussianModel.seed(0)
    GaussianMixtureModel.seed(0)

    mm = GmmMgr(dimension)

    # Hmm setup
    hmm0 = Hmm(0, log_domain=True)

    # A transition probability matrix with no real state.
    # The entry state feeds into the exit state with p=1.
    trans = array(((0.0, 1.0),
                   (0.0, 0.0)))
    
    hmm0.build_model(mm, (), 1, 1, trans)
    print hmm0.to_string(True)

    # Try some adaptation.  Note that we are feeding the entire data set as one stream
    # to the Hmm adaptation call.
    data = [generator.sample() for i in xrange(num_obs)]
    for p in xrange(num_passes):
        mm.set_adaptation_state("INITIALIZING")
        mm.clear_all_accumulators()
        hmm0.begin_adapt("STANDALONE")
        mm.set_adaptation_state("ACCUMULATING")
        # Change False to True to enable verbose debug output for this call.
        with DebugPrint("hmm_gxfs", "hmm_aos") if False else DebugPrint():
            hmm0.adapt_one_sequence(data)
        mm.set_adaptation_state("APPLYING")
        hmm0.end_adapt()
        mm.apply_all_accumulators()
        mm.set_adaptation_state("NOT_ADAPTING")

    print hmm0.to_string(True)
Example #6
def test1():
    print '============== test1 ============'
    dimension = 3
    target0 = make_target(dimension, 2, (0.75, 0.25), ((1, 1, 1), (2, 3, 4)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    target1 = make_target(dimension, 2, (0.5, 0.5),
                          ((-1, -1, -1), (-2, -3, -4)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    target2 = make_target(dimension, 2, (0.1, 0.9), ((1, 1, -2), (3, 3, 5)),
                          ((1, 1, 1), (0.5, 0.5, 1)))
    print target0
    print target1
    print target2
    GaussianModelBase.seed(0)

    labels = ('A', 'B', 'C')
    ncomps = (1, 2, 2)

    sources = dict((('A', target0), ('B', target1), ('C', target2)))

    GaussianMixtureModel.seed(0)
    gmm_mgr = GmmMgr(ncomps, dimension, GaussianModelBase.DIAGONAL_COVARIANCE)
    c0 = AdaptingGmmClassifier(gmm_mgr, izip(labels, count()))
    print
    print c0

    result = list()
    proc0 = AdaptingGmmClassProcessor(c0, result.append)

    # Prime things a little bit to try to get a good start
    c0.set_relevance(0.001)
    c0.set_num_em_iterations(2)
    for i in xrange(1):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            proc0.process((label, data))

    # Now adapt on more data
    c0.set_relevance(10)
    c0.set_num_em_iterations(2)
    for i in xrange(10):
        for label in labels:
            target = sources[label]
            data = (target.sample() for i in xrange(100))
            proc0.process((label, data))

    print
    print c0
    print
    print len(result)
    # XXX Win32 gets values off in the last 2-3 hex digits.  I'm not sure how to account for this in a
    # logref test, so I'm disabling this printing for now.

    # for training_label, scores in result[-10:]:
    #     print training_label, tuple(((label, float_to_readable_string(score)) for score, label in scores))
    correct = tuple(label for label, scores in result)
    guessed = tuple(scores[0][1] for _, scores in result)
    print len(correct), len(guessed)
    ind = [c == g for (c, g) in izip(correct, guessed)]
    print ind.count(True)
    # float() guards against Python 2 integer division truncating the accuracy to 0
    print ind.count(True) / float(len(correct))
Example #7
# NOTE: these two arrays are immediately superseded by the definitions below.
false_means_prime = N.array((-9.4634, -5.3991, -4.2773, 1.7494, -0.0822, -228.6211), dtype=N.float32)
false_vars_prime = N.array((3.0097, 6.0277, 8.3711, 10.7198, 13.4285, 456.7074), dtype=N.float32)

# mfcc, no c0: 20,000 frames of Hugh talking
true_means_prime = N.array((-4.8087, 3.9863, -0.5217, 1.3076, 0.7514, -4.6497), dtype=N.float32)
true_vars_prime = N.array((26.8496, 32.6631, 32.3662, 24.2963, 36.2244, 34.1555), dtype=N.float32)
false_means_prime = N.array((-6.8806, -1.3424, -3.8147, 0.4520, 0.7129, -3.1560), dtype=N.float32)
false_vars_prime = N.array((2.7468, 6.2286, 7.4355, 10.1530, 13.3865, 15.9309), dtype=N.float32)
true_prime = SimpleGaussianModel(nfeatures, GaussianModelBase.DIAGONAL_COVARIANCE)
true_prime.set_model(true_means_prime, true_vars_prime)
false_prime = SimpleGaussianModel(nfeatures, GaussianModelBase.DIAGONAL_COVARIANCE)
false_prime.set_model(false_means_prime, false_vars_prime)

primer = (true_prime, false_prime)

GaussianMixtureModel.seed(0)
gmm_mgr0 = GmmMgr(ncomps, nfeatures, GaussianModelBase.DIAGONAL_COVARIANCE, primer)
gmm_mgr1 = GmmMgr(ncomps, nfeatures, GaussianModelBase.DIAGONAL_COVARIANCE, primer)
classify0 = AdaptingGmmClassifier(gmm_mgr0, izip(labels, count()))
classify1 = AdaptingGmmClassifier(gmm_mgr1, izip(labels, count()))
classify0.set_relevance(333)
classify1.set_relevance(333)
classify0.set_num_em_iterations(2)
classify1.set_num_em_iterations(2)
classifier0 = AdaptingGmmClassProcessor(classify0)
classifier1 = AdaptingGmmClassProcessor(classify1)

gaussian_trainer = SimpleGaussianTrainer(labels, nfeatures)
trainer = FunctionProcessor(gaussian_trainer)

# audio.mic, fftmag, endpointer, mfcc0, square, mfcc1, classifier0, classifier1, trainer
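The comment above sketches the full dataflow from microphone audio through FFT magnitude, endpointing, and MFCC stages into the two classifiers and the trainer. Those upstream stages are not part of this fragment, but the wiring convention is visible in the earlier examples: a processor forwards each result to the sendee callable it was constructed with and accepts input via process(). A minimal sketch of the tail of such a chain under that convention (the topology here is an assumption, not taken from the source):

results0 = list()
classifier0 = AdaptingGmmClassProcessor(classify0, results0.append)   # collect scores
classifier1 = AdaptingGmmClassProcessor(classify1, trainer.process)   # hypothetical: feed the trainer stage
# classifier0.process((label, frames))   # push one labeled utterance through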