except:
    # NOTE(review): this is the tail of a try-statement whose `try:` line is
    # outside this chunk -- presumably a best-effort removal of out_dir
    # (confirm against the missing lines). The bare except swallows every
    # error from that cleanup so the script can proceed regardless.
    pass
# Recreate the output directory fresh for this run.
os.mkdir(out_dir)

# Plot/output configuration for the rendered density estimate.
low, high = -5.0, 9.0     # x-axis range to render
width, height = 400, 200  # output image size in pixels
# Vertical scale: 1.2x the tallest mode of the ground-truth mixture
# (each component's weighted density evaluated at its own mean).
scale = 1.2 * max(gt_weight[i] * gt[i].prob(gt[i].getMean())
                  for i in xrange(len(gt)))

# Sample counts at which the incrementally-built model is re-fitted and drawn.
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 8)
# Feed the samples to the model one at a time; at each checkpoint count in
# `out`, re-fit the model and report progress.
# NOTE(review): the loop body is truncated in this chunk -- whatever followed
# the commented-out diagnostics below is not visible here.
for i, point in enumerate(samples):
    model.add(point)

    if (i + 1) in out:
        print '%i datapoints:' % (i + 1)
        # First fit the model...
        model.setPrior()  # derive the prior from the data added so far
        p = ProgBar()     # progress display for the duration of solve()
        it = model.solve()
        del p             # tear down the progress bar
        print 'Updated fitting in %i iterations' % it

        # Some information...
        #print 'a:'
        #print model.alpha
# Exemplo n.º 2 / 0 -- scraper artifact marking the start of a separate
# example script; commented out so the surrounding code parses as Python.
    shutil.rmtree(out_dir)
except:
    pass
os.mkdir(out_dir)

# Rendering configuration for this example's density plot.
low, high = 1.0, 9.0      # x-axis extent
width, height = 400, 200  # image dimensions in pixels
# y-axis scale: 10% headroom above the ground-truth density at its mean.
scale = 1.1 * gt.prob(gt.getMean())

# Checkpoints (sample counts) at which the fit is re-solved and drawn.
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 6)
# Incrementally add samples; at each checkpoint in `out`, fit the model and
# render the estimate against the ground truth.
# NOTE(review): the loop body is truncated in this chunk -- the plotting code
# after sampleMixture() is not visible here.
for i, point in enumerate(samples):
    model.add(point)

    if (i + 1) in out:
        print '%i datapoints:' % (i + 1)
        # First fit the model...
        model.setPrior()  # prior derived from the data added so far
        p = ProgBar()     # progress display while solving
        it = model.solve()
        del p
        print 'Updated fitting in %i iterations' % it

        # Now plot the estimated distribution against the actual distribution...
        img = numpy.ones((height, width, 3))  # white RGB canvas
        draw = model.sampleMixture()  # a concrete mixture drawn from the model
# Draw the training set: pick a mixture component by its weight, then sample
# a point from that component's isotropic 3D Gaussian.
for _ in xrange(trainCount):
    comp = numpy.random.multinomial(1, mix).argmax()
    point = numpy.random.multivariate_normal(mean[comp, :],
                                             sd[comp] * numpy.identity(3))
    train.append(point)

# Draw the test set the same way, but keep the generating component alongside
# each point so classification accuracy can be checked later.
test = []
for _ in xrange(testCount):
    comp = numpy.random.multinomial(1, mix).argmax()
    point = numpy.random.multivariate_normal(mean[comp, :],
                                             sd[comp] * numpy.identity(3))
    test.append((point, comp))

# Train a model...
print 'Trainning model...'
model = DPGMM(3)
for feat in train:
    model.add(feat)

model.setPrior(
)  # This sets the models prior using the data that has already been added.
model.setConcGamma(
    1.0, 0.25
)  # Make the model a little less conservative about creating new categories..

p = ProgBar()
iters = model.solveGrow()
del p
print 'Solved model with %i iterations' % iters

# Classify the test set...
# Exemplo n.º 4 / 0 -- scraper artifact marking the start of a separate
# example script; commented out so the surrounding code parses as Python.
# Plot configuration.
low, high = -2.0, 14.0    # x-axis range
width, height = 800, 400  # output image size in pixels
# Height scale: 1.5x the largest weighted peak of the ground-truth mixture.
scale = 1.5 * max(gt_weight[i] * gt[i].prob(gt[i].getMean())
                  for i in xrange(len(gt)))

# Dataset sizes to evaluate.
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

# For each dataset size, build a fresh model from the first `dpc` samples,
# solve it, and render the estimate against the ground truth.
# NOTE(review): the inner plotting loop is truncated at the end of this chunk.
for dpc in out:
    print '%i datapoints:' % (dpc)
    # Fill in the model...
    model = DPGMM(dims)
    for point in samples[:dpc]:
        model.add(point)
    model.setPrior()  # prior derived from the added data

    # Solve...
    p = ProgBar()
    # multiGrowSolve runs several solves with growing capacity and returns
    # the best model -- note it replaces `model` rather than updating in place.
    model = model.multiGrowSolve(8)
    del p

    # Now plot the estimated distribution against the actual distribution...
    img = numpy.ones((height, width, 3))  # white RGB canvas
    draw = model.sampleMixture()  # concrete mixture drawn from the posterior

    for px in xrange(width):
        # Map pixel column -> x coordinate in [low, high).
        x = float(px) / float(width) * (high - low) + low