# Imports this example needs (DPGMM and ProgBar come from the helit library;
# the module paths below assume helit's usual layout):
import os

import numpy
import matplotlib.pyplot as plt

from dpgmm import DPGMM
from utils.prog_bar import ProgBar

# (x, x_low, x_high, y_high, gt, gt_weight, dims, samples and out_dir are
# defined by the setup code earlier in the script.)

# Evaluate the ground-truth density on the plotting grid...
y_true = numpy.zeros(1024)
for i in range(len(gt)):
    y_true += gt_weight[i] * gt[i].prob(x[:, None])

# Plot the ground-truth density and save it as the first frame...
plt.figure(figsize=(16, 8))
plt.xlim(x_low, x_high)
plt.ylim(0.0, y_high)
plt.plot(x, y_true, c='g')
plt.savefig(os.path.join(out_dir, '0000.png'), bbox_inches='tight')

# Iterate, slowly building up the number of samples used, outputting the fit for each...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 8)  # 8 = initial cap on the stick-breaking truncation.
model.setConcGamma(1 / 8., 1 / 8.)  # Gamma prior on the concentration parameter.

for i, point in enumerate(samples):
    model.add(point)

    if (i + 1) in out:
        print '%i datapoints:' % (i + 1)

        # First fit the model...
        model.setPrior()
        p = ProgBar()
        it = model.solve()
        del p
        print 'Updated fitting in %i iterations' % it

        # Calculate its posterior distribution...
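        # A sketch of the truncated step, not the original script: it assumes
        # DPGMM.prob(sample) returns the fitted density at a single point, and
        # reuses the '0000.png'-style frame naming from the ground-truth plot.
        y_guess = numpy.array([model.prob(v) for v in x[:, None]])

        plt.figure(figsize=(16, 8))
        plt.xlim(x_low, x_high)
        plt.ylim(0.0, y_high)
        plt.plot(x, y_true, c='g')   # Ground truth in green.
        plt.plot(x, y_guess, c='r')  # Current fit in red.
        plt.savefig(os.path.join(out_dir, '%04i.png' % (i + 1)), bbox_inches='tight')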
Example #3

# Draw a labelled test set from the ground-truth mixture...
test = []
for _ in xrange(testCount):
    which = numpy.random.multinomial(1, mix).argmax()
    covar = sd[which] * numpy.identity(3)
    s = numpy.random.multivariate_normal(mean[which, :], covar)
    test.append((s, which))


# Train a model...
print "Training model..."
model = DPGMM(3)
for feat in train:
    model.add(feat)

model.setPrior()  # Sets the model's prior using the data that has already been added.
model.setConcGamma(1.0, 0.25)  # Make the model a little less conservative about creating new categories.

p = ProgBar()
iters = model.solveGrow()  # Like solve(), but grows the stick cap while doing so improves the fit.
del p
print "Solved model with %i iterations" % iters


# Classify the test set...
probs = model.stickProb(numpy.array(map(lambda t: t[0], test)))  # Per-sample probability of each stick (cluster).
catGuess = probs.argmax(axis=1)
catTruth = numpy.array(map(lambda t: t[1], test))

confusion_matrix = numpy.zeros((count, model.getStickCap() + 1), dtype=numpy.int32)  # +1: stickProb includes a column for a new, unused stick.

for i in xrange(len(catGuess)):
    confusion_matrix[catTruth[i], catGuess[i]] += 1
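
# A possible way to finish the example (not from the original script): print
# the confusion matrix and a purity score. Stick indices are arbitrary, so
# purity, which credits each stick with its dominant true category, is a
# fairer summary than diagonal accuracy.
print "Confusion matrix (rows = true category, cols = inferred stick):"
print confusion_matrix

purity = confusion_matrix.max(axis=0).sum() / float(confusion_matrix.sum())
print "Purity: %.3f" % purity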