# Imports assumed by this excerpt (helit's layout)...
from utils.prog_bar import ProgBar
from dpgmm import DPGMM

# Output parameters...
width = 400
height = 200
scale = 1.2 * max(
    map(lambda i: gt_weight[i] * gt[i].prob(gt[i].getMean()), xrange(len(gt))))

# Iterate, slowly building up the number of samples used and outputting the fit for each...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]  # Sample counts at which to refit and report.

model = DPGMM(dims, 8)
for i, point in enumerate(samples):
    model.add(point)

    if (i + 1) in out:
        print '%i datapoints:' % (i + 1)
        # First fit the model...
        model.setPrior()  # Set the prior from the data added so far.
        p = ProgBar()
        it = model.solve()  # Variational solve; returns the iteration count.
        del p
        print 'Updated fitting in %i iterations' % it

        # Some information...
        #print 'a:'
        #print model.alpha
        #print 'v:'
        #print model.v
        #print 'stick breaking weights:'
        #print model.v[:,0] / model.v.sum(axis=1)
        #print 'stick weights:'
        #print model.intMixture()[0]
        #print 'z sums:'
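
        # Summarise the current fit; a sketch assuming intMixture() returns
        # (stick weights, one Gaussian per stick), as its use above suggests...
        weights, gaussians = model.intMixture()
        for w, g in zip(weights, gaussians):
            print 'weight = %.4f, mean =' % w, g.getMean()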

Example #2

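# A sketch of the training-set generation this example assumes; `mix`,
# `mean`, `sd` and `trainCount` (hypothetical names here) describe a
# 3-component, 3D ground-truth mixture, mirroring the test loop below...
train = []
for _ in xrange(trainCount):
    which = numpy.random.multinomial(1, mix).argmax()
    covar = sd[which] * numpy.identity(3)
    s = numpy.random.multivariate_normal(mean[which, :], covar)
    train.append(s)

# Generate the test set, remembering which component each sample came from...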
test = []
for _ in xrange(testCount):
    which = numpy.random.multinomial(1, mix).argmax()
    covar = sd[which] * numpy.identity(3)
    s = numpy.random.multivariate_normal(mean[which, :], covar)
    test.append((s, which))


# Train a model...
print "Trainning model..."
model = DPGMM(3)
for feat in train:
    model.add(feat)

model.setPrior()  # This sets the model's prior using the data that has already been added.
model.setConcGamma(1.0, 0.25)  # Gamma prior on the DP concentration; makes the model a little less conservative about creating new categories.

p = ProgBar()
iters = model.solveGrow()  # Like solve(), but also grows the stick cap while that improves the fit.
del p
print "Solved model with %i iterations" % iters


# Classify the test set...
probs = model.stickProb(numpy.array(map(lambda t: t[0], test)))  # Per-sample probability of each stick, plus a final column for a new component.
catGuess = probs.argmax(axis=1)
catTruth = numpy.array(map(lambda t: t[1], test))

confusion_matrix = numpy.zeros((count, model.getStickCap() + 1), dtype=numpy.int32)
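
# A sketch of filling in the confusion matrix from the guesses above;
# `count` (the number of true components) comes from the elided setup...
for i in xrange(len(test)):
    confusion_matrix[catTruth[i], catGuess[i]] += 1
print confusion_matrix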

Example #3



# Output parameters...
low = -2.0
high = 14.0
width = 800
height = 400
scale = 1.5 * max(map(lambda i: gt_weight[i]*gt[i].prob(gt[i].getMean()), xrange(len(gt))))
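# low/high bound the x-axis of the plot below; scale normalises the tallest
# ground-truth peak (with 50% headroom) when rendering densities.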



# Fill in the model...
model = DPGMM(dims)
for point in samples: model.add(point)
model.setPrior()


# Iterate over the number of sticks, increasing it until the fit stops getting better...
prev = None  # Fit score from the previous stick count.
while True:
  print 'Stick count = %i'%model.getStickCap()
  p = ProgBar()
  it = model.solve()
  del p
  print 'Updated fitting in %i iterations'%it

  # Now plot the estimated distribution against the actual distribution...
  img = numpy.ones((height,width,3))
  draw = model.sampleMixture()
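
  # A sketch of the stopping rule, assuming helit's nllData() (negative log
  # likelihood, lower is better) and incStickCap() methods: keep adding
  # sticks while the score improves...
  score = model.nllData()
  if prev is not None and score >= prev:
    break
  prev = score
  model.incStickCap()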