except: pass
os.mkdir(out_dir)


# Output parameters...
low = -5.0
high = 9.0
width = 400
height = 200
scale = 1.2 * max(map(lambda i: gt_weight[i]*gt[i].prob(gt[i].getMean()), xrange(len(gt))))


# Iterate, slowly building up the number of samples used and outputting the fit for each...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 8)
for i, point in enumerate(samples):
    model.add(point)

    if (i+1) in out:
        print '%i datapoints:' % (i+1)

        # First fit the model...
        model.setPrior()
        p = ProgBar()
        it = model.solve()
        del p
        print 'Updated fitting in %i iterations' % it

        # Some information...
        #print 'a:'
        #print model.alpha
x = numpy.linspace(x_low, x_high, 1024)
y_true = numpy.zeros(1024)
for i in range(len(gt)):
    y_true += gt_weight[i] * gt[i].prob(x[:, None])

plt.figure(figsize=(16, 8))
plt.xlim(x_low, x_high)
plt.ylim(0.0, y_high)
plt.plot(x, y_true, c='g')
plt.savefig(os.path.join(out_dir, '0000.png'), bbox_inches='tight')


# Iterate, slowly building up the number of samples used and outputting the fit for each...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 8)
model.setConcGamma(1/8., 1/8.)
for i, point in enumerate(samples):
    model.add(point)

    if (i+1) in out:
        print '%i datapoints:' % (i+1)

        # First fit the model...
        model.setPrior()
        p = ProgBar()
        it = model.solve()
        del p
        print 'Updated fitting in %i iterations' % it
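        # A minimal sketch of plotting the current fit at each checkpoint,
        # continuing the "if (i+1) in out:" block above. It assumes
        # model.prob() accepts a single point, as the other snippets here use
        # it; the fit is drawn in red over the green ground truth and saved
        # alongside '0000.png'.
        y_fit = numpy.array([model.prob([v]) for v in x])
        plt.figure(figsize=(16, 8))
        plt.xlim(x_low, x_high)
        plt.ylim(0.0, y_high)
        plt.plot(x, y_true, c='g')
        plt.plot(x, y_fit, c='r')
        plt.savefig(os.path.join(out_dir, '%04i.png' % (i+1)), bbox_inches='tight')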
for _ in xrange(trainCount):
    which = numpy.random.multinomial(1, mix).argmax()
    covar = sd[which] * numpy.identity(3)
    s = numpy.random.multivariate_normal(mean[which, :], covar)
    train.append(s)

test = []
for _ in xrange(testCount):
    which = numpy.random.multinomial(1, mix).argmax()
    covar = sd[which] * numpy.identity(3)
    s = numpy.random.multivariate_normal(mean[which, :], covar)
    test.append((s, which))


# Train a model...
print 'Training model...'
model = DPGMM(3)
for feat in train:
    model.add(feat)

model.setPrior()  # Sets the model's prior using the data that has already been added.
model.setConcGamma(1.0, 0.25)  # Makes the model a little less conservative about creating new categories.

p = ProgBar()
iters = model.solveGrow()
del p
print 'Solved model with %i iterations' % iters


# Classify the test set...
probs = model.stickProb(numpy.array(map(lambda t: t[0], test)))
catGuess = probs.argmax(axis=1)
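# A minimal sketch of scoring the classification, assuming catGuess and the
# labelled test list from above. DPGMM stick indices are arbitrary, so each
# stick is first mapped to the true mixture component it most often coincides
# with, then accuracy is measured against the true labels.
truth = numpy.array(map(lambda t: t[1], test))

stickToCat = dict()
for stick in numpy.unique(catGuess):
    members = truth[catGuess == stick]
    stickToCat[stick] = numpy.bincount(members).argmax()

guess = numpy.array([stickToCat[s] for s in catGuess])
print 'Accuracy = %.3f' % (guess == truth).mean()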
# Output parameters...
low = -2.0
high = 14.0
width = 800
height = 400
scale = 1.5 * max(map(lambda i: gt_weight[i]*gt[i].prob(gt[i].getMean()), xrange(len(gt))))


# Iterate, slowly building up the number of samples used and outputting the fit for each...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 8)
for i, point in enumerate(samples):
    model.add(point)

    if (i+1) in out:
        print '%i datapoints:' % (i+1)

        # First fit the model...
        model.setPrior()
        p = ProgBar()
        it = model.solve()
        del p
        print 'Updated fitting in %i iterations' % it

        # Now plot the estimated distribution against the actual distribution...
        img = numpy.ones((height, width, 3))
        draw = model.sampleMixture()
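        # A minimal sketch of finishing the render the snippet truncates at:
        # each pixel column is an x value, the ground truth and fitted density
        # are scaled into [0, 1] by 'scale', and a pixel is coloured at each
        # curve's height. model.prob is used in place of whatever the original
        # did with 'draw', and saving via matplotlib's imsave is an assumption.
        for px in xrange(width):
            x = float(px) / float(width) * (high - low) + low
            y_gt = sum(map(lambda j: gt_weight[j] * gt[j].prob([x]), xrange(len(gt))))
            y_fit = model.prob([x])
            img[height - 1 - int((height - 1) * min(y_gt / scale, 1.0)), px, :] = [0.0, 1.0, 0.0]
            img[height - 1 - int((height - 1) * min(y_fit / scale, 1.0)), px, :] = [1.0, 0.0, 0.0]
        plt.imsave('fit_%04i.png' % (i+1), img)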
dims = 2
numpy.random.seed(1)

gt = Gaussian(dims)
gt.setMean([1.0, 0.0])
gt.setCovariance([[1.0, 0.8], [0.8, 1.0]])

sample_count = 30000
sample = []
for _ in xrange(sample_count):
    sample.append(gt.sample())

f = open('data.txt', 'w')
for x in sample:
    f.write('%lf,%lf\n' % (x[0], x[1]))
f.close()

model = DPGMM(dims, 1)
for i, data in enumerate(sample):
    model.add(data)

start = time.time()
model.setPrior()
elapsed_time = time.time() - start
num = model.solve()
print elapsed_time
print num
print "%f" % (model.prob([2.0, 1.0]))

#for i in range(10):
#    x = i*0.4 - 2.0
#    print "%f,%f" % (x, model.prob([x]))
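# A minimal sketch of sanity-checking the fit against the known ground truth:
# draw fresh points from gt and report the mean log-likelihood under the
# fitted model and under the true Gaussian. Everything used here (gt.sample,
# gt.prob, model.prob) already appears in these examples; close values
# suggest a good fit.
held_out = [gt.sample() for _ in xrange(1000)]
ll_model = numpy.mean([numpy.log(model.prob(p)) for p in held_out])
ll_truth = numpy.mean([numpy.log(gt.prob(p)) for p in held_out])
print 'mean log-likelihood: model = %f, truth = %f' % (ll_model, ll_truth)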
cov = matrix([[1, 1], [1, 2]])
x1, y1 = random.multivariate_normal(mu1, cov, 500).T

samples = []
for i in range(len(x1)):
    samples.append([x1[i], y1[i]])

low = -8.0
high = 8.0
width = 100
height = 200
scale = 1.1 * norm_pdf_multivariate(mu1, mu1, cov)

out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]

model = DPGMM(dims, 6)
for i, point in enumerate(samples):
    model.add(point)
model.setPrior()

#p = ProgBar()
it = model.solve()
#del p

img = ones((height, width, 3))
draw = model.sampleMixture()

# Print the fitted density over a regular grid. (The original looped the y
# axis over width and divided by width; with height = 200 that skips half the
# grid, so the inner loop uses height here.)
for px in xrange(width):
    x = float(px) / float(width) * (high - low) + low
    for py in xrange(height):
        y = float(py) / float(height) * (high - low) + low
        print "%f\t%f\t%f" % (x, y, model.prob([x, y]))

sys.exit()
# Output parameters...
low = -2.0
high = 14.0
width = 800
height = 400
scale = 1.5 * max(map(lambda i: gt_weight[i]*gt[i].prob(gt[i].getMean()), xrange(len(gt))))


# Iterate a number of sample counts...
out = [8, 16, 32, 64, 128, 256, 512, 1024, 2048]
for dpc in out:
    print '%i datapoints:' % (dpc)

    # Fill in the model...
    model = DPGMM(dims)
    for point in samples[:dpc]:
        model.add(point)
    model.setPrior()

    # Solve...
    p = ProgBar()
    model = model.multiGrowSolve(8)
    del p

    # Now plot the estimated distribution against the actual distribution...
    img = numpy.ones((height, width, 3))
    draw = model.sampleMixture()
    for px in xrange(width):
        x = float(px) / float(width) * (high - low) + low
try: shutil.rmtree(out_dir)
except: pass
os.mkdir(out_dir)


# Output parameters...
low = -2.0
high = 14.0
width = 800
height = 400
scale = 1.5 * max(map(lambda i: gt_weight[i]*gt[i].prob(gt[i].getMean()), xrange(len(gt))))


# Fill in the model...
model = DPGMM(dims)
for point in samples:
    model.add(point)
model.setPrior()


# Iterate over the number of sticks, increasing till it stops getting better...
prev = None
while True:
    print 'Stick count = %i' % model.getStickCap()

    p = ProgBar()
    it = model.solve()
    del p
    print 'Updated fitting in %i iterations' % it

    # Now plot the estimated distribution against the actual distribution...
    img = numpy.ones((height, width, 3))
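# The snippet above truncates before the loop's termination test (the 'prev'
# comparison and the stick-cap increase are missing). A minimal sketch of the
# same idea using only calls these examples document: solveGrow(), seen in the
# classification example, grows the stick count internally and keeps fitting
# until it stops getting better, replacing the manual while loop.
model = DPGMM(dims)
for point in samples:
    model.add(point)
model.setPrior()

iters = model.solveGrow()  # Grow-and-solve; compare multiGrowSolve(8) above.
print 'Solved with %i iterations' % iters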