# Example 1
# Overlay each mixture component's weighted Gaussian density on a
# histogram of the raw samples X, cycling through a small colour palette.
colors = 'rgbmc'
for i, mu in enumerate(c_mu):
    # Evaluate the component density on a grid spanning +/- 10 sigma
    # around its mean (100 points is plenty for a smooth curve).
    A = np.linspace(mu - 10 * model_sigma, mu + 10 * model_sigma, 100)
    B = 1 / np.sqrt(2 * np.pi * model_sigma**2) * np.exp(
        -(A - mu)**2 / (2 * model_sigma**2))
    # Scale by the component's mixing weight so the curves sum to the
    # overall mixture density.
    B = B * pi[i]
    c = colors[i % len(colors)]
    plt.plot(A, B, '%s-' % c)
plt.hist(X, bins=50, alpha=0.5, facecolor='lightgrey')
plt.show()

# burn-in
#estimated_c_X, estimated_c_mu = estimate_dpm_model( alpha, X, burn_in_iterations, prior_mu, prior_sigma, model_sigma )

# Fit a K-component, full-covariance Gaussian mixture to X, running EM
# in 40-iteration bursts until the model reports convergence.
gmm = GMM(K, 'full')

# A zero-iteration fit only initialises weights/means/covariances ('wmc').
gmm.fit(X, 0, init_params='wmc')
while not gmm.converged_:
    # Resume EM from the current parameters (empty init_params means
    # "do not re-initialise anything").
    gmm.fit(X, 40, init_params='')

# Pull out the fitted mixture parameters.
# NOTE(review): these are legacy attribute spellings (no trailing
# underscore) from old scikit-learn — confirm the targeted version.
pi, mu, sigma = gmm.weights, gmm.means, gmm.covars

#print 'real components:'
#print c_mu

#print 'estimated components:'
#print estimated_c_mu
# Example 2
# Build a reproducible synthetic binary image: scatter n**2 random points
# on an l x l grid, blur them into blobs, threshold at the mean, then add
# Gaussian noise and take an intensity histogram.
np.random.seed(1)
n = 10
l = 256
im = np.zeros((l, l))
points = l * np.random.random((2, n**2))
# np.int / np.float were deprecated in NumPy 1.20 and removed in 1.24;
# the builtins int/float are the documented, behaviour-identical
# replacements.
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
# Blur radius scales with blob spacing so the points merge into blobs.
im = ndimage.gaussian_filter(im, sigma=l / (4. * n))

# Ground-truth foreground mask: pixels above the mean intensity.
mask = (im > im.mean()).astype(float)

# Noisy observation of the mask.
img = mask + 0.3 * np.random.randn(*mask.shape)

# 60-bin intensity histogram; bin_centers are the bin midpoints.
hist, bin_edges = np.histogram(img, bins=60)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])

# Fit a 2-component Gaussian mixture to the pixel intensities (image
# flattened to a column vector) to separate foreground from background.
# NOTE(review): 'cvtype' and '.means' are legacy pre-0.12 scikit-learn
# GMM spellings (modern API: covariance_type= and .means_) — confirm
# which release this example targets.
classif = GMM(n_components=2, cvtype='full')
classif.fit(img.reshape((img.size, 1)))

# Binarise at the midpoint between the two fitted component means.
threshold = np.mean(classif.means)
binary_img = img > threshold

# Two of three side-by-side panels (pyplot state machine — the call
# order below is significant).
plt.figure(figsize=(11, 4))

# Left panel: the noisy synthetic image.
plt.subplot(131)
plt.imshow(img)
plt.axis('off')
# Middle panel: intensity histogram with a hard-coded cut at 0.5 drawn
# as a dashed red line (text is placed in axes coordinates).
plt.subplot(132)
plt.plot(bin_centers, hist, lw=2)
plt.axvline(0.5, color='r', ls='--', lw=2)
plt.text(0.57, 0.8, 'histogram', fontsize=20, transform=plt.gca().transAxes)
plt.yticks([])
# Example 3
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, k=4)
# Only take the first fold.  next(iter(...)) is the portable spelling:
# it works on Python 2.6+ and Python 3, whereas the iterator .next()
# method was removed in Python 3.
train_index, test_index = next(iter(skf))

X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]

# Number of distinct classes present in the training fold.
n_classes = len(np.unique(y_train))

# Build one GMM per covariance type so their fits can be compared
# side by side.
classifiers = {cv: GMM(n_states=n_classes, cvtype=cv)
               for cv in ['spherical', 'diag', 'tied', 'full']}

n_classifiers = len(classifiers)

# One column per classifier, two rows; trim the margins so the panels
# fill almost the whole figure.
pl.figure(figsize=(3 * n_classifiers / 2, 6))
pl.subplots_adjust(bottom=.01, top=0.95, hspace=.15,
                   wspace=.05, left=.01, right=.99)

for index, (name, classifier) in enumerate(classifiers.iteritems()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.