def dictionary(descriptors, N):
    """Fit an N-component Gaussian mixture (OpenCV EM) to the descriptors.

    Returns a tuple of float32 arrays: (means, covariance matrices,
    weights), where weights is the first (only) row of the 1xN weight
    matrix returned by the EM model.
    """
    # BUGFIX: removed a leftover `pdb.set_trace()` debugging breakpoint
    # that froze every call waiting for interactive input.
    em = cv2.EM(N)
    em.train(descriptors)
    return np.float32(em.getMat("means")), \
        np.float32(em.getMatVector("covs")), \
        np.float32(em.getMat("weights"))[0]
def test_gaussian_mix(self):
    """EM trained on synthetic Gaussian clusters must recover each of the
    reference distributions within the mean/covariance tolerances."""
    np.random.seed(10)
    cluster_n = 5
    img_size = 512
    points, ref_distrs = make_gaussians(cluster_n, img_size)
    em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
    em.train(points)
    means = em.getMat("means")
    # Known bug: https://github.com/Itseez/opencv/pull/4232
    covs = em.getMatVector("covs")
    found_distrs = zip(means, covs)
    matches_count = 0
    meanEps = 0.05
    covEps = 0.1
    for i in range(cluster_n):
        for j in range(cluster_n):
            ref_mean, ref_cov = ref_distrs[j][0], ref_distrs[j][1]
            # Relative L2 error of the recovered mean and covariance
            # against reference distribution j.
            mean_err = cv2.norm(means[i] - ref_mean, cv2.NORM_L2) \
                / cv2.norm(ref_mean, cv2.NORM_L2)
            cov_err = cv2.norm(covs[i] - ref_cov, cv2.NORM_L2) \
                / cv2.norm(ref_cov, cv2.NORM_L2)
            if mean_err < meanEps and cov_err < covEps:
                matches_count += 1
    self.assertEqual(matches_count, cluster_n)
def dictionary(des, N): print 'start em training' em = cv2.EM(N) em.train(des) return np.float32(em.getMat("means")), \ np.float32(em.getMatVector("covs")), np.float32(em.getMat("weights"))[0]
def train_bg(self):
    """Train a background-model EM on the pixels listed in self.bgdata.

    Each entry of self.bgdata is a (row, col) coordinate into self.img;
    duplicate coordinates are collapsed via the dict keys before
    training.  Returns (em, log) where log is the likelihood output of
    EM.train.
    """
    # coordinate -> pixel value; dict keys deduplicate repeated coords
    samples = {}
    for coord in self.bgdata:
        samples[coord] = self.img[coord[0], coord[1]]
    # values() yields the same (arbitrary) ordering as the original
    # keys()-then-lookup loop, in one step.
    data = np.asarray(list(samples.values()))
    em = cv2.EM()
    # BUGFIX/cleanup: renamed misspelled 'lables' -> 'labels' and dropped
    # the dead commented-out print.
    retval, log, labels, probs = em.train(data, None, None, None)
    return em, log
def classifier(cards, contours, figure_moments):
    """EM classification.

    Groups every card's figures by the card 'number' (descending), FOREL-
    clusters them on their moments, then EM-clusters the moment values and
    stores per-figure symbol membership measures in
    figure.description['symbols'].  Returns the number of symbol classes.
    NOTE(review): indentation reconstructed from a collapsed source line;
    the block under `if n > SET_NUMBER:` is presumed to include the
    evenly-spaced center reinitialization — confirm against the original.
    """
    figures_list = []
    metric_type = cv.CV_CONTOURS_MATCH_I2  # NOTE(review): appears unused here
    max_number = max(map(lambda x: x.description['number'], cards))
    number_list = range(max_number + 1)
    number_list.reverse()  # process highest card numbers first
    for number in number_list:
        for card in filter(lambda x: x.description['number'] == number, cards):
            figures_list.extend(card.figures)
    # clustering
    clusters = forel([figure.id for figure in figures_list], figure_moments)
    #print clusters
    values = figure_moments.values()
    #print values
    centers = map(lambda x: Classify.cluster_center(x, figure_moments), clusters)
    #print centers
    # EM clustering
    n = len(clusters)
    if n > SET_NUMBER:
        # cap the cluster count and fall back to evenly spaced centers
        n = SET_NUMBER
        centers = range(0, SET_NUMBER)
        centers = map(lambda x: x * (1.0 / (SET_NUMBER - 1)), centers)
        #print centers
    em = cv2.EM(n)
    em.trainE(np.array(values), np.array(centers))
    # init symbols
    symbol_list = range(n)
    figures = []
    for figure in figures_list:
        if em.isTrained:
            # posterior membership measures from the trained EM model
            (dummy, emmocm) = em.predict(np.array(figure_moments[figure.id]))
            measures = list(emmocm[0])
        else:
            # fallback: hand-rolled membership measure when EM training failed
            measures = em.mocm(figure_moments[figure.id], clusters, figure_moments)
        symbols = {}
        for symbol in symbol_list:
            symbols[symbol] = measures[symbol]
        figure.description['symbols'] = symbols
        figures.append(figure)
    #print figures
    Classify.set_feature(cards, symbol_list)
    return len(symbol_list)
    # NOTE(review): tail of draw_gaussain() — its def line is outside this
    # chunk; the ellipse axes are 3-sigma along the eigenvector directions.
    s1, s2 = np.sqrt(w) * 3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA)


if __name__ == '__main__':
    # Demo loop: sample random Gaussian clusters, fit an EM mixture, and
    # draw reference (green) vs recovered (red) distributions.
    cluster_n = 5
    img_size = 512
    print 'press any key to update distributions, ESC - exit\n'
    while True:
        print 'sampling distributions...'
        points, ref_distrs = make_gaussians(cluster_n, img_size)
        print 'EM (opencv) ...'
        em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
        em.train(points)
        means = em.getMat('means')
        covs = em.getMatVector('covs')
        found_distrs = zip(means, covs)
        print 'ready!\n'
        img = np.zeros((img_size, img_size, 3), np.uint8)
        for x, y in np.int32(points):
            cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
        for m, cov in ref_distrs:
            draw_gaussain(img, m, cov, (0, 255, 0))
        for m, cov in found_distrs:
            draw_gaussain(img, m, cov, (0, 0, 255))
        cv2.imshow('gaussian mixture', img)
        # NOTE(review): chunk ends here; the waitKey/ESC handling implied
        # by the banner presumably follows outside this view.
def feature_detector(graph, cards, image, contours):
    """Estimate per-figure shading for every card.

    For each figure it measures lightness histograms of the card
    background, the figure's outer contour band and its interior, fits
    2-cluster EM models to locate background/contour lightness centers,
    and stores a shading value in figure.description['shadings'].
    NOTE(review): indentation reconstructed from a collapsed source line;
    the trailing ''' opens a string that continues past this chunk.
    """
    for card in cards:
        card_id = int(card.id)
        # lightness histogram of the card background region
        ((h, background_lightness, s), background_subimage, mask, x, y,
         winnames) = plot_intercontour_hist(image, card_id, contours, graph, False)
        card.winnames.append(winnames)
        image_name = '%d-%d: ' % (card_id, card_id)
        #cv2.imshow(image_name, background_subimage)
        if DEBUG:
            plot_selected_hist(background_lightness, image_name)
        cb0 = Classify.cluster_center(background_lightness)
        #print background_lightness
        #print cb0
        #print background_subimage
        result = []  # NOTE(review): appears unused in this chunk
        for figure in card.figures:
            if DEBUG:
                print 'figure.id: ', figure.id
            figure_outer_contour_id = int(figure.id)
            figure_inner_contour_id = int(graph.successors(figure.id)[0])
            # lightness of the figure's contour band
            ((h, contour_lightness, s), contour_subimage, mask, x, y,
             winnames) = plot_intercontour_hist(image, figure_outer_contour_id,
                                                contours, graph, False)
            figure.winnames.append(winnames)
            image_name = '%d-%d: ' % (card_id, figure_outer_contour_id)
            #cv2.imshow(image_name, contour_subimage)
            if DEBUG:
                plot_selected_hist(contour_lightness, image_name)
            #print contour_lightness
            cc0 = Classify.cluster_center(contour_lightness)
            #print cc0
            # lightness of the figure interior
            ((h, lightness, s), subimage, mask, x, y,
             winnames) = plot_intercontour_hist(image, figure_inner_contour_id,
                                                contours, graph, False)
            figure.winnames.append(winnames)
            image_name = '%d-%d: ' % (card_id, figure_inner_contour_id)
            #cv2.imshow(image_name, subimage)
            if DEBUG:
                plot_selected_hist(lightness, image_name)
            #print lightness
            centers = [cb0, cc0]
            if DEBUG:
                print centers
            mixture = mix([background_lightness, contour_lightness])
            #print 'mixture: ', mixture
            CLUSTER_NUM = 2
            # EM seeded with the background/contour centers refines them
            em = cv2.EM(CLUSTER_NUM)
            em.trainE(np.array(mixture), np.array(centers))
            #print em.isTrained()
            #print dir(em)
            #print em.getParams()
            #print dir(em.getAlgorithm())
            #print em.getMat('means')
            means = map(lambda x: float(x), em.getMat('means'))
            [cb, cc] = means
            if DEBUG:
                print means
            #print em.paramHelp('means')
            h1 = cv2.compareHist(lightness, contour_lightness, 2)
            h2 = cv2.compareHist(lightness, background_lightness, 2)
            #print h1, h2
            p = h1 / (h1 + h2)
            if DEBUG:
                print p
            #p = prob(lightness, em, CLUSTER_NUM)
            ca = Classify.cluster_center(lightness)
            em = cv2.EM(CLUSTER_NUM)
            em.trainE(np.array(mix([lightness])), np.array(means))
            if DEBUG:
                print em.getMat('means')
            #print em.getMat('covs')
            if DEBUG:
                print em.getMat('weights')
            # normalize the interior center between the two cluster centers
            p = (ca - min(cb, cc)) / abs(cb - cc)
            #(dummy, check) = em.predict(np.array(cb))
            #check = list(check[0])
            #if not check.index(max(check)): p.reverse()
            if DEBUG:
                print p
            figure.description['shadings'] = p
            '''
    # NOTE(review): tail of draw_gaussain() — its def line is outside this
    # chunk; the ellipse axes are 3-sigma along the eigenvector directions.
    s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA)


if __name__ == '__main__':
    # Demo loop using the older constructor-trains API: cv2.EM(points,
    # params=dict(...)) fits the mixture on construction.
    cluster_n = 5
    img_size = 512
    print 'press any key to update distributions, ESC - exit\n'
    while True:
        print 'sampling distributions...'
        points, ref_distrs = make_gaussians(cluster_n, img_size)
        print 'EM (opencv) ...'
        em = cv2.EM(points, params = dict(
            nclusters = cluster_n, cov_mat_type = cv2.EM_COV_MAT_GENERIC))
        means = em.getMeans()
        covs = np.zeros((cluster_n, 2, 2), np.float32)
        covs = em.getCovs(covs)  # FIXME
        found_distrs = zip(means, covs)
        print 'ready!\n'
        img = np.zeros((img_size, img_size, 3), np.uint8)
        for x, y in np.int32(points):
            cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
        for m, cov in ref_distrs:
            draw_gaussain(img, m, cov, (0, 255, 0))  # reference: green
        for m, cov in found_distrs:
            draw_gaussain(img, m, cov, (0, 0, 255))  # recovered: red
        cv2.imshow('gaussian mixture', img)
        # NOTE(review): chunk ends here; the waitKey/ESC handling implied
        # by the banner presumably follows outside this view.
def dictionary(descriptors, N):
    """Fit an N-component EM mixture to the descriptors and return its
    (means, covariances, first weight row), each cast with float32."""
    em = cv2.EM(N)
    em.train(descriptors)
    means = float32(em.getMat("means"))
    covariances = float32(em.getMatVector("covs"))
    weights = float32(em.getMat("weights"))[0]
    return means, covariances, weights
class EM(AbstractClassifier):
    """Thin wrapper exposing OpenCV's EM model through the
    AbstractClassifier interface.

    NOTE(review): `classifier` is a class attribute, so the single
    cv2.EM instance is shared by every instance of this wrapper —
    confirm this is intentional before training per-instance models.
    """
    classifier = cv2.EM()

    def predict(self, sample):
        # cv2.EM.predict returns (retval, probs); [0][1] selects the
        # second element of retval — presumably the most-likely component
        # index for this cv2 version; verify against the installed API.
        return self.classifier.predict(np.matrix(sample))[0][1]
def computeVocabulary(descriptors, method, num_clusters, iterations, update, lib, covar_type, nprocs=1): print 'compute now vocabulary, method:', method if 'sparse' in method: dl = decomposition.DictionaryLearning(num_clusters, max_iter=iterations) dl.fit(descriptors) return np.array(dl.components_) elif 'vgmm' in method: if 'vgmm2' == method: gmm = mixture.BayesianGaussianMixture( num_clusters, covariance_type=covar_type, weight_concentration_prior_type='dirichlet_distribution') else: gmm = mixture.BayesianGaussianMixture( num_clusters, covariance_type=covar_type, weight_concentration_prior_type='dirichlet_process') gmm.fit(descriptors) trainer = gmm trainer.type_ = 'gmm' elif 'gmm' == method: if 'cv2' in lib: # FIXME add iterations parameter (standard: 100) try: em = cv2.ml.EM_create() em.setClustersNumber(num_clusters) em.trainEM(descriptors) means = em.getMeans() weights = em.getWeights() covs_ = em.getCovs() except e: print 'WARNING: got exception {}\ntry old EM'.format(e) em = cv2.EM(num_clusters, cv2.EM_COV_MAT_DIAGONAL) em.train(descriptors) means = em.getMat('means') weights = em.getMat('weights') covs_ = em.getMatVector('covs') # convert to sklearn gmm covs = np.array([np.diagonal(c) for c in covs_]) print means.shape, weights.shape, len(covs_), covs.shape gmm = mixture.GMM(num_clusters) gmm.weights_ = weights.flatten() gmm.means_ = means gmm._set_covars(covs) else: gmm = fitGMM(descriptors, num_clusters, iterations, update, covar_type, nprocs) trainer = gmm trainer.type_ = 'gmm' elif method == 'fast-gmm': means = cluster.MiniBatchKMeans( num_clusters, compute_labels=False, batch_size=100 * num_clusters).fit(descriptors).cluster_centers_ gmm = mixture.GaussianMixture(num_clusters, max_iter=1, covariance_type=covar_type, n_init=1, means_init=means) gmm.fit(descriptors) trainer = gmm trainer.type_ = 'gmm' elif method == 'hier-kmeans': print 'run hierarchical kmeans' import pyflann flann = pyflann.FLANN(centers_init='kmeanspp') branch_size = 32 num_branches 
= (num_clusters - 1) / (branch_size - 1) clusters = flann.hierarchical_kmeans(descriptors, branch_size, num_branches, iterations, centers_init='kmeanspp') trainer = cluster.KMeans(num_clusters) trainer.cluster_centers_ = clusters elif method == 'kmeans': trainer = cluster.KMeans(num_clusters) if 'cv2' in lib: term_crit = (cv2.TERM_CRITERIA_EPS, 100, 0.01) ret, labels, clustes = cv2.kmeans(descriptors, num_clusters, term_crit, 10,\ cv2.KMEANS_PP_CENTERS) trainer.cluster_centers_ = clusters else: trainer.fit(descriptors) #clusters = trainer.cluster_centers_.astype(np.float32) else: if method == 'mini-kmeans': trainer = cluster.MiniBatchKMeans( num_clusters, compute_labels=False, batch_size=10000 if num_clusters < 1000 else 50000) elif method == 'mean-shift': trainer = cluster.MeanShift() else: print 'unknown clustering method' sys.exit(1) trainer.fit(descriptors) #clusters = trainer.cluster_centers_.astype(np.float32) if not hasattr(trainer, 'means_'): trainer.means_ = trainer.cluster_centers_ trainer.type_ = 'kmeans' return trainer
# Per-pixel background model: fit a 2-component diagonal-covariance GMM
# to the temporal samples of every pixel.
# NOTE(review): height, width and samples are defined outside this chunk,
# and the loop body is presumably continued past it — confirm.
means_arr = np.zeros((height, width, 2))
covs_arr = np.zeros((height, width, 2))
weights_arr = np.zeros((height, width, 2))
Mahalanobis = np.zeros((height, width, 2))
counter = 0
bg_gauss = np.zeros((height, width, 2))
T = 0.5      # presumably the background-weight threshold — confirm
alpha = 0.05  # presumably the model learning rate — confirm
cluster_n = 2
for y in range(height):
    for x in range(width):
        em = cv2.EM(cluster_n, cv2.EM_COV_MAT_DIAGONAL)
        em.train(samples[y][x])
        means = em.getMat('means')
        covs = em.getMatVector('covs')
        weights = em.getMat('weights')
        # keep only the (0,0) variance of each component's diagonal matrix
        covlist = []
        covlist.append(covs[0][0][0])
        covlist.append(covs[1][0][0])
        # print means
        means_arr[y][x] = means.T
        covs_arr[y][x] = covlist
        weights_arr[y][x] = weights[0]