# Example #1 (score: 0)
def test_non_contiguous_fmatrix():
    """kmeans must accept a non-contiguous feature matrix.

    Striding over columns produces a non-contiguous view; the labels
    kmeans returns must match a fresh assign_centroids pass, both for
    the original float data and after casting to int32.
    """
    from milksets.wine import load
    data, _ = load()
    # Every-other-column slice: a non-contiguous view of the data.
    data = data[:, ::2]
    labels, cents = milk.unsupervised.kmeans(data, 3, R=2, max_iters=10)
    assert np.all(assign_centroids(data, cents) == labels)

    # Repeat the check with an integer dtype.
    data = data.astype(np.int32)
    labels, cents = milk.unsupervised.kmeans(data, 3, R=2, max_iters=10)
    assert np.all(assign_centroids(data, cents) == labels)
# Example #2 (score: 0)
def test_non_contiguous_fmatrix():
    """Check kmeans on a strided (non-contiguous) matrix, float and int32."""
    from milksets.wine import load
    feats, _ = load()
    feats = feats[:, ::2]  # non-contiguous column view
    got, cs = milk.unsupervised.kmeans(feats, 3, R=2, max_iters=10)
    assert np.all(assign_centroids(feats, cs) == got)

    feats = feats.astype(np.int32)  # same test with integer features
    got, cs = milk.unsupervised.kmeans(feats, 3, R=2, max_iters=10)
    assert np.all(assign_centroids(feats, cs) == got)
 def apply(self, features):
     """Map a ``(local, extra)`` feature pair through the codebook model.

     The local features are histogrammed against ``self.centroids``
     (normalised per ``self.normalise``); the extra vector, when
     non-empty, is appended before delegating to the base model.
     """
     from milk.unsupervised.kmeans import assign_centroids
     local, extra = features
     histogram = assign_centroids(local, self.centroids, histogram=True, normalise=self.normalise)
     if extra is not None and len(extra):
         histogram = np.concatenate((histogram, extra))
     return self.base.apply(histogram)
# Example #4 (score: 0)
 def train(self, features, labels, **kwargs):
     """Train the wrapped base learner on codebook histograms.

     Each element of *features* is an ``(f, extra)`` pair: ``f`` is
     histogrammed against ``self.codebook`` and ``extra`` is stacked
     alongside the histogram before training.
     """
     from milk.unsupervised.kmeans import assign_centroids
     histograms = []
     extras = []
     for local, extra in features:
         histograms.append(assign_centroids(local, self.codebook, histogram=True, normalise=self.normalise))
         extras.append(extra)
     tfeatures = np.hstack((np.array(histograms), np.array(extras)))
     base_model = self.base.train(tfeatures, labels, **kwargs)
     return codebook_model(self.codebook, base_model, self.normalise)
# Example #5 (score: 0)
 def apply(self, features):
     """Classify a ``(f0, f1)`` pair via its codebook histogram.

     ``f0`` is reduced to a histogram of centroid assignments; ``f1``
     is concatenated onto it when present and non-empty.
     """
     from milk.unsupervised.kmeans import assign_centroids
     f0, f1 = features
     hist = assign_centroids(f0, self.centroids, histogram=True, normalise=self.normalise)
     has_extra = (f1 is not None) and len(f1)
     if has_extra:
         hist = np.concatenate((hist, f1))
     return self.base.apply(hist)
# Example #6 (score: 0)
 def train(self, features, labels, **kwargs):
     """Fit the base learner on histogram-plus-extra feature vectors."""
     from milk.unsupervised.kmeans import assign_centroids
     # Histogram every sample's local features against the codebook.
     hists = np.array([assign_centroids(f0, self.codebook, histogram=True, normalise=self.normalise)
                       for f0, _ in features])
     extras = np.array([f1 for _, f1 in features])
     tfeatures = np.hstack((hists, extras))
     base_model = self.base.train(tfeatures, labels, **kwargs)
     return codebook_model(self.codebook, base_model, self.normalise)
def project(features, centroids):
    """Project ``(sample, other)`` pairs to histogram-plus-other rows.

    For each pair, the sample is reduced to a normalised histogram of
    centroid assignments and concatenated with its accompanying vector;
    the rows are returned as a single array.
    """
    from milk.unsupervised.kmeans import assign_centroids
    rows = []
    for sample, other in features:
        hist = assign_centroids(sample, centroids, histogram=True, normalise=True)
        rows.append(np.concatenate([hist, other]))
    return np.array(rows)
# Example #8 (score: 0)
def test_assign_cids():
    """assign_centroids must reproduce the labels kmeans itself returned."""
    from milksets.wine import load
    feats, _ = load()
    labels, cents = milk.unsupervised.kmeans(feats, 3, R=2, max_iters=10)
    recomputed = assign_centroids(feats, cents)
    assert np.all(recomputed == labels)
def aic(features, centroids):
    """Return the AIC of *centroids* over the pooled feature blocks.

    *features* is an iterable of ``(fs, extra)`` pairs; only ``fs`` is
    used.  Every block is assigned to its nearest centroid and the AIC
    of the resulting clustering is computed on the concatenated data.
    """
    from milk.unsupervised.gaussianmixture import AIC
    from milk.unsupervised.kmeans import assign_centroids
    blocks = [fs for fs, _ in features]
    assignments = []
    for fs in blocks:
        assignments.extend(assign_centroids(fs, centroids))
    return AIC(np.concatenate(blocks), assignments, centroids)
# Example #10 (score: 0)
 def train(self, features, labels, **kwargs):
     """Select an AIC-best clustering, then train on assignment histograms.

     All samples' local features are pooled to pick the clustering; each
     sample is then summarised as a normalised histogram of centroid
     assignments before the wrapped base learner is trained.
     """
     allfeatures = np.concatenate(features)
     # The per-point assignments are not needed here, only the centroids.
     _, centroids = select_best_kmeans(allfeatures, self.ks, 1,
                                       "AIC")
     histograms = []
     for f in features:
         histograms.append(assign_centroids(f, centroids, histogram=True, normalise=1))
     base_model = self.base.train(histograms, labels, **kwargs)
     return precluster_model(centroids, base_model)
# Example #11 (score: 0)
 def train(self, features, labels, **kwargs):
     """Pick the AIC-best kmeans over ``self.ks`` and fit the base model.

     Each element of *features* is reduced to a histogram of centroid
     assignments (normalised per ``self.normalise``) before training
     the wrapped base learner.
     """
     allfeatures = np.vstack(features)
     # Only the centroids matter; the pooled assignments are discarded.
     _, centroids = select_best_kmeans(allfeatures,
                                       self.ks,
                                       repeats=1,
                                       method="AIC",
                                       R=self.R)
     histograms = [assign_centroids(f, centroids, histogram=True, normalise=self.normalise)
                   for f in features]
     base_model = self.base.train(histograms, labels, **kwargs)
     return precluster_model(centroids, base_model)
# Example #12 (score: 0)
    def testing(self, qImages):
        """Benchmark several centroid-assignment backends on query images.

        For every image path in *qImages*, descriptors are extracted via
        ``self.extractFeatures`` and assigned to precomputed cluster
        centers by five backends (scikit-learn via ``assignClusters``,
        OpenCV FLANN, OpenCV brute-force, scipy ``kmeans2``, and milk's
        ``assign_centroids``); the cumulative wall-clock time of each
        backend is printed at the end.

        NOTE(review): this method is Python 2 (``print`` statements) and
        assumes ``imread``, ``cv2``, ``time``, ``kmeans2`` and
        ``assign_centroids`` are in scope — confirm against the module's
        imports.
        """
        # set count for each brand to 0

        # Cumulative per-backend timers (seconds).
        t01=0
        t11=0
        t21=0
        t31=0
        t41=0
        for qImage in qImages:
        # read input image
            print qImage
            qImg = imread(qImage)
            keypoints, descriptors = self.extractFeatures(qImg)   
            descriptors = np.array(descriptors)

            # NOTE(review): `criteria` is built but never used below.
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

            #scikit learn clusters
            t00=time()
            self.assignClusters(descriptors)
            t01 = t01+ time()-t00
            


            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            search_params = dict(checks=50) 

            # Flann matcher

            t1=time()

            # Matcher construction is (deliberately?) included in the timing.
            flann = cv2.FlannBasedMatcher(index_params,search_params)

            # k=1: nearest center only, i.e. a plain assignment.
            flann.knnMatch(descriptors,self.opencv_centers,k=1)
            t11 = t11+ time()-t1

            


            # brute Force Matcher
            t2=time()
            bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
            bf.knnMatch(descriptors,self.opencv_centers,k=1)
            t21 = t21+ time()-t2
            
            #scipy Kmeans 
            t3= time()
            kmeans2(descriptors, self.scipy_centers, iter=10)
            t31 = t31+ time()-t3

            # milk cluster
            t4=time()
            assign_centroids(descriptors, self.milk_centers)
            t41 = t41+ time()-t4
            
        print "time for scikit learn to assign cluster",t01
        print "time for flann matcher to assign clusters",t11
        print "time for bfmatcher to assign clusters",t21
        print "time for scipy kmeans to assign clusters ",t31
        print "time for  milk with parallel processing copyrights MIT to assign clusters ",t41
# Example #13 (score: 0)
 def train(self, features, labels, **kwargs):
     """AIC-select a clustering, then train the base model on histograms."""
     pooled = np.concatenate(features)
     # Per-point assignments are unused; only the centroids are kept.
     _, centroids = select_best_kmeans(pooled, self.ks, 1, "AIC")
     histograms = []
     for f in features:
         histograms.append(assign_centroids(f, centroids, histogram=True, normalise=1))
     base_model = self.base.train(histograms, labels, **kwargs)
     return precluster_model(centroids, base_model)
# Example #14 (score: 0)
 def train(self, features, labels, **kwargs):
     """Cluster pooled features (AIC-best k, seed R) and fit on histograms."""
     stacked = np.vstack(features)
     # Assignments are discarded; only the centroids are needed below.
     _, centroids = select_best_kmeans(stacked, self.ks, repeats=1, method="AIC", R=self.R)
     hists = []
     for f in features:
         hists.append(assign_centroids(f, centroids, histogram=True, normalise=self.normalise))
     base_model = self.base.train(hists, labels, **kwargs)
     return precluster_model(centroids, base_model)
# Example #15 (score: 0)
 def apply(self, features):
     """Histogram *features* against the stored centroids and classify."""
     hist = assign_centroids(features, self.centroids, histogram=True, normalise=self.normalise)
     return self.base.apply(hist)
# Example #16 (score: 0)
 def apply(self, features):
     """Apply the base model to the normalised centroid-assignment histogram."""
     h = assign_centroids(features, self.centroids, histogram=True, normalise=1)
     return self.base.apply(h)
# Example #17 (score: 0)
def test_assign_cids():
    """kmeans labels must equal a post-hoc assign_centroids pass."""
    from milksets.wine import load
    data, _ = load()
    got, cents = milk.unsupervised.kmeans(data, 3, R=2, max_iters=10)
    assert np.all(assign_centroids(data, cents) == got)