Example #1
import numpy as np

# kmeans, kmeans_quantize and the set_distance helper are assumed to come from
# the surrounding test module; kmeans/kmeans_quantize match the cyvlfeat.kmeans API.
def test_kmeans_ANN():
    num_data = 5000
    num_centers = 4
    dimension = 8
    noise_level = 0.1

    # np.random.random_integers was removed from NumPy; randint's upper bound is
    # exclusive, so use 41 to keep the same [-40, 40] range
    centers = np.random.randint(-40, 41, (num_centers, dimension)).astype(np.float32)
    data = np.empty((num_data, dimension), dtype=np.float32)
    for i in range(num_data):
        data[i] = centers[i % num_centers] + np.random.random_sample(dimension)*noise_level

    found_centers = kmeans(data, num_centers, initialization="PLUSPLUS", algorithm="ANN")
    found_assignments = kmeans_quantize(data, found_centers, algorithm="ANN")

    assert found_centers.dtype == np.float32
    assert found_centers.shape == (num_centers, dimension)

    dist = set_distance(centers, found_centers)
    assert dist <= noise_level, dist

    # the four reference points (one per true center) must land in distinct clusters
    for i in range(num_centers):
        for j in range(num_centers):
            if i != j:
                assert found_assignments[i] != found_assignments[j]
    # every point must share the cluster of its generating center's representative
    for i in range(num_data):
        assert found_assignments[i] == found_assignments[i % num_centers]
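set_distance is not defined in this snippet; a minimal sketch of a plausible
implementation, assuming it measures the largest nearest-neighbor distance
between the two center sets (Hausdorff-style):

import numpy as np

def set_distance(a, b):
    """Largest nearest-neighbor distance between point sets a and b."""
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=2)  # pairwise distances
    return max(d.min(axis=1).max(), d.min(axis=0).max())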
Example #2
import os

import numpy as np


def matlab_train_one_vs_rest_SVM(path_boxes_np, CAE_model_path, K, args):
    data = extract_features(path_boxes_np, CAE_model_path, args)

    centers = kmeans(data,
                     num_centers=K,
                     initialization='PLUSPLUS',
                     num_repetitions=10,
                     max_num_comparisons=100,
                     max_num_iterations=100,
                     algorithm='LLOYD',
                     num_trees=3)
    labels = kmeans_quantize(data, centers)
    labels = np.array(labels, dtype=int)  # np.int was removed in NumPy 1.24

    #data=data.astype(np.float64)
    #data_flatten=data.flatten()
    data = data.tolist()
    labels = labels.tolist()

    _labels = []
    _w = []
    _b = []

    for i in range(K):
        # work on a copy; assigning labels directly would overwrite it after the
        # first iteration and corrupt every later one-vs-rest split
        _temp = list(labels)
        for j in range(len(labels)):
            if _temp[j] == i:
                _temp[j] = 1.
            else:
                _temp[j] = -1.
        _labels.append(_temp)

    import matlab
    import matlab.engine
    import scipy.io as io

    # to save data into data.mat
    io.savemat('../matlab_files/data.mat', {'data': data})
    # to save _labels into labels.mat
    _labels = np.array(_labels, dtype=int)
    io.savemat('../matlab_files/labels.mat', {'labels': _labels})

    eng = matlab.engine.start_matlab()

    print('use matlab backend to train!')
    eng.SVM_train(nargout=0)
    eng.quit()
    #eng.SVM_train()
    # rename
    os.rename('../matlab_files/data.mat',
              '../matlab_files/{}_data.mat'.format(args.dataset))
    os.rename('../matlab_files/labels.mat',
              '../matlab_files/{}_labels.mat'.format(args.dataset))
    os.rename('../matlab_files/weights.mat',
              '../matlab_files/{}_weights.mat'.format(args.dataset))
    os.rename('../matlab_files/biases.mat',
              '../matlab_files/{}_biases.mat'.format(args.dataset))
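The per-cluster ±1 label construction above can also be done in one vectorized
step; a minimal sketch, assuming labels is the 1-D assignment array returned by
kmeans_quantize:

def one_vs_rest_labels(labels, K):
    """Return a (K, N) array whose row i is +1. where labels == i, else -1."""
    labels = np.asarray(labels)
    return np.where(labels[None, :] == np.arange(K)[:, None], 1., -1.)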
Example #3
    def __transform_one(self, x):
        """Compute bag of words bincount per one image.

        Args:
            x: Features set of size (w * h, c).

        Returns:
            Bincount.
        """
        assignment = kmeans_quantize(x, self.centers, algorithm=self.algorithm)
        return np.bincount(assignment, minlength=self.clusters_number)
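For context, a minimal sketch of a fit step that would produce the attributes
the method above relies on; the class name is hypothetical, and kmeans is
assumed to be the cyvlfeat.kmeans binding:

import numpy as np
from cyvlfeat.kmeans import kmeans  # assumed binding, matching the call above

class BagOfWords:
    """Sketch of a fit step producing the attributes used by __transform_one."""

    def __init__(self, clusters_number=64, algorithm='LLOYD'):
        self.clusters_number = clusters_number
        self.algorithm = algorithm
        self.centers = None

    def fit(self, features):
        # features: (n_samples, c) array of stacked local descriptors
        self.centers = kmeans(features.astype(np.float32),
                              num_centers=self.clusters_number,
                              initialization='PLUSPLUS',
                              algorithm=self.algorithm)
        return self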
Example #4
def encodeVLAD(images, encoder, dmd_options):
    descrs = []

    pool = Pool(processes=4)
    features = [pool.apply_async(computeSDMD, args=(img, dmd_options, 0)) for img in images]
    pool.close()
    pool.join()

    centers = encoder['centers']

    for feature in features:
        feature = feature.get().T

        # contiguous float32 copy of the (n_patch, n_feature) descriptor matrix
        new_feature = np.array(feature, dtype=np.float32)
        predicted_labels = kmeans_quantize(data=new_feature, centers=centers)
        n_cluster = centers.shape[0]
        [n_patch, n_feature] = new_feature.shape

        Vm = np.zeros([n_cluster, n_feature], dtype=np.float32)  # first-order (mean) residuals
        Vc = np.zeros([n_cluster, n_feature], dtype=np.float32)  # second-order (variance) residuals
        Vs = np.zeros([n_cluster, n_feature], dtype=np.float32)  # third-order (skewness) residuals
        for i in range(n_cluster):
            Ni = np.sum(predicted_labels == i)
            if Ni > 0:
                i_features = new_feature[predicted_labels == i, :]
                mi = np.mean(i_features, axis=0)
                Vm[i] = Ni * (mi - centers[i])
                Vc[i] = (1 / Ni) * np.sum((i_features - mi) ** 2, axis=0) - (1 / Ni) * np.sum(
                    (i_features - centers[i]) ** 2, axis=0)
                Vs[i] = ((1 / Ni) * (np.sum((i_features - mi) ** 3, axis=0))) / np.maximum(
                    ((1 / Ni) * np.sum((i_features - mi) ** 2, axis=0)) ** 1.5, 1e-12) - (
                                (1 / Ni) * (np.sum((i_features - centers[i]) ** 3, axis=0))) / np.maximum(
                    ((1 / Ni) * np.sum((i_features - centers[i]) ** 2, axis=0)) ** 1.5, 1e-12)
        # power normalization, also called square-rooting normalization
        Vm = np.sign(Vm) * np.sqrt(np.abs(Vm))
        Vc = np.sign(Vc) * np.sqrt(np.abs(Vc))
        Vs = np.sign(Vs) * np.sqrt(np.abs(Vs))
        # # L2 normalization
        # Vm /= np.maximum(np.linalg.norm(Vm, axis=1)[:, None], 1e-12)
        # Vc /= np.maximum(np.linalg.norm(Vc, axis=1)[:, None], 1e-12)
        # Vs /= np.maximum(np.linalg.norm(Vs, axis=1)[:, None], 1e-12)

        V_all = np.vstack((Vm, Vc, Vs)).flatten()[None, :]

        descrs = V_all if len(descrs) == 0 else np.concatenate((descrs, V_all), axis=0)
    return descrs.astype(np.float32)
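encodeVLAD only reads encoder['centers']; a minimal sketch of building that
dict from training descriptors, assuming the cyvlfeat.kmeans binding and a
hypothetical build_encoder helper:

import numpy as np
from cyvlfeat.kmeans import kmeans  # assumed binding

def build_encoder(train_features, n_clusters=128):
    """Cluster stacked (n_patches, n_feature) training descriptors."""
    data = np.ascontiguousarray(train_features, dtype=np.float32)
    centers = kmeans(data, num_centers=n_clusters, initialization='PLUSPLUS',
                     num_repetitions=1)
    return {'centers': centers}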
Example #5
def train_one_vs_rest_SVM(path_boxes_np, CAE_model_path, K, args):
    data = extract_features(path_boxes_np, CAE_model_path, args)
    print('feature extraction finished!')
    # cluster the extracted features with k-means; the sklearn equivalent is kept for reference:
    # clusters=KMeans(n_clusters=K,init='k-means++',n_init=10,algorithm='full',max_iter=300).fit(data)
    centers = kmeans(data,
                     num_centers=K,
                     initialization='PLUSPLUS',
                     num_repetitions=10,
                     max_num_comparisons=300,
                     max_num_iterations=300)
    labels = kmeans_quantize(data, centers)
    # nums=np.zeros(10,dtype=int)
    # for item in clusters.labels_:
    #     nums[item]+=1
    # print(nums)
    print('clustering finished!')
    # One-vs-Rest SVM: train an OvR linear SVM on the cluster labels
    clf = svm.LinearSVC(C=1.0, multi_class='ovr', max_iter=len(labels) * 5)
    clf.fit(data, labels)
    joblib.dump(clf, svm_save_path_pre + args.dataset + '.m')
    print('train finished!')
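A minimal usage sketch for the saved model; the function name is hypothetical
and mirrors the paths used in the training code above:

import joblib

def load_and_predict(features, dataset, svm_save_path_pre):
    """Hypothetical reload-and-predict step for the OvR SVM saved above."""
    clf = joblib.load(svm_save_path_pre + dataset + '.m')
    return clf.predict(features)  # hard cluster-label predictions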
Example #6
def train_one_vs_rest_SVM(path_boxes_np, CAE_model_path, K, args):
    data = extract_features(path_boxes_np, CAE_model_path, args)
    print('feature extraction finished!')
    # cluster the extracted features with k-means; the sklearn equivalent is kept for reference:
    # clusters=KMeans(n_clusters=K,init='k-means++',n_init=10,algorithm='full',max_iter=300).fit(data)
    centers = kmeans(data,
                     num_centers=K,
                     initialization='PLUSPLUS',
                     num_repetitions=10,
                     max_num_comparisons=100,
                     max_num_iterations=100,
                     algorithm='LLOYD',
                     num_trees=3)
    labels = kmeans_quantize(data, centers)

    # one-hot encode the labels (K columns), then remap {0, 1} -> {-1, +1}
    sparse_labels = np.eye(K)[labels]
    sparse_labels = (sparse_labels - 0.5) * 2

    # nums=np.zeros(10,dtype=int)
    # for item in clusters.labels_:
    #     nums[item]+=1
    # print(nums)
    print('clustering finished!')
    # SGDClassifier wrapped in OneVsRestClassifier, standing in for the paper's
    # OvR SVM with hinge loss and the SDCA optimizer
    base_estimator = SGDClassifier(max_iter=10000,
                                   warm_start=True,
                                   loss='hinge',
                                   early_stopping=True,
                                   n_iter_no_change=50,
                                   l1_ratio=0)
    ovr_classifier = OneVsRestClassifier(base_estimator)

    #clf=svm.LinearSVC(C=1.0,multi_class='ovr',max_iter=len(labels)*5,loss='hinge',)
    ovr_classifier.fit(data, sparse_labels)
    svm_model_path = f'{svm_save_dir}/{args.dataset}.m'
    joblib.dump(ovr_classifier, svm_model_path)
    print('train finished!')

Example #7

    # fragment from a HOG bag-of-words script; calcHog is assumed to return [(hogvalues, valid_points)]
    [(hogvalues, valid_points)] = calcHog(gray, points_org, patchsize, ncells, i-1)
    # L1-normalize each HOG descriptor row
    for m in range(hogvalues.shape[0]):
        hogvalues[m] /= sum(hogvalues[m])
    hogmatrix.append(hogvalues)

    # mark the selected point in red (debug visualization)
    img[dmax] = [0, 0, 255]
#    cv2.imshow('image',img)
#    cv2.waitKey(0)
#    cv2.destroyAllWindows()
    
hogmatrixcat = np.concatenate(hogmatrix, axis=0)
kmeancenters = kmeans.kmeans(hogmatrixcat, kvalue, initialization='PLUSPLUS')
kmeanclusters = kmeans.kmeans_quantize(hogmatrixcat, kmeancenters)

label_org = []
clustercount = 0

# split the flat cluster-label vector back into per-image label lists
for i in range(len(hogmatrix)):
    n = hogmatrix[i].shape[0]
    label_org.append(list(kmeanclusters[clustercount:clustercount + n]))
    clustercount += n

hist_org = np.zeros((len(label_org), kvalue))
    
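The snippet ends before hist_org is filled; a minimal sketch of the natural
next step, one bincount histogram row per image over the kvalue visual words:

for i, labels in enumerate(label_org):
    hist_org[i] = np.bincount(labels, minlength=kvalue)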
Example #8
def encodeHVLAD(images, encoder, dmd_options):
    l_descrs, m_descrs, s_descrs, all_descrs = [], [], [], []

    pool = Pool(processes=4)
    l_features = [pool.apply_async(computeSDMD, args=(img, dmd_options, 0)) for img in images]  # per-image SDMD features at pyramid level 0
    m_features = [pool.apply_async(computeSDMD, args=(img, dmd_options, 1)) for img in images]  # per-image SDMD features at pyramid level 1
    s_features = [pool.apply_async(computeSDMD, args=(img, dmd_options, 2)) for img in images]  # per-image SDMD features at pyramid level 2
    pool.close()
    pool.join()

    centers = encoder['centers']
    # vars = encoder['vars']
    # skews = encoder['skews']
    # Each of the three loops below handles one Gaussian-pyramid level:
    # 1. Take one image's SDMD features from computeSDMD (e.g. (80, 8649)) and
    #    transpose them into an (n_patch, n_feature) float32 matrix.
    # 2. Quantize every row against the cluster centers -> predicted_labels.
    # 3. For each center i with Ni assigned rows: Vm[i] = Ni * (mean of the
    #    assigned rows - centers[i]); Vc[i] = per-coordinate variance of the
    #    rows around their own mean minus the variance around the center.
    #    With 128 centers and 80-dim features this gives (128, 80) per image.
    # 4. V_all stacks Vm and Vc into (256, 80) and flattens it to one row;
    #    over 40 images this yields a (40, 256*80) encoding for the level.
    # Finally the level-0/1/2 (l/m/s) encodings are averaged into descrs.
    print('entering HVLAD')
    for features in l_features:
        features = features.get().T  # arrives as (80, 8649); transposed to (8649, 80)

        new_features = np.zeros((features.shape[0], features.shape[1]), dtype=np.float32)  # float32 buffer (8649, 80)
        new_features[:, :] = features[:, :]  # copy features into the buffer

        predicted_labels = kmeans_quantize(data=new_features, centers=centers)  # nearest-center index for every row
        n_cluster = centers.shape[0]  # centers: (128, 80)
        [n_patch, n_feature] = features.shape

        Vm = np.zeros([n_cluster, n_feature], dtype=np.float32)  # first-order residuals (128, 80)
        Vc = np.zeros([n_cluster, n_feature], dtype=np.float32)  # second-order residuals (128, 80)
        # Vs = np.zeros([n_cluster, n_feature], dtype=np.float32)
        for i in range(n_cluster):
            Ni = np.sum(predicted_labels == i)
            if Ni > 0:
                i_features = features[predicted_labels == i, :]  # rows assigned to center i: (Ni, 80)
                mi = np.mean(i_features, axis=0)  # mean of those rows: (80,)
                Vm[i] = Ni * (mi - centers[i])
                Vc[i] = (1 / Ni) * np.sum((i_features - mi) ** 2, axis=0) - (1 / Ni) * np.sum(
                    (i_features - centers[i]) ** 2, axis=0)  # variance around the cluster mean minus variance around the center
                # Vs[i] = ((1 / Ni) * (np.sum((i_features - mi) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - mi) ** 2, axis=0)) ** 1.5, 1e-12) - (
                #                 (1 / Ni) * (np.sum((i_features - centers[i]) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - centers[i]) ** 2, axis=0)) ** 1.5, 1e-12)
                #
        # power normalization, also called square-rooting normalization
        Vm = np.sign(Vm) * np.sqrt(np.abs(Vm))
        Vc = np.sign(Vc) * np.sqrt(np.abs(Vc))
        # Vs = np.sign(Vs) * np.sqrt(np.abs(Vs))
        # # L2 normalization
        # Vm /= np.maximum(np.linalg.norm(Vm, axis=1)[:, None], 1e-12)
        # Vc /= np.maximum(np.linalg.norm(Vc, axis=1)[:, None], 1e-12)
        # Vs /= np.maximum(np.linalg.norm(Vs, axis=1)[:, None], 1e-12)
        # V_all = np.vstack((Vm, Vc, Vs)).flatten()[None, :]
        V_all = np.vstack((Vm, Vc)).flatten()[None, :]  # stack to (256, 80), then flatten into a single row
        l_descrs = V_all if len(l_descrs) == 0 else np.concatenate((l_descrs, V_all), axis=0)

    for features in m_features:
        features = features.get().T

        new_features = np.zeros((features.shape[0], features.shape[1]), dtype=np.float32)
        new_features[:, :] = features[:, :]
        predicted_labels = kmeans_quantize(data=new_features, centers=centers)
        n_cluster = centers.shape[0]
        [n_patch, n_feature] = features.shape

        Vm = np.zeros([n_cluster, n_feature], dtype=np.float32)
        Vc = np.zeros([n_cluster, n_feature], dtype=np.float32)
        # Vs = np.zeros([n_cluster, n_feature], dtype=np.float32)
        for i in range(n_cluster):
            Ni = np.sum(predicted_labels == i)
            if Ni > 0:
                i_features = features[predicted_labels == i, :]
                mi = np.mean(i_features, axis=0)
                Vm[i] = Ni * (mi - centers[i])
                Vc[i] = (1 / Ni) * np.sum((i_features - mi) ** 2, axis=0) - (1 / Ni) * np.sum(
                    (i_features - centers[i]) ** 2, axis=0)
                # Vs[i] = ((1 / Ni) * (np.sum((i_features - mi) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - mi) ** 2, axis=0)) ** 1.5, 1e-12) - (
                #                 (1 / Ni) * (np.sum((i_features - centers[i]) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - centers[i]) ** 2, axis=0)) ** 1.5, 1e-12)
        # power normalization, also called square-rooting normalization
        Vm = np.sign(Vm) * np.sqrt(np.abs(Vm))
        Vc = np.sign(Vc) * np.sqrt(np.abs(Vc))
        # Vs = np.sign(Vs) * np.sqrt(np.abs(Vs))
        # # L2 normalization
        # Vm /= np.maximum(np.linalg.norm(Vm, axis=1)[:, None], 1e-12)
        # Vc /= np.maximum(np.linalg.norm(Vc, axis=1)[:, None], 1e-12)
        # Vs /= np.maximum(np.linalg.norm(Vs, axis=1)[:, None], 1e-12)
        # V_all = np.vstack((Vm, Vc, Vs)).flatten()[None, :]
        V_all = np.vstack((Vm, Vc)).flatten()[None, :]
        m_descrs = V_all if len(m_descrs) == 0 else np.concatenate((m_descrs, V_all), axis=0)

    for features in s_features:
        features = features.get().T

        new_features = np.zeros((features.shape[0], features.shape[1]), dtype=np.float32)
        new_features[:, :] = features[:, :]
        predicted_labels = kmeans_quantize(data=new_features, centers=centers)
        n_cluster = centers.shape[0]
        [n_patch, n_feature] = features.shape

        Vm = np.zeros([n_cluster, n_feature], dtype=np.float32)
        Vc = np.zeros([n_cluster, n_feature], dtype=np.float32)
        # Vs = np.zeros([n_cluster, n_feature], dtype=np.float32)
        for i in range(n_cluster):
            Ni = np.sum(predicted_labels == i)
            if Ni > 0:
                i_features = features[predicted_labels == i, :]
                mi = np.mean(i_features, axis=0)
                Vm[i] = Ni * (mi - centers[i])
                Vc[i] = (1 / Ni) * np.sum((i_features - mi) ** 2, axis=0) - (1 / Ni) * np.sum(
                    (i_features - centers[i]) ** 2, axis=0)
                # Vs[i] = ((1 / Ni) * (np.sum((i_features - mi) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - mi) ** 2, axis=0)) ** 1.5, 1e-12) - (
                #                 (1 / Ni) * (np.sum((i_features - centers[i]) ** 3, axis=0))) / np.maximum(
                #     ((1 / Ni) * np.sum((i_features - centers[i]) ** 2, axis=0)) ** 1.5, 1e-12)
        # power normalization, also called square-rooting normalization
        Vm = np.sign(Vm) * np.sqrt(np.abs(Vm))
        Vc = np.sign(Vc) * np.sqrt(np.abs(Vc))
        # Vs = np.sign(Vs) * np.sqrt(np.abs(Vs))
        # # L2 normalization
        # Vm /= np.maximum(np.linalg.norm(Vm, axis=1)[:, None], 1e-12)
        # Vc /= np.maximum(np.linalg.norm(Vc, axis=1)[:, None], 1e-12)
        # Vs /= np.maximum(np.linalg.norm(Vs, axis=1)[:, None], 1e-12)
        # V_all = np.vstack((Vm, Vc, Vs)).flatten()[None, :]
        V_all = np.vstack((Vm, Vc)).flatten()[None, :]
        s_descrs = V_all if len(s_descrs) == 0 else np.concatenate((s_descrs, V_all), axis=0)

    descrs = (l_descrs + m_descrs + s_descrs)/3
    return descrs.astype(np.float32)
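The three per-level loops are identical; a minimal refactor sketch (a
hypothetical encode_level helper with the same math, assuming numpy as np and
the kmeans_quantize binding used above):

def encode_level(async_features, centers):
    """Mean/variance VLAD encoding for one pyramid level (one row per image)."""
    n_cluster = centers.shape[0]
    rows = []
    for fut in async_features:
        feats = np.array(fut.get().T, dtype=np.float32)        # (n_patch, n_feature)
        labels = kmeans_quantize(data=feats, centers=centers)  # nearest-center indices
        Vm = np.zeros((n_cluster, feats.shape[1]), np.float32)
        Vc = np.zeros_like(Vm)
        for i in range(n_cluster):
            sel = feats[labels == i]
            if len(sel):
                mi = sel.mean(axis=0)
                Vm[i] = len(sel) * (mi - centers[i])
                Vc[i] = ((sel - mi) ** 2).mean(axis=0) - ((sel - centers[i]) ** 2).mean(axis=0)
        Vm = np.sign(Vm) * np.sqrt(np.abs(Vm))  # power normalization
        Vc = np.sign(Vc) * np.sqrt(np.abs(Vc))
        rows.append(np.vstack((Vm, Vc)).ravel())
    return np.asarray(rows, dtype=np.float32)

# encodeHVLAD's body would then reduce to:
# descrs = (encode_level(l_features, centers) + encode_level(m_features, centers)
#           + encode_level(s_features, centers)) / 3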