def cluster(img_name, n_cluster=10, method='kmeans_his'):
    """Cluster a set of images by their ORB features.

    Parameters
    ----------
    img_name : list of str
        Paths of the images to cluster.
    n_cluster : int
        Number of clusters to produce.
    method : str
        'single' / 'complete' / 'average' — agglomerative clustering on a
        precomputed pairwise distance matrix (inverse good-match count);
        'kmeans' — k-means over the pooled raw descriptors;
        'kmeans_his' — bag-of-visual-words: k-means over descriptors, then
        k-means over the per-image histograms of visual words.

    Returns
    -------
    list of list of str
        Element c holds the image names assigned to cluster c.
    """
    # Maximum ORB match distance for a correspondence to count as "good".
    threshold = 0.9

    orb = cv2.ORB_create(nfeatures=1000)
    n_img = len(img_name)

    # Read every image in grayscale.
    img_list = [cv2.imread(name, cv2.IMREAD_GRAYSCALE) for name in img_name]

    # Key points and descriptors per image, keyed by file name.
    img_values = {name: key_desc(orb, img)
                  for name, img in zip(img_name, img_list)}

    img_dst = np.ones(shape=(n_img, n_img))  # pairwise distance matrix
    desc_list = []   # all descriptors pooled together (k-means input)
    desc_final = []  # descriptors grouped per image (histogram input)

    for i in range(n_img):
        kp1, desc1 = img_values[img_name[i]]
        if method in ('complete', 'single', 'average'):
            for j in range(i + 1, n_img):
                kp2, desc2 = img_values[img_name[j]]
                dist = orb_features(kp1, kp2, desc1, desc2)
                good = {key: value
                        for key, value in dist.items() if value <= threshold}
                # Distance = inverse of the number of good matches.  A pair
                # with no good match at all is maximally dissimilar; the
                # previous code raised ZeroDivisionError here.
                d = 1.0 / len(good) if good else np.inf
                img_dst[i, j] = d
                img_dst[j, i] = d
        elif method in ('kmeans', 'kmeans_his'):
            desc_list.extend(desc1)
            desc_final.append(desc1)

    # NOTE: renamed from `cluster`, which shadowed this function's own name.
    model = None
    if method in ('average', 'single', 'complete'):
        # The three agglomerative variants differ only in the linkage name,
        # which matches the method string exactly.
        model = AgglomerativeClustering(n_clusters=n_cluster,
                                        affinity='precomputed',
                                        linkage=method).fit(img_dst)
    elif method in ('kmeans', 'kmeans_his'):
        np.random.seed(1)
        patch_kmeans = KMeans(n_clusters=n_cluster)
        model = patch_kmeans.fit(desc_list)
        # NOTE(review): for method == 'kmeans', model.labels_ has one entry
        # per *descriptor*, not per image, so the grouping below reads the
        # first n_img descriptor labels — confirm this is intended.

        if method == 'kmeans_his':
            # Bag-of-visual-words: one visual-word histogram per image,
            # then cluster the images by histogram.
            preprocessed_image = [build_histogram(desc, patch_kmeans)
                                  for desc in desc_final]
            model = KMeans(n_clusters=n_cluster).fit(preprocessed_image)

    print_list = [[] for _ in range(n_cluster)]
    for i in range(n_img):
        print_list[model.labels_[i]].append(img_name[i])

    return print_list
# ---- Example #2 ----
      cv2.imwrite(output_img, new_img)
    else:
        print('error in finding the transformation matrix!')
         
    

  elif sys.argv[1] == 'part3':
    target_img = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)
    source_img = cv2.imread(sys.argv[3], cv2.IMREAD_GRAYSCALE)
    output_img = sys.argv[4]
    threshold = 0.8
    
    orb = cv2.ORB_create(nfeatures=500)
    kp1, desc1 = key_desc(orb, target_img)
    kp2, desc2 = key_desc(orb, source_img)
    dist = orb_features(kp1, kp2, desc1, desc2)

    dist1 = {key:value for key,value in dist.items() if (value <= threshold)}

    final_inliers, final_proj, flag = ransac(4, dist1, kp1, kp2)

    target_img = cv2.imread(sys.argv[2])
    source_img = cv2.imread(sys.argv[3])

    if flag != 0:
      new_img, min_x, max_x, min_y, max_y = bilinear_interpolation(source_img, np.linalg.inv(final_proj), target_img)
      panorama_img = panorama(new_img, target_img, min_x, max_x, min_y, max_y)
      cv2.imwrite(output_img, panorama_img)
    else:
      print('error in running RANSAC!')
    
#!/usr/local/bin/python3
"""Demo: compute ORB feature matches between two images and visualize them."""
import cv2

from matches import drawMatches
from orb import orb_features

# Load the two sample crosswalk images in grayscale.
reference = cv2.imread(
    "/Users/rs/Documents/Indiana University/2nd Semester/CV/FInal project/data/pedestrian_crosswalk_sign/crosswalk0.jpg",
    cv2.IMREAD_GRAYSCALE)
query = cv2.imread(
    "/Users/rs/Documents/Indiana University/2nd Semester/CV/FInal project/data/pedestrian_crosswalk_sign/crosswalk2.jpg",
    cv2.IMREAD_GRAYSCALE)

# Distances plus the key points / descriptors of both images.
dist, kp1, kp2, desc1, desc2 = orb_features(query, reference)

# Keep only matches whose distance falls under the threshold.
threshold = 0.8
good_matches = {key: value for key, value in dist.items() if value <= threshold}

# Draw the surviving key-point correspondences between the two images.
drawMatches(query, reference, good_matches, kp1, kp2)
# ---- Example #4 ----
# -*- coding: utf-8 -*-
"""Build a vocabulary tree from ORB descriptors of a set of training images."""
import cv2
import numpy as np

from matcher import *
from orb import orb_features, update_image
from voc_tree import constructTree

N = 10  # number of images used to train the vocabulary
K = 5   # branching factor: clusters per tree node
L = 3   # depth of the vocabulary tree
n = 21  # number of test images
T = 1   # similarity threshold

# Extract ORB descriptors for the N training images.
image_descriptors = orb_features(N)

# Flatten the per-image descriptor lists into one float32 feature matrix.
FEATS = np.vstack([np.array(fv, dtype='float32')
                   for feats in image_descriptors
                   for fv in feats])

# Build the vocabulary tree from the stacked features, then wrap it.
treeArray = constructTree(K, L, np.vstack(FEATS))
tree = Tree(K, L, treeArray)