def featureSearch(queryImage, feats):
    """Rank the indexed images by similarity to a query image.

    The query image is embedded with ``extract_feat`` and compared against
    every stored feature vector with a dot product.  For unit-norm feature
    vectors (as produced by the extractor -- TODO confirm) a larger dot
    product means a smaller angle between the vectors; 1.0 means identical
    direction.

    :param queryImage: image path handed to ``extract_feat``.
    :param feats: 2-D array, one feature vector per indexed image.
    :return: ``(rank_ID, rank_score)`` -- indices into ``feats`` from most
        to least similar, and the corresponding similarity scores.
    """
    query_vector = extract_feat(queryImage)
    # One matrix-vector product scores every indexed image at once.
    similarity = np.dot(query_vector, feats.T)
    # argsort is ascending, so reverse to put the best match first.
    rank_ID = similarity.argsort()[::-1]
    rank_score = similarity[rank_ID]
    return rank_ID, rank_score
def etlFeature(post, img_list, h5filename):
    """Extract a feature vector per image and append it to an HDF5 file.

    Each image in ``img_list`` is embedded with ``extract_feat`` and written
    to ``h5filename`` at record index ``post + i``; ``post`` is the offset
    that lets incremental (iterative) indexing runs extend an existing file.

    :param post: index offset of the first record written.
    :param img_list: paths of the images to process.
    :param h5filename: target HDF5 file, passed through to ``wH5FileData``.
    :return: 0 on success, 1 if a write failed (the error is printed first).
    """
    total = len(img_list)
    bar = ProgBar(total, monitor=True, title="提取图片特征,Image Total:%d" % total)
    for i, img_path in enumerate(img_list):
        norm_feat = extract_feat(img_path)
        # Store only the bare file name, not the full path, as the label.
        names = [os.path.split(img_path)[1]]
        feats2 = np.array(norm_feat)
        try:
            wH5FileData(i + post, feats2, names, h5filename)
        except Exception as exc:
            # Was a bare ``except:`` that also caught SystemExit/KeyboardInterrupt
            # and hid the cause; report the actual error, keep failing fast.
            print("Feats Write Error:", exc)
            return 1
        bar.update()
    print(bar)
    return 0
'''
Extract features and index the images
'''
if __name__ == "__main__":
    # Folder of database images to index; ``args`` is the parsed CLI
    # argument dict defined earlier in the file.
    db = args["database"]
    img_list = get_imlist(db)

    print("--------------------------------------------------")
    print(" feature extraction starts")
    print("--------------------------------------------------")

    feats = []
    names = []
    # Embed every image and remember its bare file name alongside.
    for i, img_path in enumerate(img_list):
        norm_feat = extract_feat(img_path)
        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name)
        print("extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list)))

    # Stack the per-image vectors into one (n_images, dim) matrix.
    feats = np.array(feats)
    # names = np.array(names)

    # directory for storing extracted features
    output = args["index"]

    print("--------------------------------------------------")
    print(" writing feature extraction results ...")
    print("--------------------------------------------------")
    print(output)
    print(feats.shape)
# NOTE(review): this fragment is Python 2 (statement-form print).  The h5py
# file was opened and 'dataset_1' (the feature matrix ``feats``) read before
# this excerpt; 'dataset_2' holds the matching image file names.
imgNames = h5f['dataset_2'][:]
h5f.close()

print "--------------------------------------------------"
print " searching starts"
print "--------------------------------------------------"

# read and show query image
queryDir = args["query"]
queryImg = mpimg.imread(queryDir)
plt.title("Query Image")
plt.imshow(queryImg)
plt.show()

# extract query image's feature, compute similarity score and sort
queryVec = extract_feat(queryDir)
scores = np.dot(queryVec, feats.T)
# argsort is ascending; reverse so the best match comes first.
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
# print rank_ID
# print rank_score

# number of top retrieved images to show
maxres = 3
imlist = [imgNames[index] for i, index in enumerate(rank_ID[0:maxres])]
print "top %d images in order are: " % maxres, imlist

# show top #maxres retrieved result one by one
for i, im in enumerate(imlist):
    image = mpimg.imread(args["result"] + "/" + im)
    plt.title("search output %d" % (i + 1))
h5f.close() print "--------------------------------------------------" print " searching starts" print "--------------------------------------------------" # read and show query image queryDir = args["query"] queryImg = mpimg.imread(queryDir) plt.title("Query Image") plt.imshow(queryImg) plt.show() # extract query image's feature, compute simlarity score and sort queryVec = extract_feat(queryDir) scores = np.dot(queryVec, feats.T) rank_ID = np.argsort(scores)[::-1] rank_score = scores[rank_ID] #print rank_ID #print rank_score # number of top retrieved images to show maxres = 3 imlist = [imgNames[index] for i,index in enumerate(rank_ID[0:maxres])] print "top %d images in order are: " %maxres, imlist # show top #maxres retrieved result one by one for i,im in enumerate(imlist):
def process_classify_1_by_2(request):
    """Django view: find indexed painting sections similar to a posted image.

    Expects a POST carrying ``image`` (path of the query crop, with one
    leading character to strip) and ``label`` (class name used to filter the
    matches).  Looks up the nearest neighbours in the precomputed h5 feature
    index, maps each match back to its source painting, rescales the
    section's bounding box to the display size, and returns the rendered
    ``divtwo.html`` snippet.
    """
    # Grab the data sent by the page.
    if request.method == "POST":
        data = request.POST.get("image")
        print(data)
        # Drop the leading character the client prefixes to the path.
        data = data[1:]
        label = str(request.POST.get("label"))
        # Similarity query: load the pretrained similarity index.
        h5f = h5py.File('quanshanshui_index6688', 'r')
        feats = h5f['dataset_1'][:]
        imgNames = h5f['dataset_2'][:]
        h5f.close()
        queryDir = data
        queryVec = extract_feat(queryDir)
        scores = np.dot(queryVec, feats.T)
        rank_ID = np.argsort(scores)[::-1]
        maxres = 20
        i = 0
        # Skips the single best hit (presumably the query crop itself --
        # TODO confirm) and keeps the next maxres-1 matches.
        imlist1 = [
            imgNames[index] for i, index in enumerate(rank_ID[1:maxres])
        ]
        print("*****************************************")
        print(imlist1)
        images = {}   # display URL of each accepted section match
        yuantu = {}   # source painting info + scaled bounding box per match
        result = []
        i = 0
        # For each similar section, locate the source painting and compute
        # where the section's box sits on the resized display image.
        for j in imlist1:
            j = str(j)
            # Strip wrapper characters from the stored name, e.g. a bytes
            # repr b'name.jpg' -> name (first 2 and last 5 chars dropped).
            j = j[2:-5]
            print(j)
            obj = models.Psection.objects.filter(sectionname=str(j))
            if obj.exists():
                str1 = "/" + str(obj[0].section_url)
                label2 = str(obj[0].label)
                # Keep only matches of the requested class.
                if label == label2:
                    images[i] = str1
                    obj2 = models.Painting.objects.filter(id=obj[0].pid)
                    gg = "/media/" + str(obj2[0].picture_url)
                    filename = obj2[0].filename
                    print("filenme", filename)
                    border_list = []
                    yu_width = obj2[0].width    # original painting size
                    yu_height = obj2[0].height
                    xmin = obj[0].xmin          # section box, original coords
                    ymin = obj[0].ymin
                    xmax = obj[0].xmax
                    ymax = obj[0].ymax
                    border_width = xmax - xmin
                    border_height = ymax - ymin
                    # Scale factors from the original painting size down to
                    # the display viewport (max 590 wide / 700 tall, aspect
                    # ratio preserved -- TODO confirm viewport size).
                    h_w_rate = 1
                    w_h_rate = 1
                    h_rate = 1
                    w_rate = 1
                    if yu_width > yu_height:
                        # Landscape painting: width is the limiting side.
                        w_h_rate = yu_width / yu_height
                        print("w_h_rate %s" % w_h_rate)
                        if yu_width > 590:
                            w_rate = yu_width / 590
                            h_rate = yu_height / (590 / w_h_rate)
                        elif yu_height > 700:
                            h_rate = yu_height / 700
                            w_rate = yu_width / (700 / w_h_rate)
                    else:
                        # Portrait painting: height is the limiting side.
                        h_w_rate = yu_height / yu_width
                        print("h_w_rate %s" % h_w_rate)
                        if yu_height > 700:
                            h_rate = yu_height / 700
                            w_rate = yu_width / (700 / h_w_rate)
                        elif yu_width > 590:
                            w_rate = yu_width / 590
                            h_rate = yu_height / (590 / h_w_rate)
                    # Rescale the section box into display coordinates.
                    border_height = border_height / h_rate
                    border_width = border_width / w_rate
                    xmin = xmin / w_rate
                    ymin = ymin / h_rate
                    border_list.append(xmin)
                    # +55: vertical offset of the image on the page --
                    # TODO confirm against the template.
                    border_list.append(ymin + 55)
                    border_list.append(border_width)
                    border_list.append(border_height)
                    border_list.append(filename)
                    border_list.append(gg)
                    yuantu[i] = border_list
                    i += 1
        print("相似查询")
        print(images)
        print(yuantu)
        print("相似查询")
        content = render_to_string("divtwo.html", {
            "image123": images,
            "yuantu": yuantu
        })
        result.append(content)
        return HttpResponse("".join(result))
'''
Extract features and index the images
'''
# NOTE(review): this fragment is Python 2 (statement-form print).
if __name__ == "__main__":
    # Folder of database images to index; ``args`` is the parsed CLI
    # argument dict defined earlier in the file.
    db = args["database"]
    img_list = get_imlist(db)

    print "--------------------------------------------------"
    print " feature extraction starts"
    print "--------------------------------------------------"

    feats = []
    names = []
    # Embed every image and remember its bare file name alongside.
    for i, img_path in enumerate(img_list):
        norm_feat = extract_feat(img_path)
        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name)
        print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))

    # Stack the per-image vectors into one (n_images, dim) matrix.
    feats = np.array(feats)

    # directory for storing extracted features
    output = args["index"]

    print "--------------------------------------------------"
    print " writing feature extraction results ..."
    print "--------------------------------------------------"

    # Persist the feature matrix; names are written to 'dataset_2' beyond
    # this excerpt (not visible here).
    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data = feats)