Exemplo n.º 1
0
def get_image_search(im_file):
    """Return the top-5 most similar database images for a query image.

    Parameters
    ----------
    im_file : str
        Path to the query image.

    Returns
    -------
    dict
        Mapping of image name -> similarity score (both as strings) for the
        five highest-ranked matches from the pre-built 'featureCNN.h5' index.
    """
    # Load the pre-computed feature matrix and parallel name list; the
    # context manager guarantees the file is closed even on error
    # (the original left it open if a read raised).
    with h5py.File('featureCNN.h5', 'r') as h5f:
        feats = h5f['dataset_1'][:]
        imgNames = h5f['dataset_2'][:]

    # Cap this process at ~10% of GPU memory before building the model.
    # NOTE(review): the session object is never used afterwards — presumably
    # created only for its GPU-option side effect; confirm it is needed.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.1
    session = tf.Session(config=config)

    model = VGGNet()
    q_vector = model.extract_feat(im_file)
    # Release the Keras/TF graph so repeated calls do not accumulate state.
    clear_session()
    tf.reset_default_graph()

    # Dot-product similarity against every stored feature (features are
    # presumably L2-normalised — TODO confirm), ranked best-first.
    scores = np.dot(q_vector, feats.T)
    rank_ID = np.argsort(scores)[::-1]
    rank_score = scores[rank_ID]

    maxres = 5
    im_list = [str(imgNames[index].decode()) for index in rank_ID[:maxres]]
    im_score = [str(rank_score[i]) for i in range(maxres)]
    # NOTE: duplicate image names would collapse into a single dict entry.
    return dict(zip(im_list, im_score))
Exemplo n.º 2
0
def index_all_pictures(database):
    """Extract a VGG feature for every image under *database* and write the
    features plus image names to /S3bucket/static/index.h5.

    Files whose basename contains '_t' (thumbnails, presumably — TODO
    confirm) are skipped.
    """
    img_list = get_imlist(database)

    print "--------------------------------------------------"
    print "         feature extraction starts"
    print "--------------------------------------------------"
    print img_list
    feats = []
    names = []

    model = VGGNet()
    for i, img_path in enumerate(img_list):
        # Only index originals: skip any path whose basename contains '_t'.
        if img_path.split('/')[-1].find('_t') == -1:
            norm_feat = model.extract_feat(img_path)
            img_name = os.path.split(img_path)[1]
            feats.append(norm_feat)
            names.append(img_name)

    feats = np.array(feats)
    # directory for storing extracted features

    # dataset_1 holds the feature matrix, dataset_2 the parallel name list.
    h5f = h5py.File('/S3bucket/static/index.h5', 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=names)
    h5f.close()
Exemplo n.º 3
0
def find_similar(pic_path, h5_name, database):
    """Return the names of the top-2 indexed images most similar to *pic_path*.

    *h5_name* is an HDF5 index (dataset_1 = feature matrix, dataset_2 =
    image names); *database* is the directory containing the query image.
    """
    h5f = h5py.File(h5_name, 'r')
    feats = h5f['dataset_1'][:]
    imgNames = h5f['dataset_2'][:]
    h5f.close()

    print "--------------------------------------------------"
    print "               searching starts"
    print "--------------------------------------------------"

    # init VGGNet16 model
    model = VGGNet()

    # extract query image's feature, compute simlarity score and sort
    queryVec = model.extract_feat(database + '/' + pic_path)
    # Dot product against every stored feature; highest score = most similar.
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]
    rank_score = scores[rank_ID]
    #print rank_ID
    #print rank_score

    # number of top retrieved images to show
    maxres = 2
    imlist = [imgNames[index] for i, index in enumerate(rank_ID[0:maxres])]
    return imlist
Exemplo n.º 4
0
def query(img, num):
    """Return the names (as str) of the *num* images most similar to *img*.

    Looks the query up against the pre-built "featureCNN.total" HDF5 index
    (dataset_1 = feature matrix, dataset_2 = image names stored as bytes).
    """
    # Context manager ensures the index file is closed even on error
    # (the original h5f.close() was skipped if a read raised).
    with h5py.File("featureCNN.total", 'r') as h5f:
        feats = h5f['dataset_1'][:]
        imgNames = h5f['dataset_2'][:]

    # Read the query image up front (kept for parity with the original:
    # it fails fast on an unreadable path, though the pixels are unused).
    queryDir = img
    queryImg = mpimg.imread(queryDir)

    # init VGGNet16 model
    model = VGGNet()

    # Embed the query and rank every stored feature by dot-product
    # similarity, best match first.
    queryVec = model.extract_feat(queryDir)
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]

    # Top *num* names, decoded from bytes to str.
    imlist = [imgNames[index] for index in rank_ID[:num]]
    return [str(im, 'utf-8') for im in imlist]
Exemplo n.º 5
0
def query(self, image):
    """Return the names of the 10 database images most similar to *image*.

    Features are read from the 'featureCNN.h5' index (dataset_1 = feature
    matrix, dataset_2 = image names); similarity is the dot product of the
    feature vectors (presumably L2-normalised — TODO confirm).
    """
    # Load the pre-built index; 'with' guarantees the file is closed.
    with h5py.File('featureCNN.h5', 'r') as h5f:
        feats = h5f['dataset_1'][:]
        imgNames = h5f['dataset_2'][:]

    # init VGGNet16 model
    model = VGGNet()
    # extract query image's feature
    queryVec = model.extract_feat(image)

    # Dot-product similarity against every stored feature.  (The original
    # also filled an unused list of Euclidean distances over a hard-coded
    # range of 10699 entries — dead code that would crash for any other
    # database size; it has been removed.)
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]

    # number of top retrieved images to show
    maxres = 10
    imlist = [imgNames[index] for index in rank_ID[:maxres]]
    print("top %d images in order are: " % maxres, imlist)
    return imlist
def import_images(folder_path):
    """Index every image in *folder_path*.

    Extracts a VGG feature per image, copies each image into the 'total'
    directory, appends the new features/names to the existing
    "featureCNN.total" HDF5 index, then runs the f1/f2 post-processing
    passes over the folder.
    """
    db = "total"
    folder = folder_path
    img_list = get_imlist(folder)
    output = "featureCNN.total"

    feats = []
    names = []

    model = VGGNet()
    for i, img_path in enumerate(img_list):
        norm_feat = model.extract_feat(img_path)
        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name)
        # Copy the image into the shared database directory.
        img = Image.open(folder + '/' + img_name)
        img.save(db + '/' + img_name)
        print("extracting feature from image No. %d , %d images in total" %
              ((i + 1), len(img_list)))

    # Merge the new entries with what is already indexed; context managers
    # guarantee the HDF5 handles are closed between the read and rewrite
    # (the original could leave the file open on error).
    with h5py.File(output, 'r') as h5f:
        names_np = [str(i, 'utf-8') for i in h5f['dataset_2'][:]]
        print(len(names_np))
        names_np.extend(names)
        print(len(names_np))

        features_np = list(h5f['dataset_1'][:])
        print(len(features_np))
        features_np.extend(feats)
        print(len(features_np))
        features_np = np.array(features_np)

    # Rewrite the index with the merged data.
    with h5py.File(output, 'w') as h5f:
        h5f.create_dataset('dataset_1', data=features_np)
        h5f.create_dataset('dataset_2', data=np.string_(names_np))

    f1.processFolder(folder)
    f2.processFolder(folder)
Exemplo n.º 7
0
def query(query, maxres=10, index='/root/CSC4001/featureCNN.h5'):
    """Return the names of the *maxres* images most similar to *query*.

    Parameters
    ----------
    query : str
        Path to the query image.
    maxres : int
        Number of top matches to return.
    index : str
        Path to the HDF5 index (dataset_1 = features, dataset_2 = names).
    """
    # Load the pre-built index; 'with' closes the file even on error.
    with h5py.File(index, 'r') as h5f:
        feats = h5f['dataset_1'][:]
        imgNames = h5f['dataset_2'][:]

    print("--------------------------------------------------")
    print("               searching starts")
    print("--------------------------------------------------")

    # Read the query image up front so an unreadable path fails fast
    # (the decoded pixels themselves are not used further).
    queryDir = query
    queryImg = mpimg.imread(queryDir)

    # NOTE(review): tf is imported but otherwise unused here — presumably
    # kept for import-time side effects before building the model; confirm.
    import tensorflow as tf

    # init VGGNet16 model
    model = VGGNet()

    # Embed the query and rank the database by dot-product similarity.
    queryVec = model.extract_feat(queryDir)
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]

    # Top *maxres* image names, best match first.  (The original
    # comprehension rebound the 'index' parameter; 'idx' avoids that.)
    imlist = [imgNames[idx] for idx in rank_ID[:maxres]]
    print("top %d images in order are: " % maxres, imlist)
    return imlist
Exemplo n.º 8
0
    def query(self, feats, imgNames, queryDir, label, N, Nrel, Nrel_all,
              query_path):
        """Rank the database against one query image and evaluate the result.

        Embeds the image at *queryDir*, scores it against every row of
        *feats*, keeps the top N-1 names from *imgNames*, and delegates
        scoring of that ranking to self.rank1. Returns (rank, AP).
        """
        banner = "--------------------------------------------------"
        print(banner)
        print("               searching starts")
        print(banner)

        # Build the extractor and embed the query image.
        extractor = VGGNet()
        query_feat = extractor.extract_feat(queryDir)

        # Dot-product similarity against every database feature,
        # sorted best-first.
        similarity = np.dot(query_feat, feats.T)
        order = np.argsort(similarity)[::-1]
        rank_score = similarity[order]

        # Keep the top N-1 retrieved names (best match first).
        maxres = N - 1
        imlist = [imgNames[pos] for pos in order[:maxres]]

        # Evaluate the ranking against the ground-truth label.
        rank, AP = self.rank1(N, Nrel, Nrel_all, label, imlist)
        print('rank =', rank)
        return rank, AP
Exemplo n.º 9
0
def index():
    """Build the HDF5 feature index for every image in args["database"].

    Writes the feature matrix to dataset_1 and the UTF-8-encoded image
    names to dataset_2 of the file named by args["index"].
    """
    separator = "--------------------------------------------------"

    db = args["database"]
    img_list = get_imlist(db)

    print(separator)
    print("         feature extraction starts")
    print(separator)

    feats = []
    names = []

    model = VGGNet()
    total = len(img_list)
    for i, img_path in enumerate(img_list):
        norm_feat = model.extract_feat(img_path)

        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name.encode('utf-8'))
        print("extracting feature from image No. %d , %d images in total" %
              ((i + 1), total))

    feats = np.array(feats)
    # destination file for the extracted features
    output = args["index"]

    print(separator)
    print("      writing feature extraction results ...")
    print(separator)

    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=np.string_(names))
    h5f.close()
Exemplo n.º 10
0
    del(dataSet[:])
    """
    starttime2 = datetime.datetime.now()
    print("reader datas ok! using time is", (starttime2 - starttime1).seconds)

    model = VGGNet()
    image_list_1000 = os.listdir(
        dataPath + "acmmm/Locality-sensitive-hashing-master/sample/testset_v1"
    )  #[0:100]#sample/sample_pic")
    image_list_1000.sort()
    image_list = image_list_1000
    #del(image_list_1000[:])
    for image in image_list:
        #queryVec = model.extract_feat(dataPath+file_class[class_index]+picture_name)
        image_path = dataPath + "acmmm/Locality-sensitive-hashing-master/sample/testset_v1/" + image
        queryVec_norm = model.extract_feat(image_path)
        #queryVec=np.hstack((np.hstack((k1*queryVec[0:256],k2*queryVec[256:768])),k3*queryVec[768:1280]))
        query_all.append(queryVec_norm)
    starttime3 = datetime.datetime.now()
    print("feature_extract is ok! using time is",
          (starttime3 - starttime2).seconds)
    #del(image_list[:])
    #A=np.array(dataSet)
    B = np.array(query_all)
    del (query_all[:])
    """
    f3=np.dot(B[:,0:256],A.T[0:256])
    f4=np.dot(B[:,256:768],A.T[256:768])
    f5=np.dot(B[:,768:1280],A.T[768:1280])
    
Exemplo n.º 11
0
def query_online(path):
    """Search the indexed database for images similar to the image at *path*.

    Returns a tuple (imlist1, args, all_scores):
      * imlist1    - up to 15 image names (bytes) from the same '_'-prefixed
                     group as the best match, ordered by descending score,
      * args       - the parsed command-line defaults (index/result paths),
      * all_scores - every score in that group, sorted descending.
    """
    ap = argparse.ArgumentParser(description="This is a example program ")
    ap.add_argument("-index", required=False, default='model1.h5',
                    help="Path to index")
    ap.add_argument("-result", required=False, default='img',
                    help="Path for output retrieved images")
    args = vars(ap.parse_args())

    # Load the indexed feature vectors and image names; 'with' guarantees
    # the HDF5 file is closed on any error (the original left it open).
    with h5py.File(args["index"], 'r') as h5f:
        feats = h5f['dataset_1'][:]
        print("111", feats)
        imgNames = h5f['dataset_2'][:]
        print("222", imgNames)

    # Module-level counter presumably used elsewhere for figure placement.
    global index_t
    index_t = 1

    # init VGGNet16 model
    model = VGGNet()

    # Embed the query image and score it against the whole database.
    queryVec = model.extract_feat(path)
    # NOTE(review): assumes the path has at least 11 '/'-separated parts;
    # shorter paths raise IndexError — confirm this is intended.
    name = str(path).split('/')[10]
    print(name)
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]

    # Collect every database entry sharing the best match's group prefix
    # (the part of the name before the first underscore).  The prefix is
    # hoisted out of the loop; the original recomputed it per iteration.
    maxres = 15
    imlist = []
    all_scores = []
    best_prefix = imgNames[rank_ID[0]].split(b'_')[0]
    for i in range(0, imgNames.shape[0]):
        if imgNames[i].split(b'_')[0] == best_prefix:
            all_scores.append(scores[i])
            imlist.append(imgNames[i])

    # Order the group's names and scores by descending score.
    rank_index = np.argsort(all_scores)[::-1]
    all_scores.sort(reverse=True)
    imlist1 = [imlist[idx] for idx in rank_index[:maxres]]
    print("top %d images in order are: " % maxres, imlist1)

    return imlist1, args, all_scores
    query_file = csv.writer(query_csv)

    #model_org = VGGNet_org()
    model = VGGNet()
    file_order = []
    lost = []
    t = 0
    starttime1 = datetime.datetime.now()
    for i, img_path in enumerate(datas):
        if not img_path.strip() == "":
            try:
                #img_path=img_path.split(",")[1]
                path_picture = dataPath + file_class[int(img_path[4]) -
                                                     1] + img_path
                #norm_feat=model.extract_feat(path_picture)
                feature_norm_data = model.extract_feat(path_picture)
                #print("start!")
                #norm_feat_org=model_org.extract_feat(path_picture)
                #norm_feat=extract_feat_3_yalers(path_picture)
                file_order = [t, img_path]
                print(
                    "extracting feature num  from image No. %d , %d images in total"
                    % ((t), len(datas)), feature_norm_data.shape)
                t += 1
                #writer.writerow(feature_data)
                writer1.writerow(feature_norm_data)
                #writer_org.writerow(norm_feat_org)
                query_file.writerow(file_order)
                if t % 520 == 0:
                    starttime2 = datetime.datetime.now()
                    time_using = (starttime2 - starttime1).seconds
Exemplo n.º 13
0
        imname = line.strip()
        imno = int(imname[:-len(".jpg")])
        if imno % 100 == 0:
            gt.append(imname)
    return (gt)


gt = get_groundtruth()
print(gt)

# init VGGNet16 model
model = VGGNet()

# For every ground-truth query image, retrieve the top-10 matches and
# append one result line per query to peng_result.txt.
for queryDir in gt:
    queryImg = mpimg.imread(str('database/' + queryDir))
    queryVec = model.extract_feat(str('database/' + queryDir))
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]
    rank_score = scores[rank_ID]
    maxres = 10
    print('-----------------------------')
    imlist = [imgNames[index] for i, index in enumerate(rank_ID[0:maxres])]

    # 'with' closes the result file after each query; the original opened
    # a new append-mode handle per iteration and never closed any of them.
    with open('peng_result.txt', "a") as f:
        f.write(queryDir + ' ')
        for i, index in enumerate(rank_ID[0:maxres]):
            # Strip the b'...' wrapper from the bytes repr to get the name.
            img = str(imgNames[index]).strip('b')
            img = img.strip('\'')
            f.write(' ' + str(i) + ' ' + str(img))
        f.write('\n')
Exemplo n.º 14
0
print("sdfsdf")
# Compute and display the CNN feature vector of one sample image.
from extract_cnn_vgg16_keras import VGGNet

model = VGGNet()
file_path = "./files/zki611nmFGAAqpTAAvqHw7LRwM947.jpg"
feature = model.extract_feat(file_path).tolist()
print(feature)
Exemplo n.º 15
0
print("--------------------------------------------------")
print("               searching starts")
print("--------------------------------------------------")

# read and show query image
queryDir = args["query"]
queryImg = mpimg.imread(queryDir)  # read the query image from disk
plt.title("Query Image")
plt.imshow(queryImg)  # display the image
plt.show()

# init VGGNet16 model
model = VGGNet()

# extract query image's feature, compute similarity score and sort
queryVec = model.extract_feat(queryDir)  # change here to swap the feature-extraction network
scores = np.dot(queryVec, feats.T)  # dot product against every stored feature
rank_ID = np.argsort(scores)[::-1]  # indices of scores, highest first
rank_score = scores[rank_ID]
#print rank_ID
#print rank_score

# number of top retrieved images to show
maxres = 4
'''
imlist = [imgNames[index] for i,index in enumerate(rank_ID[0:maxres])]
#print ("top %d images in order are: " %maxres , imlist)
print("top %d images in order are: "%maxres +" scores is: %f" %rank_score[i], imlist)
#print("image names: " + str(imgNames[index]) + " scores: %f" % rank_score[i])
'''
imlist = []
Exemplo n.º 16
0
    for i in range(0, len(classesAndNum)):
        classes.append(classesAndNum[i][0:3])

    querysNum = len(queryImgs)  #15247

    ap = np.zeros(querysNum)

    total_start = time.time()
    for i in range(0, querysNum):
        start = time.time()
        queryName = basepath + queryImgs[i]
        # print(queryName)
        queryClass = queryImgs[i][0:3]
        # print(queryClass)
        # extract query image's feature, compute simlarity score and sort
        queryFeat = model.extract_feat(queryName)
        # print(classesAndNum[0].size)
        queryClassNum = classesAndNum[classes.index(queryClass)].split(' ')[1]

        scores = np.dot(queryFeat, feats.T)
        rank_ID = np.argsort(scores)[::-1]
        rank_score = scores[rank_ID]

        # number of top retrieved images to showN
        imlist = [imgNames[index] for i, index in enumerate(rank_ID[0:N])]
        # print(imlist)

        similarTerm = 0
        precision = np.zeros(N)

        for k in range(0, N):
Exemplo n.º 17
0
class LOGO():
    """Logo detector + retriever.

    YOLO finds logo crops in an input image; a VGG feature per crop is
    matched against the pre-built 'logo_feature.h5' database.
    """

    def __init__(self):
        self.yolo = YOLO()
        self.model = VGGNet()
        # Pre-load the feature matrix and name list once at start-up.
        self.feats, self.imgNames = self.read_databae()

    def read_databae(self):
        """Load (features, image names) from logo_feature.h5.

        (Method name kept as-is — 'databae' typo — for caller compatibility.)
        """
        # 'with' guarantees the file is closed even if a read fails
        # (the original left it open on error).
        with h5py.File('logo_feature.h5', 'r') as h5f:
            feats = h5f['dataset_1'][:]
            imgNames = h5f['dataset_2'][:]
        return feats, imgNames

    def get_imlist(self, path):
        """Return full paths of all *.jpg files directly under *path*."""
        return [
            os.path.join(path, f) for f in os.listdir(path)
            if f.endswith('.jpg')
        ]

    def create_database(self, data_path):
        """Extract a feature for every .jpg in *data_path* and (re)write
        the logo_feature.h5 index. Returns True on completion."""
        db = data_path
        img_list = self.get_imlist(db)
        print("--------------------------------------------------")
        print("         feature extraction starts")
        print("--------------------------------------------------")
        feats = []
        names = []
        for i, img_path in enumerate(img_list):
            img = cv2.imread(img_path)
            norm_feat = self.model.extract_feat(img)
            img_name = os.path.split(img_path)[1]
            feats.append(norm_feat)
            names.append(img_name)
            print("extracting feature from image No. %d , %d images in total" %
                  ((i + 1), len(img_list)))
        feats = np.array(feats)

        # directory for storing extracted features
        output = 'logo_feature.h5'
        print("--------------------------------------------------")
        print("      writing feature extraction results ...")
        print("--------------------------------------------------")
        # Context manager replaces the explicit close() of the original.
        with h5py.File(output, 'w') as h5f:
            h5f.create_dataset('dataset_1', data=feats)
            h5f.create_dataset('dataset_2', data=np.string_(names))
        return True

    def search_img(self, query):
        """Detect logos in the image at *query* and return the best database
        match for the first detection as (file, elapsed_seconds, score).

        Returns None implicitly when YOLO finds no logo (unchanged from the
        original behaviour).
        """
        print("--------------------------------------------------")
        print("               searching starts")
        print("--------------------------------------------------")

        t2 = time.time()
        img = cv2.imread(query)
        image, logos = self.yolo.detect_image(img)

        # NOTE: the return statement sits inside the loop, so only the
        # first detected logo is ever matched.
        for logo in logos:
            t4 = time.time()
            queryVec = self.model.extract_feat(logo)
            scores = np.dot(queryVec, self.feats.T)
            rank_ID = np.argsort(scores)[::-1]
            rank_score = scores[rank_ID]
            t5 = time.time() - t4
            print('t5=' + str(t5))
            print("-------------------------------------------")
            print(rank_ID)
            print(rank_score)
            print("-------------------------------------------")

            # Best match only; str(imlist)[3:-2] strips the [b'...'] wrapper.
            imlist = [self.imgNames[index] for index in rank_ID[0:1]]
            file = 'database/' + str(imlist)[3:-2]
            t3 = time.time() - t2
            print('t3=' + str(t3))
            print(rank_score[0])
            return file, t3, rank_score[0]
Exemplo n.º 18
0
def search(request):
    """Django view: combined image + text relevance search for a query term.

    Pipeline as visible here:
      1. download up to 20 training images for the POSTed query via
         google_images_download (cached under BASE_DIR/database/<query>),
      2. extract a VGG feature per training image,
      3. take the first 5 Google-search result sites for the query; score
         each site's paragraph text by TF-IDF and its images by feature
         similarity against the training features,
      4. stream progress through pusher and render the combined ranking.
    """
    query = request.POST['message']
    max_images = 20
    save_directory = os.path.join(BASE_DIR, 'database')
    query_directory = os.path.join(BASE_DIR, 'query')
    image_type = "Action"

    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "          Downloading Training images"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})

    # Download training images only once per query term (cached on disk).
    if query not in os.listdir(save_directory):
        response = google_images_download.googleimagesdownload(
        )  #class instantiation
        arguments = {
            "keywords": query,
            "limit": max_images,
            "print_urls": True,
            "output_directory": save_directory
        }  #creating list of arguments
        paths = response.download(arguments)

    db = os.path.join(save_directory, query)
    img_list = get_imlist(db)

    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "          feature extraction starts"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})

    feats = []
    names = []

    # Extract a VGG feature for every downloaded training image; unreadable
    # files are skipped with a warning rather than aborting the search.
    model = VGGNet()
    for i, img_path in enumerate(img_list):
        try:
            norm_feat = model.extract_feat(img_path)
            img_name = os.path.split(img_path)[1]
            feats.append(norm_feat)
            names.append(img_name)
            print("extracting feature from image No. %d , %d images in total" %
                  ((i + 1), len(img_list)))
            msg = "Extracting feature from image No." + str(
                i + 1) + " images in total " + str(len(img_list))
            pusher_client.trigger('texim', 'my-event', {'message': msg})
        except Exception:
            print "Skipping Unexpected Error:", sys.exc_info()[1]
            msg = "Skipping Unexpected Error:" + str(sys.exc_info()[1])
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            pass

    feats = np.array(feats)
    names = np.array(names)
    # print(feats)
    # directory for storing extracted features
    # output = os.path.join(BASE_DIR,'feature.h5')

    print("--------------------------------------------------")
    print("      writing feature extraction results ...")
    print("--------------------------------------------------")

    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "      writing feature extraction results ..."
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})

    # FEATURE.h5
    # h5f = h5py.File(output, 'w')
    # h5f.create_dataset('dataset_1', data = feats)
    # # h5f.create_dataset('dataset_2', data = names)
    # h5f.create_dataset('dataset_2', data = names)
    # h5f.close()

    # # read in indexed images' feature vectors and corresponding image names
    # h5f = h5py.File(output,'r')
    # # feats = h5f['dataset_1'][:]
    # feats = h5f.get('dataset_1')
    # # print(feats)
    # feats = np.array(feats)
    # #imgNames = h5f['dataset_2'][:]
    # imgNames = h5f.get('dataset_2')
    # # print(imgNames)
    # imgNames = np.array(imgNames)
    #h5f.close()

    # print(feats)
    # print(imgNames)

    print("--------------------------------------------------")
    print("               searching starts")
    print("--------------------------------------------------")

    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "             searching starts"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})

    # read and show query image

    sites = []
    N = 5

    #Google search
    # Take the first N result URLs returned for the query term.
    for url in ims(query, stop=13):
        print(url)
        sites.append(url)
    sites = sites[:N]
    print(sites)

    # sites = ['https://www.cars.com/',]
    total_img_scores = []
    doc_dic = []
    # Collect each site's paragraph text for TF-IDF scoring; sites that
    # fail to load contribute no document (best-effort).
    for site in sites:
        try:
            soup = bs(urllib2.urlopen(site), "html5lib")
            drc = ""
            for p in soup.find_all('p'):
                drc += p.getText()
            doc_dic.append(drc)
        except Exception:
            pass

    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "          Ranking documents on basis of tf-idf scores "
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})

    # Rank the candidate sites' text by TF-IDF relevance to the query.
    doc_info = get_doc(doc_dic)
    fdl = create_freq_dicts(doc_dic)
    TF_score = computeTF(doc_info, fdl, query)
    IDF_score = computeIDF(doc_info, fdl, query)
    TFIDF_scores = computeTFIDF(TF_score, IDF_score)

    total_doc_scores = [0 for x in range(len(sites))]

    for el in TFIDF_scores:
        total_doc_scores[el['doc_id'] - 1] = el['TFIDF_score']

    total_doc_scores = np.array(total_doc_scores)
    total_doc_scores.reshape((1, -1))
    rank_ID2 = np.argsort(total_doc_scores)[::-1]
    rank_score2 = total_doc_scores[rank_ID2]
    maxres = N
    doclist = [sites[index] for i, index in enumerate(rank_ID2[0:maxres])]
    print("doclist")
    print(doclist)
    print(rank_score2)

    pusher_client.trigger('results', 'my-event', {"doclist": doclist})

    # Score each site's images against the training-image features;
    # any failure scores the site as zero (best-effort).
    for site in sites:
        try:
            soup = bs(urllib2.urlopen(site), "html5lib")
            img_tags = soup.find_all('img')
            print(img_tags)

            queryDir = os.path.join(query_directory, str(sites.index(site)))
            os.mkdir(queryDir)
            print("directory created")

            urls = []
            for img in img_tags:
                try:
                    urls.append(img['src'])
                except Exception:
                    pass

            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "          Downloading Query Images for Site-" + str(
                sites.index(site) + 1)
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})

            # Download every <img> the site references into queryDir.
            for url in urls:
                filename = re.search(r'/([\w_-]+[.](jpg|gif|png))$', url)
                try:
                    if 'http' not in url:
                        url = '{}{}'.format(site, url)
                    imgdata = urllib2.urlopen(url).read()
                    filname = basename(urlsplit(url)[2])
                    output = open(os.path.join(queryDir, filname), 'wb')
                    output.write(imgdata)
                    output.close()
                except Exception:
                    print "Skipping Unexpected Error:", sys.exc_info()[1]
                    pass

            img_list = get_imlist(queryDir)
            qfeats = []
            qnames = []

            model = VGGNet()
            for i, img_path in enumerate(img_list):
                try:
                    norm_feat = model.extract_feat(img_path)
                    img_name = os.path.split(img_path)[1]
                    qfeats.append(norm_feat)
                    qnames.append(img_name)
                except Exception:
                    print "Skipping Unexpected Error:", sys.exc_info()[1]
                    pass

            qfeats = np.array(qfeats)
            qnames = np.array(qnames)

            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "          Calculating Image Score for Site-" + str(
                sites.index(site) + 1)
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})

            model = VGGNet()

            # extract query image's feature, compute simlarity score and sort
            if qfeats.any():
                scores = []
                scores = np.array(scores)
                for qD in feats:
                    #qV = model.extract_feat(qD)
                    if scores.any():
                        scores += np.dot(qD, qfeats.T)
                    else:
                        scores = np.dot(qD, qfeats.T)
            else:
                scores = [0]
                scores = np.array(scores)

            total_img_scores.append(np.sum(scores))
        except Exception:
            scores = [0]
            scores = np.array(scores)
            total_img_scores.append(np.sum(scores))
            pass

    total_img_scores = np.array(total_img_scores)
    total_img_scores.reshape((1, -1))
    rank_ID1 = np.argsort(total_img_scores)[::-1]
    rank_score1 = total_img_scores[rank_ID1]
    maxres = N
    imlist = [sites[index] for i, index in enumerate(rank_ID1[0:maxres])]
    print("imlist")
    print(imlist)
    print(rank_score1)
    shutil.rmtree(query_directory)
    os.mkdir(query_directory)
    image_type = "Action"

    # Final ranking = element-wise sum of image and document scores.
    final_scores = [sum(x) for x in zip(total_img_scores, total_doc_scores)]
    final_scores = np.array(final_scores)
    final_scores.reshape((1, -1))
    rank_ID3 = np.argsort(final_scores)[::-1]
    rank_score3 = final_scores[rank_ID3]

    totlist = [sites[index] for i, index in enumerate(rank_ID3[0:maxres])]
    print("totlist")
    print(totlist)
    print(rank_score3)

    pusher_client.trigger('results', 'my-event', {"totlist": totlist})
    K.clear_session()
    return render(request, 'texim/search_results.html', {
        "totlist": totlist,
        "doclist": doclist
    })
Exemplo n.º 19
0
'''
if __name__ == "__main__":

    db = args["database"]
    img_list = get_imlist(db)

    print("--------------------------------------------------")
    print("         feature extraction starts")
    print("--------------------------------------------------")

    feats = []
    names = []

    model = VGGNet()
    for i, img_path in enumerate(img_list):
        norm_feat = model.extract_feat(img_path)
        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name.encode())
        print("extracting feature from image No. %d , %d images in total" %
              ((i + 1), len(img_list)))

    feats = np.array(feats)
    # directory for storing extracted features
    output = args["index"]

    print("--------------------------------------------------")
    print("      writing feature extraction results ...")
    print("--------------------------------------------------")

    h5f = h5py.File(output, 'w')
print ("               searching starts")

    
# read and show query image
queryDir = args["query"]
queryImg = mpimg.imread(queryDir)
plt.title("Query Image")
plt.imshow(queryImg)
plt.show()

# init VGGNet16 model
model = VGGNet()

# extract query image's feature, compute simlarity score and sort
queryVec = model.extract_feat(queryDir)
scores = np.dot(queryVec, feats.T)
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
#print rank_ID
#print rank_score


# number of top retrieved images to show
maxres = 3
imlist = [imgNames[index] for i,index in enumerate(rank_ID[0:maxres])]
print ("top %d images in order are: " %maxres, imlist)
 

# show top #maxres retrieved result one by one
for i,im in enumerate(imlist):
Exemplo n.º 21
0
if __name__ == "__main__":

    # Index builder: extract a VGG feature per image path listed in
    # list_eval_partition.txt and store the matrix in featureCNN.h5.
    # NOTE(review): the visible chunk ends right after the first
    # create_dataset call — the names dataset and h5f.close() are not
    # shown here; presumably they follow in the full file. Confirm.
    db = img_paths = 'list_eval_partition.txt'
    img_list = get_imlist(db)

    print("--------------------------------------------------")
    print("         feature extraction starts")
    print("--------------------------------------------------")

    feats = []
    names = []

    model = VGGNet()
    for i, img_path in enumerate(img_list):
        # Paths in the list file appear to be relative to the parent
        # directory, hence the ".." prefix.
        norm_feat = model.extract_feat(".." + img_path)
        #img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_path)
        print("extracting feature from image No. %d , %d images in total" %
              ((i + 1), len(img_list)))

    feats = np.array(feats)
    output = 'featureCNN.h5'

    print("--------------------------------------------------")
    print("      writing feature extraction results ...")
    print("--------------------------------------------------")

    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_feat', data=feats)