Example no. 1
def reranking(Q, data, inds, names, top_k=50):
    # Mean of the top-k retrieved vectors (copy so the running sum does not modify `data` in place)
    vecs_sum = data[inds[0], :].copy()
    for i in range(1, top_k):
        vecs_sum += data[inds[i], :]
    vec_mean = vecs_sum / float(top_k)
    # Re-center the query and the top-k vectors on that mean, then re-rank them by cosine distance
    Q = normalize(Q - vec_mean)
    sub_data = np.zeros((top_k, data.shape[1]))
    for i in range(top_k):
        sub_data[i, :] = normalize(data[inds[i], :] - vec_mean)
    sub_idxs, sub_rerank_dists, sub_rerank_names = compute_cosin_distance(Q, sub_data, names[:top_k])
    names[:top_k] = sub_rerank_names
    return names
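For context, a minimal self-contained sketch of how a call to reranking could be driven, on toy data and with a stand-in l2_normalize helper (the repo's own normalize and compute_cosin_distance are assumed to exist elsewhere and are not reproduced here):

import numpy as np

def l2_normalize(v):
    # stand-in for the repo's normalize(): scale rows to unit L2 norm
    v = np.atleast_2d(np.asarray(v, dtype=np.float64))
    return v / (np.linalg.norm(v, axis=1, keepdims=True) + 1e-12)

# toy database of 100 features and a single query
rng = np.random.default_rng(0)
data = l2_normalize(rng.standard_normal((100, 128)))
Q = l2_normalize(rng.standard_normal(128))[0]

# initial ranking by cosine distance (1 - dot product on unit vectors)
dists = 1.0 - data @ Q
inds = np.argsort(dists)                        # ascending distance
names = ['img_%03d.jpg' % i for i in inds]      # names already in ranked order

# reranking(Q, data, inds, names, top_k=20) would now re-center the query and
# the top-20 database vectors on their mean and re-sort names[:20] accordingly.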
Example no. 2
def simple_query_expansion(Q, data, inds, top_k=5):
    # Q: one query per row; inds.T[i] gives the i-th ranked database index for every query.
    # Each query is expanded with its top-k neighbours, weighted by a linearly decaying factor.
    for i in range(top_k):
        temp = np.zeros(Q.shape)
        for k, j in enumerate(inds.T[i]):
            temp[k] = data[j]
        Q += ((top_k - i) / float(top_k)) * temp
    return normalize(Q)
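Assuming Q holds one query per row and inds holds the row-wise ranked database indices for each query (which is what the loop above implies), the weighted accumulation can also be written in vectorized form. The snippet below is an illustrative equivalent on random data, not code from the original repository:

import numpy as np

rng = np.random.default_rng(1)
Q = rng.standard_normal((3, 8))                     # 3 queries, 8-D features
data = rng.standard_normal((50, 8))                 # 50 database vectors
inds = np.argsort(rng.random((3, 50)), axis=1)      # toy ranked indices per query

top_k = 5
weights = (top_k - np.arange(top_k)) / float(top_k)           # 1.0, 0.8, 0.6, 0.4, 0.2
expanded = Q + np.einsum('k,qkd->qd', weights, data[inds[:, :top_k]])
# `expanded` equals what the loop above accumulates before the final normalize()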
Example no. 3
def extract_crow_feature(img, cls_model):
    # Forward the image through the CNN and move the resulting feature map to the CPU
    output = cls_model(img)
    output_cpu = output.cpu().data.numpy()
    X = output_cpu.squeeze()
    # CroW-aggregate the C x H x W map, L2-normalize and flatten
    f_1 = crow.normalize(crow.apply_crow_aggregation(X)).flatten()
    return f_1
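The crow.apply_crow_aggregation / crow.normalize calls above come from the CroW codebase. For readers without that module at hand, here is a hedged re-implementation of cross-dimensional weighting as described in the CroW paper (Kalantidis et al.): spatial weights from summed activations, channel weights from activation sparsity. The real module may differ in details such as the spatial-weight exponents.

import numpy as np

def crow_aggregate(X, a=2, b=2):
    # X: C x H x W feature map of non-negative (ReLU) activations -- an assumption
    S = X.sum(axis=0)                                                # per-location activation mass
    S = (S / np.power((S ** a).sum(), 1.0 / a)) ** (1.0 / b)         # normalized spatial weights
    area = float(X.shape[1] * X.shape[2])
    nz = np.array([np.count_nonzero(x) / area for x in X])           # per-channel activation density
    C = np.where(nz > 0, np.log(nz.sum() / np.maximum(nz, 1e-12)), 0.0)  # sparsity-based channel weights
    return (X * S).sum(axis=(1, 2)) * C                              # weighted sum-pool, then re-weight

feat = crow_aggregate(np.maximum(np.random.randn(512, 7, 7), 0.0))
feat /= np.linalg.norm(feat) + 1e-12                                 # L2-normalize, matching the normalize() used above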
Example no. 4
def simple_query_expansion(Q, data, inds, top_k=10):
    """
    Get the top-k closest vectors, add them to the query and re-query

    :param ndarray Q:
        query vector
    :param ndarray data:
        index data vectors
    :param ndarray inds:
        the indices of index vectors in ascending order of distance
    :param int top_k:
        the number of closest vectors to consider

    :returns ndarray:
        the expanded, L2-normalized query vector
    """
    Q += data[inds[:top_k],:].sum(axis=0)
    return normalize(Q)
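Both expansion variants end with normalize(Q), a helper none of these snippets define. A stand-in with the contract they appear to assume (unit L2 norm per vector, accepting 1-D or 2-D input) could look like this sketch:

import numpy as np

def normalize(x, copy=True, eps=1e-12):
    # stand-in L2 normalization: scale each (row) vector to unit Euclidean length
    x = np.array(x, dtype=np.float64, copy=copy)
    if x.ndim == 1:
        return x / (np.linalg.norm(x) + eps)
    return x / (np.linalg.norm(x, axis=1, keepdims=True) + eps)

print(normalize([3.0, 4.0]))   # -> [0.6 0.8]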
Example no. 5
def simple_query_expansion(Q, data, inds, top_k=10):
    """
    Get the top-k closest vectors, add them to the query and re-query

    :param ndarray Q:
        query vector
    :param ndarray data:
        index data vectors
    :param ndarray inds:
        the indices of index vectors in ascending order of distance
    :param int top_k:
        the number of closest vectors to consider

    :returns ndarray:
        the expanded, L2-normalized query vector
    """
    Q += data[inds[:top_k], :].sum(axis=0)
    return normalize(Q)
Example no. 6
def extract_feat_3_yalers(img_path):
    # Load the trained model and build a function returning the output of layer 7
    model = load_model(dataPath + '/f1cn_model.49-1.613893.hdf5')
    layer_1 = K.function([model.layers[0].input], [model.layers[7].output])
    # Load and preprocess the image
    img = image.load_img(img_path, target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    # Extract the feature map, apply CroW aggregation, L2-normalize, then power-normalize
    f1 = layer_1([img])[0]
    feat_5 = convert_kernel(f1[0].T)
    feature_crow_5 = apply_crow_aggregation(feat_5)
    feature_norm_5 = normalize(feature_crow_5)
    return np.sqrt(feature_norm_5)
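The f1cn model file and dataPath above are specific to the original setup. As a point of reference only, the same intermediate-layer extraction pattern can be sketched with a stock Keras VGG16; everything below apart from the Keras API itself (layer name choice, image path) is an illustrative assumption:

import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model

base = VGG16(weights=None, include_top=False)    # weights=None avoids a download; use 'imagenet' in practice
feat_model = Model(inputs=base.input, outputs=base.get_layer('block3_conv3').output)

img = image.load_img('query.jpg', target_size=(224, 224))    # hypothetical image path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
fmap = np.transpose(feat_model.predict(x)[0], (2, 0, 1))      # C x H x W, ready for CroW aggregation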
Example no. 7
def load_face_2(face_data1, face_data2):
    face_dict = {}
    movie_list = []
    for movie, info in face_data1.items():
        movie_list.append(movie)
        casts = info['cast']
        candidates = info['candidates']

        # Cast members: concatenate the two face descriptors, each scaled by 0.5
        cast_ids, cast_ffeats = [], []
        for index, cast in enumerate(casts):
            feat1 = cast['ffeat']
            feat2 = face_data2[movie]['cast'][index]['ffeat']
            assert cast['id'] == face_data2[movie]['cast'][index]['id']
            feat = np.hstack((feat1 * 0.5, feat2 * 0.5))
            # feat = normalize(feat)
            cast_ffeats.append(feat)
            cast_ids.append(cast['id'])
        cast_ffeats = np.array(cast_ffeats)

        # Candidates: concatenate the two descriptors unscaled, then L2-normalize
        candi_f_ids, candi_f_ffeats = [], []
        for index, candidate in enumerate(candidates):
            if candidate['ffeat'] is not None:
                feat1 = candidate['ffeat']
                feat2 = face_data2[movie]['candidates'][index]['ffeat']
                assert candidate['id'] == face_data2[movie]['candidates'][
                    index]['id']
                feat = np.hstack((feat1, feat2))
                feat = normalize(feat)
                candi_f_ids.append(candidate['id'])
                candi_f_ffeats.append(feat)
        candi_f_ffeats = np.array(candi_f_ffeats)

        face_dict.update({
            movie: {
                'cast_ids': cast_ids,
                'cast_ffeats': cast_ffeats,
                'candi_f_ids': candi_f_ids,
                'candi_f_ffeats': candi_f_ffeats,
            }
        })
    return face_dict, movie_list
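A hedged illustration of how a per-movie entry returned by load_face_2 might be consumed, ranking candidates against cast members by cosine similarity; the dict layout mirrors the keys built above, while the feature values are random stand-ins:

import numpy as np

rng = np.random.default_rng(2)
movie_entry = {
    'cast_ids': ['nm001', 'nm002'],
    'cast_ffeats': rng.standard_normal((2, 512)),
    'candi_f_ids': ['c%04d' % i for i in range(10)],
    'candi_f_ffeats': rng.standard_normal((10, 512)),
}

cast = movie_entry['cast_ffeats']
cand = movie_entry['candi_f_ffeats']
cast = cast / np.linalg.norm(cast, axis=1, keepdims=True)
cand = cand / np.linalg.norm(cand, axis=1, keepdims=True)
sims = cast @ cand.T                       # cosine similarity, shape (n_cast, n_candidates)
ranking = np.argsort(-sims, axis=1)        # candidates ranked per cast member
print(ranking[:, :3])                      # top-3 candidate indices for each cast id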
Example no. 8
def simple_query_expansion(Q, data, inds, top_k=10):
    # Add the sum of the top-k retrieved vectors to the query and re-normalize
    Q += data[inds[:top_k], :].sum(axis=0)
    return normalize(Q)
Example no. 9
def query_images(groundtruth_dir, image_dir, dataset, cropped=True):
    """
    Extract features for the query images of the Oxford or Paris dataset.
    :param str groundtruth_dir:
        the directory of the groundtruth files (which includes the query files)
    :param str image_dir:
        the directory of dataset images
    :param str dataset:
        the name of the dataset, either 'oxford' or 'paris'
    :param bool cropped:
        flag to optionally disable cropping
    :returns list imgs:
        the cropped (or full) query images
    :returns list feats_crop:
        the aggregated, L2-normalized query features
    :returns list query_names:
        the query image filenames
    :returns list fake_query_names:
        the query identifiers derived from the groundtruth filenames
    """
    imgs = []
    query_names = []
    fake_query_names = []
    feats_crop = [] 
   
    modelDir = "/home/yuanyong/py/crow_retrieval/model"
    MODEL = "vgg.model"
    PROTO = "pool5.prototxt"
    caffemodel = os.path.join(modelDir, MODEL)
    prototxt = os.path.join(modelDir, PROTO)

    layer = 'pool5'
    # select GPU card
    caffe.set_device(6)
    caffe.set_mode_gpu()
    # init NN
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.forward()

    for f in glob.iglob(os.path.join(groundtruth_dir, '*_query.txt')):
        fake_query_name = os.path.splitext(os.path.basename(f))[0].replace('_query', '')
        fake_query_names.append(fake_query_name)

        query_name, x, y, w, h = open(f).read().strip().split(' ')

        if dataset == 'oxford':
            query_name = query_name.replace('oxc1_', '')
        query_names.append('%s.jpg' % query_name)
        img = cv2.imread(os.path.join(image_dir, '%s.jpg' % query_name), 1) # BGR

        if cropped:
            x, y, w, h = map(float, (x, y, w, h))
            x, y, w, h = map(lambda d: int(round(d)), (x, y, w, h))
        else:
            x, y, w, h = (0, 0, img.shape[1], img.shape[0])
        img = img[y:y+h, x:x+w]
        d = np.float32(img)
        # VggNet
        d -= np.array((104.00698793, 116.66876762, 122.67891434))
        d = d.transpose((2, 0, 1))
        feat = extract_raw_features(net, layer, d)
        #feat = extract_multi_raw_features(net, layer, d)
        feat = apply_crow_aggregation(feat)
        # L2-normalize feature
        feat = normalize(feat, copy=False)
        feats_crop.append(feat)
        imgs.append(img)
    return imgs, feats_crop, query_names, fake_query_names
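Each *_query.txt groundtruth file parsed above holds a single line: the query identifier followed by four bounding-box values. A stand-alone sketch of that parse on a toy line (the numbers are illustrative, not taken from the dataset):

line = 'oxc1_all_souls_000013 136.5 34.1 648.5 955.7'      # toy Oxford-style query line
query_name, x, y, w, h = line.strip().split(' ')
query_name = query_name.replace('oxc1_', '')                # strip the Oxford prefix
x, y, w, h = [int(round(float(v))) for v in (x, y, w, h)]
print(query_name, x, y, w, h)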
Example no. 10
    do_QE = True
    topK = 10
    do_crop = True
    do_pca = True
    do_rerank = False
    redud_d = 256

    # load all features
    start = timeit.default_timer()
    files = glob.glob(feats_files)
    feats, names = load_files(files)
    stop = timeit.default_timer()
    print("load time: %f seconds\n" % (stop - start))

    # L2-normalize features
    feats = normalize(feats, copy=False)

    # PCA reduce dimension
    if do_pca:
        #import pickle
        #whitening_params = {}
        #if os.path.isfile('../model/pca_model.pkl'):
        #    with open( '../model/pca_model.pkl' , 'rb') as f:
        #        whitening_params['pca'] = pickle.load(f)
        _, whitening_params = run_feature_processing_pipeline(feats, d=redud_d, copy=True)
        feats, _ = run_feature_processing_pipeline(feats, params=whitening_params)

    imgs, query_feats, query_names, fake_query_names = query_images(gt_files, dir_images, 'oxford', do_crop)

    aps = []
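    # For orientation only: the flags above typically drive a per-query loop along the lines of
    # the sketch below. The helper names (compute_cosin_distance, simple_query_expansion,
    # reranking, compute_ap) are assumptions modeled on the earlier examples, not the original script.
    #
    # for Q, query_name in zip(query_feats, query_names):
    #     if do_pca:
    #         Q, _ = run_feature_processing_pipeline([Q], params=whitening_params)
    #         Q = np.squeeze(Q)
    #     idxs, dists, ranked_names = compute_cosin_distance(Q, feats, names)
    #     if do_QE:
    #         Q = simple_query_expansion(Q, feats, idxs, top_k=topK)
    #         idxs, dists, ranked_names = compute_cosin_distance(Q, feats, names)
    #     if do_rerank:
    #         ranked_names = reranking(Q, feats, idxs, list(ranked_names), top_k=50)
    #     aps.append(compute_ap(query_name, ranked_names, gt_files))   # hypothetical evaluator
    # print("mAP: %f" % np.mean(aps))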
    def extract_feat(self, img_path):
        # Alternative: resize directly to the model's fixed input shape
        # img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        # img = image.img_to_array(img)
        img = image.load_img(img_path)
        img = image.img_to_array(img)
        # Downscale so the shorter side is at most 224 px, preserving the aspect ratio
        h, w, c = img.shape
        resize_h, resize_w = h, w
        minlength = min(h, w)
        if minlength > 224:
            beta = minlength / 224.0
            resize_h = int(h / beta)
            resize_w = int(w / beta)
        img = cv2.resize(img, (resize_w, resize_h))   # cv2.resize expects (width, height)

        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)

        # Per-layer CroW features: raw aggregation, L2-normalized, and mean-centred + L2-normalized
        feat_0 = self.model_vgg_block1_conv2.predict(img)
        feat_0 = convert_kernel(feat_0[0].T)
        feature_crow_0 = apply_crow_aggregation(feat_0)
        feature_norm_0 = normalize(feature_crow_0)
        feature_mean_norm_0 = normalize(preprocessing.scale(feature_crow_0, axis=0, with_mean=True, with_std=False, copy=True))

        feat_1 = self.model_vgg_block2_conv2.predict(img)
        feat_1 = convert_kernel(feat_1[0].T)
        feature_crow_1 = apply_crow_aggregation(feat_1)
        feature_norm_1 = normalize(feature_crow_1)
        feature_mean_norm_1 = normalize(preprocessing.scale(feature_crow_1, axis=0, with_mean=True, with_std=False, copy=True))

        feat_2 = self.model_vgg_block3_conv1.predict(img)
        feat_2 = convert_kernel(feat_2[0].T)
        feature_crow_2 = apply_crow_aggregation(feat_2)
        feature_norm_2 = normalize(feature_crow_2)
        feature_mean_norm_2 = normalize(preprocessing.scale(feature_crow_2, axis=0, with_mean=True, with_std=False, copy=True))

        # Concatenate the three shallow layers (64 + 128 + 256 = 448 dims)
        feature_448 = np.hstack((feature_crow_0.T, feature_crow_1.T, feature_crow_2.T))
        feature_448_norm = np.hstack((feature_norm_0.T, feature_norm_1.T, feature_norm_2.T))
        feature_448_mean_norm = np.hstack((feature_mean_norm_0.T, feature_mean_norm_1.T, feature_mean_norm_2.T))

        feat_3 = self.model_vgg_block3_conv3.predict(img)
        feat_3 = convert_kernel(feat_3[0].T)
        feature_crow_3 = apply_crow_aggregation(feat_3)
        feature_norm_3 = normalize(feature_crow_3)
        feature_mean_norm_3 = normalize(preprocessing.scale(feature_crow_3, axis=0, with_mean=True, with_std=False, copy=True))

        feat_4 = self.model_vgg_block4_conv3.predict(img)
        feat_4 = convert_kernel(feat_4[0].T)
        feature_crow_4 = apply_crow_aggregation(feat_4)
        feature_norm_4 = normalize(feature_crow_4)
        feature_mean_norm_4 = normalize(preprocessing.scale(feature_crow_4, axis=0, with_mean=True, with_std=False, copy=True))

        feat_5 = self.model_vgg_block5_conv3.predict(img)
        feat_5 = convert_kernel(feat_5[0].T)
        feature_crow_5 = apply_crow_aggregation(feat_5)
        feature_norm_5 = normalize(feature_crow_5)
        feature_mean_norm_5 = normalize(preprocessing.scale(feature_crow_5, axis=0, with_mean=True, with_std=False, copy=True))

        # Concatenate the three deeper layers (256 + 512 + 512 = 1280 dims)
        feature_1280 = np.hstack((feature_crow_3.T, feature_crow_4.T, feature_crow_5.T))
        feature_1280_norm = np.hstack((feature_norm_3.T, feature_norm_4.T, feature_norm_5.T))
        feature_1280_mean_norm = np.hstack((feature_mean_norm_3.T, feature_mean_norm_4.T, feature_mean_norm_5.T))

        # Return the raw, L2-normalized and mean-centred variants of the 1728-D descriptor
        return (np.hstack((feature_448.T, feature_1280.T)),
                np.hstack((feature_448_norm.T, feature_1280_norm.T)),
                np.hstack((feature_448_mean_norm.T, feature_1280_mean_norm.T)))
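The three return values are raw, L2-normalized and mean-centred variants of a 1728-D multi-layer descriptor. A hedged sketch of the typical post-processing before retrieval, PCA-whitening plus re-normalization on toy data (roughly what run_feature_processing_pipeline does in the earlier examples; the dimensions below are assumptions chosen to match the 448 + 1280 concatenation):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize

feats = np.random.randn(1000, 1728)                 # stand-in database of concatenated descriptors
feats = normalize(feats)                            # L2-normalize each descriptor
pca = PCA(n_components=256, whiten=True).fit(feats)
feats_reduced = normalize(pca.transform(feats))     # whitened, re-normalized retrieval features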