def get_per_video_sift_data_old(split, subsample=1, nr_clusters=64, color=10): """ Loads the SIFT descriptors per video. `color` is a special flag; if it is set to 10 the descriptors are loaded unnormalized. """ filename = ("/home/lear/oneata/tmp/trecvid11_sift/" "sift_per_video_%s_subsample%d_k%d_color%d.raw" % ( split, subsample, nr_clusters, color)) if os.path.exists(filename): # Load data from cache file. print "Load per video data", filename with open(filename, "r") as ff: video_data = np.load(ff) video_labels = np.load(ff) video_names = cPickle.load(ff) return video_data, video_labels, video_names data, labels, limits = descriptors.load_dan_split( split, subsample, nr_clusters, color) # Get uniform weights within each video. weights = _normalize(np.ones_like(labels, dtype=np.float), limits, 'L1') # Load or compute data. video_data = aggregate(data, weights, limits) video_labels = np.array([labels[low] for low in limits[:-1]]) video_names = descriptors.vid_names_dan_split(split) # Save data to file. with open(filename, "w") as ff: np.save(ff, video_data) np.save(ff, video_labels) cPickle.dump(video_names, ff) return video_data, video_labels, video_names
def test_chunk_normalization(self):
    """Normalizing the scores chunk by chunk must give the same result
    as normalizing them all at once."""
    out = np.zeros_like(self.scores)
    for chunk_limits in chunker(self.limits, 2):
        lo, hi = chunk_limits[0], chunk_limits[-1]
        out[lo:hi] = _normalize(self.scores[lo:hi], chunk_limits - lo)
    assert_allclose(out, self.norm_scores)
def test_normalize_scores_by_number(self):
    """'L0' normalization must reproduce the precomputed expectation."""
    normalized = _normalize(self.scores, self.limits, "L0")
    assert_allclose(normalized, self.norm_scores_by_nr)
def test_normalize_scores(self):
    """Default normalization must reproduce the precomputed expectation."""
    normalized = _normalize(self.scores, self.limits)
    assert_allclose(normalized, self.norm_scores)
def test_normalize_scores_by_number(self):
    # NOTE(review): an earlier method in this file has the exact same
    # name; if both live in the same class, this later definition shadows
    # the earlier one and only one of them ever runs. The two bodies
    # differ only in quote style ("L0" vs 'L0'), so one copy should be
    # removed.
    rr = _normalize(self.scores, self.limits, 'L0')
    assert_allclose(rr, self.norm_scores_by_nr)