Esempio n. 1
0
def getObjectMetric(pair, metric_name):
    """Resolve a metric value for *pair*.

    'self' returns the pair's object, '__path__' returns its file path;
    any other name is looked up in the pair's annotation dicts, first
    match wins. Returns None when no annotation carries the metric.
    """
    special_metrics = {
        'self': lambda: pair['object'],
        '__path__': lambda: getObjectPath(pair['object']),
    }
    if metric_name in special_metrics:
        return special_metrics[metric_name]()
    matches = (annotation[metric_name]
               for annotation in pair['annotations']
               if metric_name in annotation)
    return next(matches, None)
Esempio n. 2
0
def getAnnotations(object, all_annotations_files):
    """Collect the annotations attached to *object*.

    Derives the annotation-file prefix from the object's path, checks the
    three conventional file names (plain, '_left', '_right'), and extracts
    annotations from each file that exists in *all_annotations_files*
    (a mapping of path -> file content).

    Returns a list of extracted annotations, with None results dropped
    (extractAnnotations apparently returns None when a file holds no
    annotation for this object — confirmed only by the filter below).
    """
    object_file_path = getObjectPath(object)
    prefix = getAnnotationsFilesPrefix(object_file_path)
    candidate_paths = [prefix + suffix
                       for suffix in ('.txt', '_left.txt', '_right.txt')]
    annotations_files = [
        {'path': path, 'content': all_annotations_files[path]}
        for path in candidate_paths
        if path in all_annotations_files
    ]
    # `is not None` instead of `!= None`: identity check is the correct idiom
    extracted = (extractAnnotations(object, f) for f in annotations_files)
    return [annotation for annotation in extracted if annotation is not None]
Esempio n. 3
0
def countVideosInClasses(pairs, get_classes_function):
    """Count, per class, how many distinct videos contribute frames.

    Iterates the frame pairs, maps each frame's emotion to one or more
    class names via *get_classes_function*, and tallies unique video
    paths per class.

    Returns a dict: class name -> number of distinct videos.
    """
    videos_by_classes = {}
    for pair in tqdm(filter(isFrame, pairs),
                     desc='Counting videos in classes'):
        # BUG FIX: the original read `annotation['emotion']`, but no
        # `annotation` name was in scope (NameError). Looking the metric
        # up through the pair's annotations matches the getObjectMetric
        # helper's contract — TODO confirm 'emotion' is the intended key.
        classes = get_classes_function(getObjectMetric(pair, 'emotion'))
        video_path = getObjectPath(pair['object'])
        for class_name in classes:
            # a set of paths replaces the original dict-of-True markers
            videos_by_classes.setdefault(class_name, set()).add(video_path)
    return {class_name: len(videos)
            for class_name, videos in videos_by_classes.items()}
Esempio n. 4
0
def printPairsPaths(pairs):
    """Print the object file path of every pair, one per line."""
    paths = (getObjectPath(pair['object']) for pair in pairs)
    for path in paths:
        print(path)
Esempio n. 5
0
# Cluster the dataset's HOG descriptors with k-means and print, for each
# cluster, the paths of the objects assigned to it.
dataset_specific_api = getDatasetSpecificApi('experiment')
pairs = Pairs(dataset_dir,
              get_classes_function=dataset_specific_api.getClasses)
# pairs.filterBy({'__class__': ['happy']})

# Return-names mode makes the metric iterator yield (metric, pair_id) tuples.
pairs.setReturnNames(True)
hogs_and_pairs = list(getPairsMetricsWithPairs(pairs, 'hog', threads=24))
pairs.setReturnNames(False)

hogs = np.array([hog for hog, _ in hogs_and_pairs])
print(hogs.shape[0], 'elements')
kmeans = KMeans(n_clusters=clusters_number, random_state=0).fit(hogs)

labels = kmeans.labels_
ids = np.array([pair_id for _, pair_id in hogs_and_pairs])

# FIX (idiom): the original `[ids[i] for i in np.where(labels == l)][0]`
# only worked because np.where returns a 1-tuple whose sole element is the
# index array; a boolean mask selects the exact same ids directly.
result = {
    cluster: ids[labels == cluster]
    for cluster in range(clusters_number)
}

for cluster_id in result:
    print(cluster_id)
    for object_id in result[cluster_id]:
        # `obj` instead of `object`: avoid shadowing the builtin
        obj = pairs[object_id]['object']
        print(getObjectPath(obj))

# import torch
# from kmeans_pytorch import kmeans

# # kmeans
# cProfile.run("cluster_ids_x, cluster_centers = kmeans(X=torch.from_numpy(hogs), num_clusters=clusters_number, distance='cosine', device=torch.device('cuda'))")