Example #1
def main(sigs_train, sigs_test):
    # Read the imagenet signatures from file
    paths_train, train_sigs = read_signatures(sigs_train)
    paths_test, test_sigs = read_signatures(sigs_test)

    # Solution

    # Find the mean signature for each person based on the training set
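    # (paths are assumed to look like 'person_XXXX/...', so the slice below
    #  strips the 'person_' prefix to recover the numeric person id)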
    person_ids = np.array([int(p.split('/')[0][7:]) for p in paths_train])
    train_person_sigs = split_by(train_sigs, person_ids)
    train_person_sigs = np.vstack(
        [np.mean(ts, axis=0) for ts in train_person_sigs])

    # Find the mean signature for each test sequence
    seq_ids = np.array([int(p.split('/')[0][4:]) for p in paths_test])
    test_seq_sigs = split_by(test_sigs, seq_ids)
    test_seq_sigs = np.vstack([np.mean(ts, axis=0) for ts in test_seq_sigs])

    # Predict classes using cosine similarity
    similarity_matrix = cosine_similarity(test_seq_sigs, train_person_sigs)

    # Create a submission - a sorted list of predictions, best match on the left.
    ranking = similarity_matrix.argsort(axis=1)
    submission = [line.tolist() for line in ranking[:, :-6:-1]]

    # Submit to the server and print the reply (-1 means something went wrong)
    print(submit('naive', submission))
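The example relies on repository helpers (read_signatures, split_by, submit) that are not shown here. Below is a minimal sketch of what split_by is assumed to do, namely group the rows of an array by a parallel array of ids; the actual implementation in the repository may differ.

import numpy as np

def split_by(values, keys):
    # Group the rows of `values` by the ids in `keys`, returning one array
    # per distinct id (ids taken in sorted order).
    values = np.asarray(values)
    keys = np.asarray(keys)
    order = np.argsort(keys, kind='stable')  # bring rows with equal ids together
    sorted_keys = keys[order]
    boundaries = np.flatnonzero(sorted_keys[1:] != sorted_keys[:-1]) + 1
    return np.split(values[order], boundaries)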
Example #2
def main(sigs_path, images_path, samples_per_person=16):
    # Read the imagenet signatures from file
    paths, signatures = read_signatures(sigs_path)
    # Enumerate the frame paths based on person and video
    person_ids, video_ids = enumerate_paths(paths)
    # Sample "samples_per_person" images from each person
    sampled_indices = [
        idx for person_frames in split_by(range(len(paths)), person_ids)
        for idx in sorted(
            np.random.choice(person_frames, samples_per_person).tolist())
    ]
    sampled_paths = [paths[idx] for idx in sampled_indices]
    sampled_labels = np.mgrid[:len(sampled_indices), :samples_per_person][0].ravel()
    # Get images of sampled data points
    with Images(images_path) as images:
        sampled_images = [images[path] for path in sampled_paths]
    sampled_images = np.stack(sampled_images).transpose([0, 3, 1, 2])
    # Get normalized signatures of sampled data points
    sampled_sigs = signatures[sampled_indices]
    sampled_sigs /= np.sqrt(
        np.sum(np.square(sampled_sigs), axis=1, keepdims=True))
    # Write data to tensorboard projector
    writer = SummaryWriter()
    meta_data = [sp.split('/')[0] for sp in sampled_paths]
    label_img = torch.from_numpy(sampled_images).float() / 255
    writer.add_embedding(torch.from_numpy(sampled_sigs),
                         metadata=meta_data,
                         label_img=label_img)
    print('Visualization ready')
    print('run: \t tensorboard --logdir=runs')
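enumerate_paths is another helper from the repository. A plausible sketch, assuming frame paths have the form 'person_XXXX/channel_YYYY/...' and that each person/channel pair identifies one video:

import numpy as np

def enumerate_paths(paths):
    # Map every frame path to a dense integer person id and video id.
    persons = [p.split('/')[0] for p in paths]
    videos = ['/'.join(p.split('/')[:2]) for p in paths]
    _, person_ids = np.unique(persons, return_inverse=True)
    _, video_ids = np.unique(videos, return_inverse=True)
    return person_ids, video_ids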
Example #3
def main(sigs_path, submission_path, train_to_test_ratio=0.5):
    # Read the imagenet signatures from file
    paths, signatures = read_signatures(sigs_path)
    # Enumerate the frame paths based on person and video
    person_ids, video_ids = enumerate_paths(paths)
    # For each person, split his set of videos to train and test
    train_indices, test_indices = train_test_split(person_ids, video_ids,
                                                   train_to_test_ratio)

    # Solution

    # Find the mean signature for each person based on the training set
    train_sigs = split_by(signatures[train_indices], person_ids[train_indices])
    train_sigs = np.vstack([np.mean(ts, axis=0) for ts in train_sigs])

    # Find the mean signature for each test video and assign its ground-truth person id
    test_sigs = split_by(signatures[test_indices], video_ids[test_indices])
    test_sigs = np.vstack([np.mean(ts, axis=0) for ts in test_sigs])
    # Ground truth labels
    test_labels = np.array([
        pids[0]
        for pids in split_by(person_ids[test_indices], video_ids[test_indices])
    ])

    # Predict classes using cosine similarity
    similarity_matrix = cosine_similarity(test_sigs, train_sigs)

    # Create a submission - a sorted list of predictions, best match on the left.
    ranking = similarity_matrix.argsort(axis=1)
    submission = [line.tolist() for line in ranking[:, :-6:-1]]

    # Compute and display top 1 / 5 accuracies
    evaluate(submission, test_labels)
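The evaluate helper is also not shown. A minimal sketch of the top-1/top-5 accuracy computation it is assumed to perform, given a ranked submission (best match first) and the ground-truth labels:

import numpy as np

def evaluate(submission, test_labels):
    ranks = np.asarray(submission)
    top1 = np.mean(ranks[:, 0] == test_labels)
    top5 = np.mean([label in row[:5] for row, label in zip(ranks, test_labels)])
    print('top-1 accuracy: {:.3f}\ttop-5 accuracy: {:.3f}'.format(top1, top5))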
Example #4
from data import Images
import os

data_path = os.path.join(os.path.dirname(__file__), 'data')
with Images(os.path.join(data_path, 'images.tar')) as images:
    path = images.paths[3]
    image = images[path]
    print("read image {} of shape {}".format(path, image.shape))
# read image "person_0013/channel_0081/seq

from data import read_pose
#paths, keypoints, scores = read_pose('pose.pkl')
paths, keypoints, scores = read_pose(os.path.join(data_path, 'pose.pkl'))
print(len(paths))
print(keypoints.shape)
print(scores.shape)

from data import read_signatures
pathsSig, signatures = read_signatures(
    os.path.join(data_path, 'signatures.pkl'))
print(len(pathsSig))
print(signatures.shape)
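The three data files appear to be indexed by the same frame paths, so entries can be cross-referenced by path. A small usage sketch (assuming the image path read above also appears in pose.pkl):

pose_index = {p: i for i, p in enumerate(paths)}
if path in pose_index:
    idx = pose_index[path]
    print("keypoints for {}: shape {}, mean score {:.2f}".format(
        path, keypoints[idx].shape, scores[idx].mean()))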
Example #5
from tensorflow.keras.models import Model
import data
from data import Images
from common import resolve_single
import matplotlib.pyplot as plt
from scipy.stats import mode
"""Upload super res GAN libs"""
from model.srgan import generator
from model.cnn import CNN
from utils import tensor2numpy, shuffle, devide, create_onehot, per_label, devide_submission, train_model
import numpy as np
from sklearn.preprocessing import OneHotEncoder

"""Upload images, poses, signatures"""
poses = data.read_pose('./data/pose.pkl')
signatures = data.read_signatures('./data/signatures.pkl')
with Images('data/images.tar') as images:
    path = images.paths[20000]
    image = images[path]
    print('read image {} of shape {}'.format(path, image.shape))


my_split = poses[0]  # frame paths returned by read_pose
my_split = [path[:-4] for path in my_split]  # drop the last four characters (presumably the file extension)


"""Use SRGAN"""
srgan = generator()
srgan.load_weights('weights/srgan/gan_generator.h5')
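A short usage sketch for the loaded generator. It assumes resolve_single takes the model and a single low-resolution image and returns the upsampled image, as its import from common suggests; the exact output type may differ.

"""Super-resolve the sample frame loaded above"""
sr_image = resolve_single(srgan, image)
plt.imshow(np.asarray(sr_image))
plt.title('SRGAN upsampled frame')
plt.show()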