Example no. 1
0
def main(pose_path, submission_path, train_to_val_ratio=0.5):
    """Run the trained pose RNN on the test poses and submit a ranking.

    Parameters
    ----------
    pose_path : str
        Path to the pickled test-pose file readable by ``read_pose``.
    submission_path : str
        Unused here; kept for interface compatibility.
    train_to_val_ratio : float, optional
        Unused here; kept for interface compatibility.
    """
    # Read the poses (frame paths, keypoints, detection scores) from file.
    paths_test, test_pose, test_scores = read_pose(pose_path)
    # Keep only the first 13 keypoints and flatten (x, y) pairs -> 26 features.
    test_pose = test_pose[:, :13, :]
    test_pose = np.reshape(test_pose, (test_pose.shape[0], 26))

    # Group frames by sequence id parsed from the path prefix ("seq_NNNN/...").
    seq_ids = np.array([int(p.split('/')[0][4:]) for p in paths_test])
    test_seq_pose = split_by(test_pose, seq_ids)

    print(len(test_seq_pose))
    sequence_sz = 20

    # Build one fixed-length input per sequence by sampling frames
    # (with replacement) from that sequence.
    sampled = []
    for seq in test_seq_pose:
        sampled.append(seq[np.random.choice(seq.shape[0], sequence_sz)])

    X_test = np.array(sampled)

    # Load the trained model and predict person probabilities per sequence.
    model = load_model('modelPoseRNN.h5')
    preds = model.predict(X_test)

    # Create a submission - a sorted list of predictions, best match on the left.
    ranking = preds.argsort(axis=1)
    submission = [line.tolist() for line in ranking[:, :-6:-1]]
    print(submission[:10])

    from evaluate import submit
    submit('rrr', submission)
def main(pose_train, pose_test):
    """Train and evaluate an LSTM person classifier on pose sequences.

    Parameters
    ----------
    pose_train : str
        Path to the pickled train poses (train here = train + val;
        the split is performed inside this function).
    pose_test : str
        Path to the pickled test poses (read but not used below).
    """
    # Read the poses from file.
    paths_train, train_pose, train_scores = read_pose(pose_train)
    paths_test, test_pose, test_scores = read_pose(pose_test)

    # Keep only frames whose first 13 keypoints are mostly reliable:
    # fewer than 7 of the 13 scores negative (score > 0 in > 50% of frames).
    bad = np.sum(train_scores[:, :13] < 0, 1)
    good = bad < 7
    good_indices = np.where(good)[0]
    good_paths_train = np.array(paths_train)[good_indices]

    # Flatten the first 13 (x, y) keypoints into 26 features per frame.
    t1 = train_pose[:, :13, :]
    # Derive the frame count from the data instead of hard-coding 581685,
    # so the function works for any dataset size.
    t_pose = np.reshape(t1, (t1.shape[0], 26))
    t_pose = t_pose[good_indices, :]

    sequence_sz = 20
    person_ids, video_ids = enumerate_paths(paths_train)
    # Split into train/val over the filtered (good) frames only; the
    # returned indices are therefore relative to the filtered arrays.
    train_indices, val_indices = train_val_split(person_ids[good_indices],
                                                 video_ids[good_indices], 0.7)
    video_ids = video_ids[good_indices]
    # Reduce person_ids the same way video_ids is reduced, so the split
    # indices address the matching entries (the original indexed the
    # unfiltered person_ids with filtered-relative indices).
    person_ids = person_ids[good_indices]

    # Build one fixed-length sequence per video by sampling frames
    # (with replacement) from that video.
    train_p = split_by(t_pose[train_indices], video_ids[train_indices])
    train_seqs = []
    for frames in train_p:
        train_seqs.append(frames[np.random.choice(frames.shape[0],
                                                  sequence_sz)])
    X_train = np.array(train_seqs)

    val_p = split_by(t_pose[val_indices], video_ids[val_indices])
    val_seqs = []
    for frames in val_p:
        val_seqs.append(frames[np.random.choice(frames.shape[0], sequence_sz)])
    X_val = np.array(val_seqs)

    # Ground-truth label per video: all frames of a video share a person
    # id, so the first one suffices.
    val_labels = np.array([
        pids[0]
        for pids in split_by(person_ids[val_indices], video_ids[val_indices])
    ])
    train_labels = np.array([
        pids[0] for pids in split_by(person_ids[train_indices],
                                     video_ids[train_indices])
    ])

    # One-hot encode the person identities.
    n_classes = 101
    y_train = np.zeros((train_labels.shape[0], n_classes))
    y_train[np.arange(train_labels.shape[0]), train_labels] = 1
    y_valid = np.zeros((val_labels.shape[0], n_classes))
    y_valid[np.arange(val_labels.shape[0]), val_labels] = 1

    # --- build RNN model ---
    model = Sequential()
    poseNum = 26    # input features per time step
    batchSz = 32
    hiddenSz = 496  # LSTM units
    model.add(
        LSTM(hiddenSz,
             return_sequences=False,
             dropout=0.1,
             recurrent_dropout=0.1,
             input_shape=(sequence_sz, poseNum)))
    # Fully connected layer.
    fullySz = 64
    model.add(Dense(fullySz, activation='relu'))
    # Dropout for regularization.
    model.add(Dropout(0.5))
    # Output layer - one unit per person identity.
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())

    # Early stopping + checkpoint the best weights by validation loss.
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=5),
        ModelCheckpoint('modelPoseRNN.h5',
                        save_best_only=True,
                        save_weights_only=False)
    ]
    history = model.fit(X_train,
                        y_train,
                        batch_size=batchSz,
                        epochs=50,
                        callbacks=callbacks,
                        validation_data=(X_val, y_valid))

    # --- eval on validation data using the best checkpoint ---
    model = load_model('modelPoseRNN.h5')
    model.evaluate(X_val, y_valid)
    preds = model.predict(X_val)
    # Create a submission - a sorted list of predictions, best match on the left.
    ranking = preds.argsort(axis=1)
    submission = [line.tolist() for line in ranking[:, :-6:-1]]
    print(submission[:10])
Example no. 3
0
from data import Images
import os

# Dataset archives live in a 'data' directory next to this file.
data_path = os.path.join(os.path.dirname(__file__), 'data')

# Read one image from the tar archive to sanity-check access.
with Images(os.path.join(data_path, 'images.tar')) as images:
    path = images.paths[3]
    image = images[path]
    print("read image {} of shape {}".format(path, image.shape))
# read image "person_0013/channel_0081/seq

from data import read_pose
# Pose keypoints and detection scores, one entry per frame path.
paths, keypoints, scores = read_pose(os.path.join(data_path, 'pose.pkl'))
print(len(paths))
print(keypoints.shape)
print(scores.shape)

from data import read_signatures
# ImageNet signature vectors, one per frame path.
pathsSig, signatures = read_signatures(
    os.path.join(data_path, 'signatures.pkl'))
print(len(pathsSig))
print(signatures.shape)
Example no. 4
0
from tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D
from tensorflow.keras.models import Model
import data
from data import Images
from common import resolve_single
import matplotlib.pyplot as plt
from scipy.stats import mode
"""Upload super res GAN libs"""
from model.srgan import generator
from model.cnn import CNN
from utils import tensor2numpy, shuffle, devide, create_onehot, per_label, devide_submission, train_model
import numpy as np
from sklearn.preprocessing import OneHotEncoder

"""Upload images, poses, signatures"""
poses = data.read_pose('./data/pose.pkl')
signatures = data.read_signatures('./data/signatures.pkl')
with Images('data/images.tar') as images:
    path = images.paths[20000]
    # Use the public indexing interface rather than the private _getitem().
    image = images[path]
    print('read image {} of shape {}'.format(path, image.shape))


# Sequence keys: drop the 4-character frame suffix from each pose path.
my_split = poses[0]
my_split = [path[:-4] for path in my_split]


"""Use SRGAN"""
srgan = generator()
srgan.load_weights('weights/srgan/gan_generator.h5')