Ejemplo n.º 1
0
# Number of network outputs; presumably used so BatchLoader can replicate
# the 'pose' target once per supervised output — TODO confirm.
num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)

start_lr = 0.001  # initial learning rate
# Checkpoint filename template; '{epoch:03d}' is presumably filled in by the
# training callback (Keras-style) — verify against the fit() call.
weights_path = os.path.join(logdir, 'weights_posebaseline_{epoch:03d}.hdf5')

# Per-dataset batch sizes, combined by the BatchLoader below.
batch_size_mpii = 14
batch_size_ar = 2

"""Load datasets"""
# Single-person MPII samples in the pa17j3d pose layout.
mpii = MpiiSinglePerson(datasetpath('MPII'), dataconf=mpii_dataconf,
        poselayout=pa17j3d)

# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
#         poselayout=pa17j3d, topology='frames')

# Frame-level PennAction samples with ground-truth bounding boxes.
penn_sf = PennAction(datasetpath('Penn_Action'), pennaction_dataconf,
        poselayout=pa17j3d, topology='frames', use_gt_bbox=True)

# Frame-level NTU samples with ground-truth bounding boxes.
ntu_sf = Ntu(datasetpath('NTU'), ntu_pe_dataconf, poselayout=pa17j3d,
        topology='frames', use_gt_bbox=True)

"""Create an object to load data from all datasets."""
data_tr = BatchLoader([mpii, penn_sf, ntu_sf], ['frame'], ['pose'],
        TRAIN_MODE, batch_size=[batch_size_mpii, batch_size_ar,
            batch_size_ar], num_predictions=num_predictions, shuffle=True)

"""MPII validation samples."""
# The whole validation split is requested as one batch so it can be
# pre-fetched in a single indexing call below.
mpii_val = BatchLoader(mpii, ['frame'], ['pose', 'afmat', 'headsize'],
        VALID_MODE, batch_size=mpii.get_length(VALID_MODE), shuffle=True)
printcn(OKBLUE, 'Pre-loading MPII validation data...')
[x_val], [p_val, afmat_val, head_val] = mpii_val[0]
mpii_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
Ejemplo n.º 2
0
    "E:\\Bachelorarbeit-SS20\\weights\\deephar\\output\\ntu_baseline\\0603",
    'weights_posebaseline_{epoch:03d}.hdf5')

# Per-dataset batch sizes; presumably consumed by the BatchLoader that
# follows this snippet — TODO confirm.
batch_size_mpii = 10
batch_size_ar = 2
"""Load datasets"""
# NOTE(review): dataset roots are hard-coded Windows drive paths; consider
# making them configurable.
mpii = MpiiSinglePerson("D:\\MPII", dataconf=mpii_dataconf, poselayout=pa17j3d)

# Human3.6M loaded frame-by-frame in the pa17j3d layout.
h36m = Human36M("B:\\Human3.6M",
                dataconf=human36m_dataconf,
                poselayout=pa17j3d,
                topology='frames')

# Frame-level PennAction samples with ground-truth bounding boxes.
penn_sf = PennAction("D:\\PennAction",
                     pennaction_dataconf,
                     poselayout=pa17j3d,
                     topology='frames',
                     use_gt_bbox=True)

# Frame-level NTU samples with ground-truth bounding boxes.
ntu_sf = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
             ntu_pe_dataconf,
             poselayout=pa17j3d,
             topology='frames',
             use_gt_bbox=True)
"""Create an object to load data from all datasets."""
data_tr = BatchLoader([mpii, h36m, penn_sf, ntu_sf], ['frame'], ['pose'],
                      TRAIN_MODE,
                      batch_size=[
                          batch_size_mpii, batch_size_mpii, batch_size_ar,
                          batch_size_ar
                      ],
Ejemplo n.º 3
0
                                 num_actions,
                                 input_shape,
                                 num_frames,
                                 num_joints,
                                 num_blocks,
                                 pose_dim=2,
                                 pose_net_version='v1',
                                 full_trainable=False)

# Restore pre-trained weights for the model built above.
weights_path = "weights_pennaction_010.h5"
model.load_weights(weights_path)
"""Load PennAction dataset."""
# Sequence-level PennAction clips of num_frames frames each.
penn_seq = PennAction('datasets/PennAction',
                      pennaction_dataconf,
                      poselayout=pa16j2d,
                      topology='sequences',
                      use_gt_bbox=use_bbox,
                      clip_size=num_frames)

# Test-time loader: one clip per batch, in dataset order (no shuffling).
penn_te = BatchLoader(penn_seq, ['frame'], ['pennaction'],
                      TEST_MODE,
                      batch_size=1,
                      shuffle=False)

# =================================================================
import numpy as np
import json
import matplotlib.pyplot as plt
# Frame indices of the first test clip; subsamples=[2] presumably selects a
# temporal subsampling rate — TODO confirm against get_clip_index().
frame_list = penn_seq.get_clip_index(0, TEST_MODE, subsamples=[2])
# Predicted bounding boxes for 16-frame clips, stored as JSON.
with open('datasets/PennAction/penn_pred_bboxes_16f.json', 'r') as fid:
    bboxes_data = json.load(fid)
Ejemplo n.º 4
0
                                 num_frames,
                                 num_joints,
                                 num_blocks,
                                 pose_dim=2,
                                 pose_net_version='v1',
                                 full_trainable=False)
"""Load pre-trained model."""
weights_path = get_file(weights_file,
                        TF_WEIGHTS_PATH,
                        md5_hash=md5_hash,
                        cache_subdir='models')
model.load_weights(weights_path)
"""Load PennAction dataset."""
penn_seq = PennAction('datasets/PennAction',
                      pennaction_dataconf,
                      poselayout=pa16j2d,
                      topology='sequences',
                      use_gt_bbox=use_bbox,
                      clip_size=num_frames)

penn_te = BatchLoader(penn_seq, ['frame'], ['pennaction'],
                      TEST_MODE,
                      batch_size=1,
                      shuffle=False)

printcn(OKGREEN, 'Evaluation on PennAction multi-clip using predicted bboxes')
eval_multiclip_dataset(
    model,
    penn_seq,
    bboxes_file='datasets/PennAction/penn_pred_bboxes_16f.json',
    logdir=logdir)
Ejemplo n.º 5
0
# Clip length and how many clips to pre-load in one batch below.
num_frames = 8
num_clips = 250
# Multitask model configuration: 6 pose pyramids, action heads attached to
# every pyramid, 4 refinement levels per pyramid.
cfg = ModelConfig((num_frames,) + pennaction_dataconf.input_shape, pa16j2d,
        num_actions=[15], num_pyramids=6, action_pyramids=[1, 2, 3, 4, 5, 6],
        num_levels=4, pose_replica=True,
        num_pose_features=160, num_visual_features=160)

# Output counts for the pose and action branches respectively.
num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)
num_action_predictions = \
        spnet.get_num_predictions(len(cfg.action_pyramids), cfg.num_levels)


"""Load PennAction"""
# Sequence-level clips using predicted bounding boxes from a JSON file
# instead of the ground-truth annotations.
penn_seq = PennAction(datasetpath('Penn_Action'), pennaction_dataconf,
        poselayout=pa16j2d, topology='sequences', use_gt_bbox=False,
        pred_bboxes_file='pred_bboxes_penn.json', clip_size=num_frames)


"""Build the full model"""
full_model = spnet.build(cfg)

"""Load pre-trained weights from pose estimation and copy replica layers."""
# by_name=True matches layers by name, so only layers present in the
# checkpoint are restored.
full_model.load_weights(
        'output/penn_multimodel_trial-07-full_2e9fa5a/weights_mpii+penn_ar_028.hdf5',
        by_name=True)

"""Pre-load some samples from PennAction."""
# Request num_clips clips as a single batch and materialize them once.
penn_te = BatchLoader(penn_seq, ['frame'], ['pennaction'], TEST_MODE,
        batch_size=num_clips, shuffle=False)
[x], [y] = penn_te[0]
Ejemplo n.º 6
0
# Check file with bounding boxes
# Verify the predicted-bounding-boxes file exists before building the
# dataset; abort with download instructions otherwise.
penn_data_path = os.getcwd() + '/datasets/PennAction'
penn_bbox_file = 'penn_pred_bboxes_multitask.json'

# PEP 8 (E712): test the boolean directly instead of comparing to False.
if not os.path.isfile(os.path.join(penn_data_path, penn_bbox_file)):
    logger.debug(
        f'Error: file {penn_bbox_file} not found in {penn_data_path}!')
    logger.debug(
        '\nPlease download it from https://drive.google.com/file/d/1qXpEKF0d9KxmQdd2_QSIA1c3WGj1D3Y3/view?usp=sharing'
    )
    sys.stdout.flush()
    sys.exit()
# Sequence-level PennAction clips driven by predicted (not ground-truth)
# bounding boxes.
penn_seq = PennAction(penn_data_path,
                      pennaction_dataconf,
                      poselayout=pa16j2d,
                      topology='sequences',
                      use_gt_bbox=False,
                      pred_bboxes_file='penn_pred_bboxes_multitask.json',
                      clip_size=num_frames)
logger.info("PENN ACTION Loaded")

logger.info("Build FULL MODEL")
"""Build the full model"""
full_model = spnet.build(cfg)

# Pre-trained multitask weights; must exist locally before loading.
weights_file = os.getcwd() + '/weights/weights_mpii+penn_ar_028.hdf5'

if not os.path.isfile(weights_file):
    logger.debug(f'Error: file {weights_file} not found!')
    logger.debug(
        f'\nPlease download it from  https://drive.google.com/file/d/106yIhqNN-TrI34SX81q2xbU-NczcQj6I/view?usp=sharing'
Ejemplo n.º 7
0
from datasetpath import datasetpath

from generic import get_bbox_from_poses

# Optional log directory from argv[1]; stdout is redirected into a log file
# inside it. NOTE(review): the file handle is never closed and no encoding
# is given — acceptable for a one-shot script, but worth tidying.
logdir = './'
if len(sys.argv) > 1:
    logdir = sys.argv[1]
    mkdir(logdir)
    sys.stdout = open(str(logdir) + '/log.txt', 'w')

# Pose-only model: 8 pyramids, 4 levels, 16-joint 2D layout.
cfg = ModelConfig(dconf.input_shape, pa16j2d, num_pyramids=8, num_levels=4)
"""Load dataset"""
dpath = datasetpath('Penn_Action')
# Frame-level PennAction without ground-truth boxes (boxes are predicted).
penn = PennAction(dpath,
                  dconf,
                  poselayout=pa16j2d,
                  topology='frames',
                  use_gt_bbox=False)
"""Build and compile the network."""
model = spnet.build(cfg)
model.load_weights(
    'output/mpii_spnet_51b_741a720/weights_mpii_spnet_8b4l_050.hdf5')
"""Squeeze the model for only one output."""
# Keep only the last (most refined) prediction head.
model = Model(model.input, model.outputs[-1])


def predict_frame_bboxes(mode):
    bboxes = {}

    num_samples = penn.get_length(mode)
Ejemplo n.º 8
0
# Output counts for the pose and action branches respectively.
num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)
num_action_predictions = \
        spnet.get_num_predictions(len(cfg.action_pyramids), cfg.num_levels)

start_lr = 0.001  # initial learning rate
action_weight = 0.01  # presumably the action-loss weight — TODO confirm
# Batch composition derived from the clip length.
batch_size_mpii = int(0.8 * num_frames)
# batch_size_penn = num_frames - batch_size_mpii
batch_size_penn = num_frames
batch_clips = 4  # 8/4
"""Load datasets"""
mpii = MpiiSinglePerson("D:\\MPII", dataconf=mpii_dataconf, poselayout=pa16j2d)

# Frame-level PennAction with ground-truth boxes (pose estimation config).
penn_sf = PennAction("D:\\PennAction",
                     pennaction_pe_dataconf,
                     poselayout=pa16j2d,
                     topology='frames',
                     use_gt_bbox=True)

# Sequence-level PennAction clips with ground-truth boxes (action config).
penn_seq = PennAction("D:\\PennAction",
                      pennaction_dataconf,
                      poselayout=pa16j2d,
                      topology='sequences',
                      use_gt_bbox=True,
                      clip_size=num_frames)

# pe_data_tr = BatchLoader([mpii, penn_sf], ['frame'], ['pose'], TRAIN_MODE,
pe_data_tr = BatchLoader(
    [mpii],
    ['frame'],
    ['pose'],
Ejemplo n.º 9
0
#from datasetpath import datasetpath

from generic import get_bbox_from_poses

# Optional log directory from argv[1]; stdout is redirected into a log file
# inside it. NOTE(review): the file handle is never closed and no encoding
# is given — acceptable for a one-shot script.
logdir = './'
if len(sys.argv) > 1:
    logdir = sys.argv[1]
    mkdir(logdir)
    sys.stdout = open(str(logdir) + '/log.txt', 'w')

# Pose-only model: 8 pyramids, 4 levels, 16-joint 2D layout.
cfg = ModelConfig(dconf.input_shape, pa16j2d, num_pyramids=8, num_levels=4)
"""Load dataset"""
#dpath = datasetpath('Penn_Action')
# Frame-level PennAction without ground-truth boxes (boxes are predicted).
penn = PennAction('E:\\Bachelorarbeit-SS20\\datasets\\PennAction',
                  dconf,
                  poselayout=pa16j2d,
                  topology='frames',
                  use_gt_bbox=False)
"""Build and compile the network."""
model = spnet.build(cfg)
model.load_weights(
    'output/mpii_spnet_51b_741a720/weights_mpii_spnet_8b4l_050.hdf5')
"""Squeeze the model for only one output."""
# Keep only the last (most refined) prediction head.
model = Model(model.input, model.outputs[-1])


def predict_frame_bboxes(mode):
    """Predict per-frame bounding boxes for the given dataset split.

    NOTE(review): this snippet appears truncated — `bboxes` and
    `num_samples` are initialized but the prediction loop and return
    statement are missing from the visible code.
    """
    bboxes = {}

    num_samples = penn.get_length(mode)
#num_frames = 16
# Evaluation settings for the 16-joint / 15-action PennAction setup.
use_bbox = False
pred_bboxes_file = 'penn_pred_bboxes_16f.json'
num_blocks = 4
batch_size = 2
input_shape = pennaction_dataconf.input_shape
num_joints = 16
num_actions = 15
"""Load datasets"""
#mpii = MpiiSinglePerson(r'E:\Bachelorarbeit-SS20\datasets\MPII', dataconf=mpii_dataconf,
#        poselayout=pa16j2d)
"""Load PennAction dataset."""
# Raw string for the Windows path: the original plain literal relied on
# '\B', '\d' and '\P' being *invalid* escapes kept verbatim, which raises
# SyntaxWarning on Python 3.12+ and will become a SyntaxError. The runtime
# value is unchanged.
penn_seq = PennAction(r'E:\Bachelorarbeit-SS20\datasets\PennAction',
                      pennaction_dataconf,
                      poselayout=pa16j2d,
                      topology='sequences',
                      use_gt_bbox=use_bbox,
                      pred_bboxes_file=pred_bboxes_file,
                      clip_size=num_frames)
"""Build the full model"""
full_model = spnet.build(cfg)
"""Load pre-trained weights from pose estimation and copy replica layers."""
# by_name=True matches layers by name, so only layers present in the
# checkpoint are restored.
full_model.load_weights('output/weights_AR_merge_ep074_26-10-17.h5',
                        by_name=True)
"""This call splits the model into its parts: pose estimation and action
recognition, so we can evaluate each part separately on its respective datasets.
"""
models = split_model(full_model,
                     cfg,
                     interlaced=False,
                     model_names=['2DPose', '2DAction'])