Example #1
weights_path = os.path.join(logdir, 'weights_posebaseline_{epoch:03d}.hdf5')

batch_size_mpii = 14
batch_size_ar = 2

"""Load datasets"""
mpii = MpiiSinglePerson(datasetpath('MPII'), dataconf=mpii_dataconf,
        poselayout=pa17j3d)

# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
#         poselayout=pa17j3d, topology='frames')

penn_sf = PennAction(datasetpath('Penn_Action'), pennaction_dataconf,
        poselayout=pa17j3d, topology='frames', use_gt_bbox=True)

ntu_sf = Ntu(datasetpath('NTU'), ntu_pe_dataconf, poselayout=pa17j3d,
        topology='frames', use_gt_bbox=True)

"""Create an object to load data from all datasets."""
data_tr = BatchLoader([mpii, penn_sf, ntu_sf], ['frame'], ['pose'],
        TRAIN_MODE, batch_size=[batch_size_mpii, batch_size_ar,
            batch_size_ar], num_predictions=num_predictions, shuffle=True)

"""MPII validation samples."""
mpii_val = BatchLoader(mpii, ['frame'], ['pose', 'afmat', 'headsize'],
        VALID_MODE, batch_size=mpii.get_length(VALID_MODE), shuffle=True)
printcn(OKBLUE, 'Pre-loading MPII validation data...')
[x_val], [p_val, afmat_val, head_val] = mpii_val[0]
mpii_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
        map_to_pa16j=pa17j3d.map_to_pa16j, logdir=logdir)

# """Human3.6H validation samples."""
Example #2
        num_pose_features=192, num_visual_features=192)

logger.debug(cfg)
num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)
num_action_predictions = spnet.get_num_predictions(len(cfg.action_pyramids), cfg.num_levels)
logger.debug("NUM PREDICTIONS")
logger.debug(num_action_predictions)

"""Load datasets"""
# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
        # poselayout=pa17j3d, topology='frames')

ntu_data_path = os.getcwd() + '/datasets/NTU'
logger.debug(ntu_data_path)

ntu = Ntu(ntu_data_path, ntu_dataconf, poselayout=pa17j3d,
        topology='sequences', use_gt_bbox=True, clip_size=num_frames, num_S=5)
#logger.debug ('WARNING!! USING ONLY S1 FOR EVALUATION!')

"""Build the full model"""
full_model = spnet.build(cfg)

weights_file = os.getcwd() + '/weights/weights_3dp+ntu_ar_062.hdf5'
logger.debug(weights_file)
if not os.path.isfile(weights_file):
    logger.debug(f'Error: file {weights_file} not found!')
    logger.debug('Please download it from https://drive.google.com/file/d/1I6GftXEkL5nohLA60Vi6faW0rvTZg6Kx/view?usp=sharing')
    sys.stdout.flush()
    sys.exit()


"""Load pre-trained weights from pose estimation and copy replica layers."""
Example #3
    sys.stdout = open(str(logdir) + '/log.txt', 'w')

num_frames = 8
cfg = ModelConfig((num_frames,) + ntu_dataconf.input_shape, pa17j3d,
        num_actions=[60], num_pyramids=2, action_pyramids=[1, 2],
        num_levels=4, pose_replica=False,
        num_pose_features=192, num_visual_features=192)

num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)
num_action_predictions = spnet.get_num_predictions(len(cfg.action_pyramids), cfg.num_levels)

"""Load datasets"""
# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
        # poselayout=pa17j3d, topology='frames')

ntu = Ntu(datasetpath('NTU'), ntu_dataconf, poselayout=pa17j3d,
        topology='sequences', use_gt_bbox=True, clip_size=num_frames)#, num_S=1)
# print ('WARNING!! USING ONLY S1 FOR EVALUATION!')

"""Build the full model"""
full_model = spnet.build(cfg)

"""Load pre-trained weights from pose estimation and copy replica layers."""
full_model.load_weights(
        # 'output/ntu_spnet_trial-03-ft_replica_0ae2bf7/weights_3dp+ntu_ar_062.hdf5',
        'output/ntu_spnet_trial_06_nopose_g_512a239/weights_3dp+ntu_ar_030.hdf5',
        by_name=True)

"""Split model to simplify evaluation."""
models = split_model(full_model, cfg, interlaced=False,
        model_names=['3DPose', '3DAction'])
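A short sketch of how the two sub-models might be used afterwards, assuming split_model returns them in the order given by model_names and that both take a full clip as input (assumptions, not stated in the snippet):

import numpy as np

pose_model, action_model = models

# Dummy clip shaped like the cfg above: (batch, frames) + ntu_dataconf.input_shape.
clip = np.zeros((1, num_frames) + ntu_dataconf.input_shape, dtype=np.float32)
pred_poses = pose_model.predict(clip)
pred_actions = action_model.predict(clip)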
Example #4
mpii = MpiiSinglePerson("D:\\MPII", dataconf=mpii_dataconf, poselayout=pa17j3d)

h36m = Human36M("B:\\Human3.6M",
                dataconf=human36m_dataconf,
                poselayout=pa17j3d,
                topology='frames')

penn_sf = PennAction("D:\\PennAction",
                     pennaction_dataconf,
                     poselayout=pa17j3d,
                     topology='frames',
                     use_gt_bbox=True)

ntu_sf = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
             ntu_pe_dataconf,
             poselayout=pa17j3d,
             topology='frames',
             use_gt_bbox=True)
"""Create an object to load data from all datasets."""
data_tr = BatchLoader([mpii, h36m, penn_sf, ntu_sf], ['frame'], ['pose'],
                      TRAIN_MODE,
                      batch_size=[
                          batch_size_mpii, batch_size_mpii, batch_size_ar,
                          batch_size_ar
                      ],
                      num_predictions=num_predictions,
                      shuffle=True)
"""MPII validation samples."""
mpii_val = BatchLoader(mpii, ['frame'], ['pose', 'afmat', 'headsize'],
                       VALID_MODE,
                       batch_size=mpii.get_length(VALID_MODE),
Example #5
action_weight = 0.1
batch_size_mpii = 3
batch_size_h36m = 4
batch_size_ntu = 8  #1
batch_clips = 4  # 8/4
"""Load datasets"""
mpii = MpiiSinglePerson(datasetpath('MPII'),
                        dataconf=mpii_dataconf,
                        poselayout=pa17j3d)

# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
# poselayout=pa17j3d, topology='frames')

ntu_sf = Ntu(datasetpath('NTU'),
             ntu_pe_dataconf,
             poselayout=pa17j3d,
             topology='frames',
             use_gt_bbox=True)

ntu = Ntu(datasetpath('NTU'),
          ntu_dataconf,
          poselayout=pa17j3d,
          topology='sequences',
          use_gt_bbox=True,
          clip_size=num_frames)

ntu_s1 = Ntu(datasetpath('NTU'),
             ntu_dataconf,
             poselayout=pa17j3d,
             topology='sequences',
             use_gt_bbox=True,
Example #6
model = action.build_merge_model(model_pe,
                                 num_actions,
                                 input_shape,
                                 num_frames,
                                 num_joints,
                                 num_blocks,
                                 pose_dim=pose_dim,
                                 num_context_per_joint=0,
                                 pose_net_version='v2')
"""Load pre-trained model."""
# weights_path = get_file(weights_file, TF_WEIGHTS_PATH, md5_hash=md5_hash,
#         cache_subdir='models')
weights_path = os.path.join(TF_WEIGHTS_PATH, weights_file)
model.load_weights(weights_path)
"""Load NTU dataset."""
ntu = Ntu('../../datasets/NTU',
          ntu_dataconf,
          poselayout=pa20j3d,
          topology='sequences',
          use_gt_bbox=True,
          clip_size=num_frames)

ntu_te = BatchLoader(ntu, ['frame'], ['ntuaction'],
                     TEST_MODE,
                     batch_size=1,
                     shuffle=False)

printcn(OKGREEN, 'Evaluation on NTU single-clip using GT bboxes')

eval_singleclip_generator(model, ntu_te)
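eval_singleclip_generator is a deephar helper whose internals are not shown in these examples. A hand-rolled approximation, assuming the merged model has a single softmax output and that each batch of ntu_te holds exactly one clip with a one-hot action label (both assumptions):

import numpy as np

num_clips = ntu.get_length(TEST_MODE)
correct = 0
for i in range(num_clips):
    [clip], [label] = ntu_te[i]  # batch_size=1: one clip and its one-hot label
    scores = model.predict(clip)
    if np.argmax(scores) == np.argmax(label):
        correct += 1
printcn(OKGREEN, 'Single-clip accuracy: %.4f' % (correct / num_clips))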
Example #7
cfg = ModelConfig(dconf.input_shape,
                  pa17j3d,
                  num_pyramids=8,
                  action_pyramids=[],
                  num_levels=4)

full_model = spnet.build(cfg)
full_model.load_weights(
    'output/pose_baseline_3dp_02_94226e0/weights_posebaseline_060.hdf5')
"""Squeeze the model for only one output."""
model = Model(full_model.input, full_model.outputs[-1])
model.summary()
"""Load dataset"""
ntu = Ntu(datasetpath('NTU'),
          dconf,
          poselayout=pa17j3d,
          topology='frames',
          use_gt_bbox=False)


def predict_frame_bboxes(mode):
    bboxes = {}

    num_samples = ntu.get_length(mode)
    for i in range(num_samples):
        printnl('mode %d: %07d/%07d' % (mode, i + 1, num_samples))

        data = ntu.get_data(i, mode)
        poses = model.predict(np.expand_dims(data['frame'], axis=0))
        bbox = get_bbox_from_poses(poses, data['afmat'], scale=1.5)
        seq_idx = data['seq_idx']
Example #8
action_weight = 0.1
batch_size_mpii = 3
#batch_size_h36m = 4
batch_size_ntu = 6  #1
batch_clips = 3  # 8/4
"""Load datasets"""
mpii = MpiiSinglePerson("E:\\Bachelorarbeit-SS20\\datasets\\MPII",
                        dataconf=mpii_dataconf,
                        poselayout=pa17j3d)

# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
# poselayout=pa17j3d, topology='frames')

ntu_sf = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
             ntu_pe_dataconf,
             poselayout=pa17j3d,
             topology='frames',
             use_gt_bbox=True)

ntu = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
          ntu_dataconf,
          poselayout=pa17j3d,
          topology='sequences',
          use_gt_bbox=True,
          clip_size=num_frames)

ntu_s1 = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
             ntu_dataconf,
             poselayout=pa17j3d,
             topology='sequences',
             use_gt_bbox=True,
Example #9
                  action_pyramids=[1, 2],
                  num_levels=4,
                  pose_replica=False,
                  num_pose_features=192,
                  num_visual_features=192)

num_predictions = spnet.get_num_predictions(cfg.num_pyramids, cfg.num_levels)
num_action_predictions = spnet.get_num_predictions(len(cfg.action_pyramids),
                                                   cfg.num_levels)
"""Load datasets"""
# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
# poselayout=pa17j3d, topology='frames')

ntu = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
          ntu_dataconf,
          poselayout=pa17j3d,
          topology='sequences',
          use_gt_bbox=True,
          clip_size=num_frames)  #, num_S=1)
# print ('WARNING!! USING ONLY S1 FOR EVALUATION!')
"""Build the full model"""
full_model = spnet.build(cfg)
"""Load pre-trained weights from pose estimation and copy replica layers."""
full_model.load_weights(
    # 'output/ntu_spnet_trial-03-ft_replica_0ae2bf7/weights_3dp+ntu_ar_062.hdf5',
    "E:\\Bachelorarbeit-SS20\\weights\\deephar\\output\\spnet\\0429\\weights_3dp+ntu_ar_048.hdf5",
    by_name=True)
"""Split model to simplify evaluation."""
models = split_model(full_model,
                     cfg,
                     interlaced=False,
                     model_names=['3DPose', '3DAction'])
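A plausible next step, not included in the snippet, is to evaluate the action branch on the NTU test split the way Example #6 does; indexing models[1] assumes the ['3DPose', '3DAction'] order given above:

ntu_te = BatchLoader(ntu, ['frame'], ['ntuaction'], TEST_MODE,
                     batch_size=1, shuffle=False)

# models[1] is assumed to be the '3DAction' sub-model.
eval_singleclip_generator(models[1], ntu_te)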
Example #10
start_lr = 0.01
action_weight = 0.1
batch_size_mpii = 3
#batch_size_h36m = 4
batch_size_ntu = 6 #1
batch_clips = 3 # 8/4

"""Load datasets"""
mpii = MpiiSinglePerson("/home/ispl-ex39/Downloads/deephar-master/datasets/MPII", dataconf=mpii_dataconf,
        poselayout=pa17j3d)

# h36m = Human36M(datasetpath('Human3.6M'), dataconf=human36m_dataconf,
        # poselayout=pa17j3d, topology='frames')

ntu_sf = Ntu("/home/ispl-ex39/hdd_ext/hdd2000/NTU", ntu_pe_dataconf, poselayout=pa17j3d,
        topology='frames', use_gt_bbox=True)

ntu = Ntu("/home/ispl-ex39/hdd_ext/hdd2000/NTU", ntu_dataconf, poselayout=pa17j3d,
        topology='sequences', use_gt_bbox=True, clip_size=num_frames)

ntu_s1 = Ntu("/home/ispl-ex39/hdd_ext/hdd2000/NTU", ntu_dataconf, poselayout=pa17j3d,
        topology='sequences', use_gt_bbox=True, clip_size=num_frames)
        # topology='sequences', use_gt_bbox=True, clip_size=num_frames, num_S=1)

pe_data_tr = BatchLoader([ntu_sf], ['frame'], ['pose'], TRAIN_MODE,
        batch_size=[batch_size_ntu],
        shuffle=True)
pe_data_tr = BatchLoader(pe_data_tr, ['frame'], ['pose'], TRAIN_MODE,
        batch_size=batch_clips, num_predictions=num_predictions, shuffle=False)

ar_data_tr = BatchLoader(ntu, ['frame'], ['ntuaction'], TRAIN_MODE,
Example #11
cfg = ModelConfig(dconf.input_shape,
                  pa17j3d,
                  num_pyramids=8,
                  action_pyramids=[],
                  num_levels=4)

full_model = spnet.build(cfg)
full_model.load_weights(
    'output/pose_baseline_3dp_02_94226e0/weights_posebaseline_060.hdf5')
"""Squeeze the model for only one output."""
model = Model(full_model.input, full_model.outputs[-1])
model.summary()
"""Load dataset"""
ntu = Ntu("E:\\Bachelorarbeit-SS20\\datasets\\NTU",
          dconf,
          poselayout=pa17j3d,
          topology='frames',
          use_gt_bbox=False)


def predict_frame_bboxes(mode):
    bboxes = {}

    num_samples = ntu.get_length(mode)
    for i in range(num_samples):
        printnl('mode %d: %07d/%07d' % (mode, i + 1, num_samples))

        data = ntu.get_data(i, mode)
        poses = model.predict(np.expand_dims(data['frame'], axis=0))
        bbox = get_bbox_from_poses(poses, data['afmat'], scale=1.5)
        seq_idx = data['seq_idx']
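get_bbox_from_poses is a deephar helper and its exact behaviour is not shown in these examples. Purely as an illustration of the idea (a hypothetical stand-in, not the library implementation), a bounding box with a scale margin can be derived from predicted 2D keypoints like this:

import numpy as np

def bbox_from_keypoints(points2d, scale=1.5):
    """Axis-aligned box around (num_joints, 2) keypoints, enlarged by `scale`. Hypothetical helper."""
    x1, y1 = points2d.min(axis=0)
    x2, y2 = points2d.max(axis=0)
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    w, h = (x2 - x1) * scale, (y2 - y1) * scale
    return np.array([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])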