# Optional log directory: when given as argv[1], create it and redirect
# stdout into a log file inside it.
if len(sys.argv) > 1:
    logdir = sys.argv[1]
    mkdir(logdir)
    sys.stdout = open(str(logdir) + '/log.txt', 'w')

"""Architecture configuration."""
num_blocks = 8
batch_size = 24
input_shape = mpii_sp_dataconf.input_shape
num_joints = 16
mode = VALID_MODE

# Build the 2D single-person pose estimation network.
model = reception.build(input_shape, num_joints, dim=2,
        num_blocks=num_blocks, num_context_per_joint=2,
        ksize=(5, 5), concat_pose_confidence=False)

# Fetch (or reuse cached) pre-trained weights by checksum.
weights_path = get_file(weights_file, TF_WEIGHTS_PATH, file_hash=md5_hash,
        cache_subdir='models')

"""Load pre-trained model."""
model.load_weights(weights_path)

"""Merge pose and visibility as a single output."""
# Outputs come in (pose, visibility) pairs, one pair per block; fuse
# each pair into a single tensor.
# NOTE(review): the source chunk is truncated inside this call; the
# closing `name='blk%d' % (b + 1)))` is reconstructed from the identical
# loop in the sibling Human3.6M script — confirm against the original.
outputs = []
for b in range(len(model.outputs) // 2):
    outputs.append(
        concatenate([model.outputs[2 * b], model.outputs[2 * b + 1]],
            name='blk%d' % (b + 1)))
# Create the log directory and redirect stdout to a log file inside it.
mkdir(logdir)
sys.stdout = open(str(logdir) + '/log.txt', 'w')

# Architecture / data configuration.
num_frames = 20
num_blocks = 4
batch_size = 2
depth_maps = 8
num_joints = 20
num_actions = 60
pose_dim = 3
input_shape = ntu_dataconf.input_shape

"""Build the pose estimation model."""
model_pe = reception.build(input_shape, num_joints, dim=pose_dim,
        num_blocks=num_blocks, depth_maps=depth_maps, ksize=(5, 5),
        concat_pose_confidence=False)

"""Build the full model using the previous pose estimation one."""
model = action.build_merge_model(model_pe, num_actions, input_shape,
        num_frames, num_joints, num_blocks, pose_dim=pose_dim,
        num_context_per_joint=0, pose_net_version='v2')

"""Load pre-trained model."""
# NOTE(review): the source chunk is truncated inside this call; the
# remaining arguments (presumably a file hash, as in the sibling
# scripts) must be confirmed against the original file.
weights_path = get_file(weights_file,
        TF_WEIGHTS_PATH, cache_subdir='models')
# Log directory is required as argv[1]; stdout is redirected to a log
# file inside it.
logdir = sys.argv[1]
mkdir(logdir)
sys.stdout = open(str(logdir) + '/log.txt', 'w')
print("*******debug******")

# Checkpoint filename template — one weights file per epoch.
weights_file = os.path.join(logdir, 'weights_mpii_{epoch:03d}.h5')

"""Architecture configuration."""
num_blocks = 8
batch_size = 24
input_shape = mpii_sp_dataconf.input_shape
num_joints = 16
print("***********************")
print(input_shape)

# Build the 2D single-person pose estimation network.
model = reception.build(input_shape, num_joints, dim=2,
        num_blocks=num_blocks, num_context_per_joint=2, ksize=(5, 5))

"""Load the MPII dataset."""
mpii = MpiiSinglePerson('datasets/MPII', dataconf=mpii_sp_dataconf)

# Training loader: one 'pose' prediction target per network block.
data_tr = BatchLoader(mpii, ['frame'], ['pose'], TRAIN_MODE,
        batch_size=batch_size, num_predictions=num_blocks, shuffle=True)

"""Pre-load validation samples and generate the eval. callback."""
# NOTE(review): the source chunk is truncated inside this call; any
# further keyword arguments (e.g. batch_size / shuffle) are missing here
# and must be restored from the original script.
mpii_val = BatchLoader(mpii, x_dictkeys=['frame'],
        y_dictkeys=['pose', 'afmat', 'headsize'], mode=VALID_MODE)
# Log directory is required as argv[1]; stdout is redirected to a log
# file inside it.
logdir = sys.argv[1]
mkdir(logdir)
sys.stdout = open(str(logdir) + '/log.txt', 'w')

# Architecture / data configuration.
num_frames = 20
num_blocks = 4
batch_size = 2
depth_maps = 8
num_joints = 20
num_actions = 60
pose_dim = 3
input_shape = ntu_ar_dataconf.input_shape

"""Build the pose estimation model."""
model_pe = reception.build(input_shape, num_joints, dim=pose_dim,
        num_blocks=num_blocks, depth_maps=depth_maps, ksize=(5, 5))

"""Build the full model using the previous pose estimation one."""
model = action.build_merge_model(model_pe, num_actions, input_shape,
        num_frames, num_joints, num_blocks, pose_dim=pose_dim,
        num_context_per_joint=0, pose_net_version='v2')

"""Load pre-trained model."""
# Weights are loaded from a local snapshot rather than downloaded.
model.load_weights('weights_0052.h5')

"""Load NTU dataset."""
# Expected MD5 checksum of the pre-trained Human3.6M weights file.
md5_hash = 'af79f83ad939117d4ccc2cf1d4bd37d2'

# Optional log directory: default to cwd, override with argv[1] and
# redirect stdout to a log file inside it.
logdir = './'
if len(sys.argv) > 1:
    logdir = sys.argv[1]
    mkdir(logdir)
    sys.stdout = open(str(logdir) + '/log.txt', 'w')

"""Architecture configuration."""
num_blocks = 8
batch_size = 24
input_shape = human36m_dataconf.input_shape
num_joints = pa17j3d.num_joints

# Build the 3D pose estimation network.
model = reception.build(input_shape, num_joints, dim=3,
        num_blocks=num_blocks, ksize=(5, 5))

"""Load pre-trained model."""
# FIX: pass the checksum as `file_hash` — `md5_hash` is the deprecated
# Keras keyword; the sibling MPII script already uses `file_hash`.
weights_path = get_file(weights_file, TF_WEIGHTS_PATH,
        file_hash=md5_hash, cache_subdir='models')
model.load_weights(weights_path)

"""Merge pose and visibility as a single output."""
# Outputs come in (pose, visibility) pairs, one pair per block; fuse
# each pair into a single named tensor and rebuild the model around
# the merged outputs.
outputs = []
for b in range(len(model.outputs) // 2):
    outputs.append(
        concatenate([model.outputs[2 * b], model.outputs[2 * b + 1]],
            name='blk%d' % (b + 1)))
model = Model(model.input, outputs, name=model.name)