Example #1
 parsed_ds = dataset.folder_to_dataset(os.path.join(tfr_home, "valid_256"))
 # Load the HuMAn neural network
 # Expects a normalization layer already adapted
 model = HuMAn()
 # Load weights from saved model
 saves_path = "../training/saves/train_universal"
 model.load_weights(saves_path)
 # Split the maximum horizon time (0.5 seconds) into bins
 n_bins = 3
 bin_limits = [0, 0.5 / 3, 2 * 0.5 / 3, 0.5]
 # Create lists to store the absolute error values into bins
 err_bins = [[] for _ in range(n_bins)]
 # Iterate through all specified horizon frames
 for horizon_frames in HORIZON_FRAMES:
     # Load validation data
     mapped_ds = parsed_ds.map(
         lambda x: dataset.map_dataset(
             x, skeleton="full_body", horizon_frames=horizon_frames),
         num_parallel_calls=tf.data.AUTOTUNE,
         deterministic=True)
     # Create a dataset for evaluation
     eval_ds = mapped_ds.batch(256).prefetch(tf.data.AUTOTUNE)
     # Predict for the whole dataset
     print(f"Predicting with horizon_frames={horizon_frames}")
     prediction = model.predict(eval_ds, verbose=1)
     # Create a dataset to be used as reference
     # All sequences are joined in a single large batch
     reference_ds = (mapped_ds.batch(prediction.shape[0]).prefetch(
         tf.data.AUTOTUNE))
     # Extract the values as NumPy arrays
     inputs, pose_targets = next(reference_ds.as_numpy_iterator())
     # Compute the absolute error between targets and predictions
     abs_err = np.abs(prediction - pose_targets)
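The excerpt stops right after the per-horizon absolute error is computed. A hypothetical continuation could sort those errors into the three horizon-time bins and report a per-bin mean absolute error; the conversion from horizon_frames to a horizon time is an assumption here (the largest entry of HORIZON_FRAMES is taken to correspond to the 0.5 s maximum), since the excerpt does not show it:

     # Hypothetical continuation (not in the original excerpt)
     # Convert the horizon in frames to a horizon time, assuming the largest
     # entry of HORIZON_FRAMES corresponds to the 0.5 s maximum
     horizon_time = 0.5 * horizon_frames / max(HORIZON_FRAMES)
     # Append the errors to the bin covering this horizon time
     for b in range(n_bins):
         if bin_limits[b] < horizon_time <= bin_limits[b + 1]:
             err_bins[b].append(abs_err.flatten())
             break
 # After the loop, report a mean absolute error per bin
 for b in range(n_bins):
     if err_bins[b]:
         bin_err = np.concatenate(err_bins[b])
         print(f"Bin {b} ({bin_limits[b]:.2f}-{bin_limits[b + 1]:.2f} s): "
               f"MAE {bin_err.mean():.4f}, stdev {bin_err.std():.4f}")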
Example #2
 tags.append("transfer_bml")
 # Iterate through all groups
 for i in range(len(saves)):
     # Create variables to store mean and stdev for each group
     mean = 0.0
     stdev = 0.0
     # Create also a counter for the number of data points
     pts = 0
     # Iterate inside a single group
     for j in range(len(saves[i])):
         # Load model weights
         model.load_weights(saves[i][j])
         # Iterate through a number of horizon frames
         for horizon_frames in range(1, 11):
             # Load the evaluation dataset
             mapped_ds = datasets[i][j].map(
                 lambda x: dataset.map_dataset(
                     x, skeleton="full_body", horizon_frames=horizon_frames),
                 num_parallel_calls=tf.data.AUTOTUNE,
                 deterministic=True)
             eval_ds = mapped_ds.batch(256).prefetch(tf.data.AUTOTUNE)
             # Predict
             print(f"Predicting for group {tags[i]} "
                   f"with horizon_frames={horizon_frames}")
             prediction = model.predict(eval_ds, verbose=1)
             # Compute the number of data points for a single recording
             rec_pts = prediction.shape[1] * prediction.shape[2]
             # Create the reference dataset
             # All sequences are joined in a single large batch
             reference_ds = (mapped_ds.batch(prediction.shape[0]).prefetch(
                 tf.data.AUTOTUNE))
             # Extract the values as NumPy arrays
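The second excerpt is truncated at the extraction step it announces. Below is a sketch of that step together with one possible way of folding the batch into the group's running mean, stdev, and pts via a pooled mean/variance update; this is an assumption, not necessarily what the original script does, and np is NumPy, assumed to be imported as in the other examples:

             # Hypothetical continuation (not in the original excerpt)
             inputs, pose_targets = next(reference_ds.as_numpy_iterator())
             abs_err = np.abs(prediction - pose_targets)
             # Number of data points contributed by this prediction batch
             new_pts = prediction.shape[0] * rec_pts
             batch_mean = abs_err.mean()
             batch_var = abs_err.var()
             # Pooled update of the running statistics for the group
             total = pts + new_pts
             new_mean = (pts * mean + new_pts * batch_mean) / total
             stdev = np.sqrt(
                 (pts * (stdev ** 2 + (mean - new_mean) ** 2)
                  + new_pts * (batch_var + (batch_mean - new_mean) ** 2))
                 / total)
             mean = new_mean
             pts = total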
Example #3
 saves_path = "../training/saves/train_universal"
 model.load_weights(saves_path)
 # Iterate through all selected (input) skeleton structures
 for skel_input in SKELETON.keys():
     # Iterate through all selected (prediction) skeleton structures
     for skel_pred in SKELETON[skel_input]:
         # Create NumPy arrays to store mean and stdev for each combination
         # of sampling time and horizon time
         mean = np.zeros(SAMPLING_TIME.shape)
         stdev = np.zeros(SAMPLING_TIME.shape)
         # Create another array, to accumulate the number of data points
         pts = np.zeros(SAMPLING_TIME.shape)
         # Iterate through all specified horizon frames
         for horizon_frames in HORIZON_FRAMES:
             # Load validation data
             mapped_ds = parsed_ds.map(
                 lambda x: dataset.map_dataset(
                     x, skeleton=skel_input, horizon_frames=horizon_frames),
                 num_parallel_calls=tf.data.AUTOTUNE,
                 deterministic=True)
             # Create a dataset for evaluation
             eval_ds = mapped_ds.batch(256).prefetch(tf.data.AUTOTUNE)
             # Predict for the whole dataset
             print(f"Predicting with skeleton_input={skel_input}, "
                   f"skeleton_prediction={skel_pred}, "
                   f"and horizon_frames={horizon_frames}")
             prediction = model.predict(eval_ds, verbose=1)
             # Compute the number of data points for a single recording
             rec_pts = prediction.shape[1] * prediction.shape[2]
             # Create a dataset to be used as reference
             # All sequences are joined in a single large batch
             reference_ds = (mapped_ds.batch(prediction.shape[0]).prefetch(
                 tf.data.AUTOTUNE))
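The third excerpt also cuts off after building the reference dataset. A continuation mirroring the first example would extract the arrays and compute the absolute error; how each sequence's sampling time is recovered (needed to index the SAMPLING_TIME-shaped mean, stdev, and pts arrays) and how skel_pred selects the predicted joints are not shown in the excerpt, so they are only noted in comments:

             # Hypothetical continuation (not in the original excerpt)
             # Extract the values as NumPy arrays
             inputs, pose_targets = next(reference_ds.as_numpy_iterator())
             # Compute the absolute error between targets and predictions
             abs_err = np.abs(prediction - pose_targets)
             # Accumulating abs_err into mean, stdev and pts would further
             # require each sequence's sampling time (to index the
             # SAMPLING_TIME-shaped arrays) and the joint selection implied
             # by skel_pred, neither of which appears in the excerpt.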