def _generate_hera_training_data(fine_labels_path, coarse_labels_path,
                                 fine_action_to_id, coarse_action_to_id,
                                 input_seq_len, output_seq_len, num_cuts,
                                 observe_at_least_k_percent,
                                 ignore_silence_action, add_final_action):
    random.seed(42)
    # Per-video tensors are accumulated in these lists, one list per model
    # input/target: encoder, transition, and decoder streams at the coarse
    # and fine levels, plus the boundary signals.
    xs_enc_coarse, xs_tra_coarse, xs_dec_coarse = [], [], []
    xs_enc_fine, xs_tra_fine, xs_dec_fine = [], [], []
    ys_enc_coarse, ys_tra_coarse, ys_dec_coarse = [], [], []
    ys_enc_fine, ys_tra_fine, ys_dec_fine = [], [], []
    encs_boundary, decs_boundary = [], []
    tensors = [
        xs_enc_coarse, xs_tra_coarse, xs_dec_coarse,
        xs_enc_fine, xs_tra_fine, xs_dec_fine,
        ys_enc_coarse, ys_tra_coarse, ys_dec_coarse,
        ys_enc_fine, ys_tra_fine, ys_dec_fine,
        encs_boundary, decs_boundary
    ]
    # Only process videos that have both fine and coarse annotations.
    fine_label_files = set(os.listdir(fine_labels_path))
    coarse_label_files = set(os.listdir(coarse_labels_path))
    label_files = sorted(fine_label_files & coarse_label_files)
    for label_file in label_files:
        with open(os.path.join(fine_labels_path, label_file), mode='r') as f:
            fine_actions_per_frame = [line.rstrip() for line in f]
        with open(os.path.join(coarse_labels_path, label_file), mode='r') as f:
            coarse_actions_per_frame = [line.rstrip() for line in f]
        fine_actions_per_frame, coarse_actions_per_frame = \
            extend_smallest_list(fine_actions_per_frame,
                                 coarse_actions_per_frame)
        single_video_tensors = _generate_hera_training_data_from_single_video(
            fine_actions_per_frame, coarse_actions_per_frame,
            fine_action_to_id=fine_action_to_id,
            coarse_action_to_id=coarse_action_to_id,
            input_seq_len=input_seq_len, output_seq_len=output_seq_len,
            num_cuts=num_cuts,
            observe_at_least_k_percent=observe_at_least_k_percent,
            ignore_silence_action=ignore_silence_action,
            add_final_action=add_final_action)
        for tensor_list, single_video_tensor in zip(tensors,
                                                    single_video_tensors):
            tensor_list.append(single_video_tensor)
    # Merge the per-video tensors and shuffle all of them consistently.
    tensors = [np.concatenate(tensor_list, axis=0) for tensor_list in tensors]
    tensors = shuffle(*tensors, random_state=42)
    names = [
        'x_enc_coarse', 'x_tra_coarse', 'x_dec_coarse',
        'x_enc_fine', 'x_tra_fine', 'x_dec_fine',
        'y_enc_coarse', 'y_tra_coarse', 'y_dec_coarse',
        'y_enc_fine', 'y_tra_fine', 'y_dec_fine',
        'enc_boundary', 'dec_boundary'
    ]
    tensors_dict = dict(zip(names, tensors))
    return tensors_dict
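
# Usage sketch (hypothetical paths and vocabularies; assumes each label file
# holds one action name per line, one line per frame, and that the same file
# names appear in both label directories):
#
#   fine_action_to_id = {'pour_milk': 0, 'spoon_powder': 1}
#   coarse_action_to_id = {'make_milk': 0}
#   data = _generate_hera_training_data(
#       'data/train/fine_labels', 'data/train/coarse_labels',
#       fine_action_to_id, coarse_action_to_id,
#       input_seq_len=16, output_seq_len=16, num_cuts=3,
#       observe_at_least_k_percent=0.2, ignore_silence_action=None,
#       add_final_action=True)
#   x_enc_fine, y_enc_fine = data['x_enc_fine'], data['y_enc_fine']
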
def analyse_flushes_hierarchical(flushes_per_video,
                                 ground_truth_flushes_per_video, label_files,
                                 seq_len, save_path, encoder=True,
                                 extra_str=''):
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if extra_str:
        save_file_name = 'flushes_' + extra_str + '_full_split.txt'
    else:
        save_file_name = 'flushes_full_split.txt'
    with open(os.path.join(save_path, save_file_name), mode='w') as f:
        for flushes, ground_truth_flushes, label_file in \
                zip(flushes_per_video, ground_truth_flushes_per_video,
                    label_files):
            num_steps = len(flushes)
            flushes, ground_truth_flushes = extend_smallest_list(
                flushes, ground_truth_flushes, extension_val=0.0)
            flushes_ground_truth_positions = compute_flushes_positions(
                ground_truth_flushes)
            flushes_positions = compute_flushes_positions(flushes)
            f.write(label_file + '\n')
            if encoder:
                f.write('\tNumber of Input Steps: %d' % seq_len)
            else:
                f.write('\tNumber of Output Steps: %d' % seq_len)
            f.write('\n')
            f.write('\tNumber of Predicted Steps: %d' % num_steps)
            f.write('\n')
            f.write('\tGround-truth Flush Positions:')
            for flush_ground_truth_position in flushes_ground_truth_positions:
                f.write(' ' + str(flush_ground_truth_position).rjust(2))
            f.write('\n')
            f.write('\t Predicted Flush Positions:')
            for flush_position in flushes_positions:
                f.write(' ' + str(flush_position).rjust(2))
            f.write('\n\n')
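
# Usage sketch (hypothetical values; assumes flush signals are per-step
# scalars in [0, 1] and that compute_flushes_positions extracts the step
# indices at which a flush fires):
#
#   analyse_flushes_hierarchical(
#       flushes_per_video=[[0.1, 0.9, 0.2]],
#       ground_truth_flushes_per_video=[[0.0, 1.0, 0.0]],
#       label_files=['video_01.txt'],
#       seq_len=16, save_path='analysis/flushes', encoder=False)
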
def test_baselines(args):
    checkpoint = torch.load(args.checkpoint)
    fine_labels_path = args.fine_labels_path
    coarse_labels_path = args.coarse_labels_path
    fine_action_to_id = read_action_dictionary(args.fine_action_to_id)
    fine_id_to_action = {action_id: action
                         for action, action_id in fine_action_to_id.items()}
    coarse_action_to_id = read_action_dictionary(args.coarse_action_to_id)
    coarse_id_to_action = {action_id: action
                           for action, action_id in coarse_action_to_id.items()}
    fraction_observed = args.observed_fraction
    ignore_silence_action = args.ignore_silence_action
    do_error_analysis = args.do_error_analysis
    print_coarse_results = args.print_coarse_results
    seq_len = checkpoint['seq_len']
    # Load model
    baseline_type = checkpoint['baseline_type']
    action_level = checkpoint['action_level']
    Baseline = {0: Baseline0, 1: Baseline1, 2: Baseline2}[baseline_type]
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = Baseline(**checkpoint['model_creation_args']).to(device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    observed_fine_actions_per_video = []
    unobserved_fine_actions_per_video = []
    predicted_fine_steps_per_video = []
    predicted_fine_actions_per_video = []
    observed_coarse_actions_per_video = []
    unobserved_coarse_actions_per_video = []
    predicted_coarse_steps_per_video = []
    predicted_coarse_actions_per_video = []
    num_frames_per_video = []
    fine_label_files = set(os.listdir(fine_labels_path))
    coarse_label_files = set(os.listdir(coarse_labels_path))
    label_files = sorted(fine_label_files & coarse_label_files)
    for label_file in label_files:
        with open(os.path.join(fine_labels_path, label_file), mode='r') as f:
            fine_actions_per_frame = [line.rstrip() for line in f]
        with open(os.path.join(coarse_labels_path, label_file), mode='r') as f:
            coarse_actions_per_frame = [line.rstrip() for line in f]
        if ignore_silence_action is not None:
            fine_actions_per_frame = [
                fine_action for fine_action in fine_actions_per_frame
                if fine_action != ignore_silence_action
            ]
            coarse_actions_per_frame = [
                coarse_action for coarse_action in coarse_actions_per_frame
                if coarse_action != ignore_silence_action
            ]
        fine_actions_per_frame, coarse_actions_per_frame = \
            extend_smallest_list(fine_actions_per_frame,
                                 coarse_actions_per_frame)
        num_frames = len(fine_actions_per_frame)
        num_frames_per_video.append(num_frames)
        # Split each video into an observed prefix and an unobserved suffix.
        num_frames_to_grab = round(num_frames * fraction_observed)
        observed_fine_actions = fine_actions_per_frame[:num_frames_to_grab]
        observed_fine_actions_per_video.append(observed_fine_actions)
        unobserved_fine_actions = fine_actions_per_frame[num_frames_to_grab:]
        observed_coarse_actions = coarse_actions_per_frame[:num_frames_to_grab]
        observed_coarse_actions_per_video.append(observed_coarse_actions)
        unobserved_coarse_actions = coarse_actions_per_frame[num_frames_to_grab:]
        tensors, steps, last_action_obs_length = generate_test_datum(
            observed_fine_actions, observed_coarse_actions, seq_len=seq_len,
            fine_action_to_id=fine_action_to_id,
            coarse_action_to_id=coarse_action_to_id, num_frames=num_frames)
        tensors = [nan_to_value(tensor, value=0.0) for tensor in tensors]
        tensors = numpy_to_torch(*tensors, device=device)
        steps = torch.tensor([steps], device=device)
        predictions = predict_future_actions(
            model, tensors, effective_steps=steps,
            fine_id_to_action=fine_id_to_action,
            coarse_id_to_action=coarse_id_to_action, num_frames=num_frames,
            maximum_prediction_length=len(unobserved_fine_actions),
            baseline_type=baseline_type, action_level=action_level,
            last_action_obs_length=last_action_obs_length)
        predicted_actions, predicted_steps = predictions
        predicted_fine_actions, predicted_coarse_actions = predicted_actions
        predicted_fine_steps, predicted_coarse_steps = predicted_steps
        predicted_fine_steps_per_video.append(predicted_fine_steps)
        predicted_coarse_steps_per_video.append(predicted_coarse_steps)
        _update_level_predictions(predicted_fine_actions,
                                  predicted_fine_actions_per_video,
                                  unobserved_fine_actions,
                                  unobserved_fine_actions_per_video)
        _update_level_predictions(predicted_coarse_actions,
                                  predicted_coarse_actions_per_video,
                                  unobserved_coarse_actions,
                                  unobserved_coarse_actions_per_video)
    # Performance and Error Analysis
    f1_results_dict = {}
    moc_results_dict = {}
    unobserved_fractions = [0.1, 0.2, 0.3, 0.5, 0.7, 0.8]
    unobserved_fractions = [
        unobserved_fraction for unobserved_fraction in unobserved_fractions
        if fraction_observed + unobserved_fraction <= 1.0
    ]
    for unobserved_fraction in unobserved_fractions:
        save_analysis_path = os.path.join(
            args.checkpoint[:-4],
            str(fraction_observed) + '_' + str(unobserved_fraction))
        if os.path.exists(save_analysis_path):
            clean_directory(save_analysis_path)
        predicted_fine_actions_per_video_sub, unobserved_fine_actions_per_video_sub = [], []
        predicted_coarse_actions_per_video_sub, unobserved_coarse_actions_per_video_sub = [], []
        f1_per_video_fine = []  # file_name, input-level_0.5_f1
        f1_per_video_coarse = []
        # file_name, precision, recall, f1 (regardless of length/class)
        sequence_metrics_per_video_fine = []
        sequence_metrics_per_video_coarse = []
        num_videos = len(predicted_fine_actions_per_video)
        for i in range(num_videos):
            predicted_fine_actions = predicted_fine_actions_per_video[i]
            unobserved_fine_actions = unobserved_fine_actions_per_video[i]
            num_frames_to_grab = round(num_frames_per_video[i] *
                                       unobserved_fraction)
            predicted_fine_actions_sub = predicted_fine_actions[:num_frames_to_grab]
            predicted_fine_actions_per_video_sub.append(predicted_fine_actions_sub)
            unobserved_fine_actions_sub = unobserved_fine_actions[:num_frames_to_grab]
            unobserved_fine_actions_per_video_sub.append(unobserved_fine_actions_sub)
            predicted_coarse_actions = predicted_coarse_actions_per_video[i]
            unobserved_coarse_actions = unobserved_coarse_actions_per_video[i]
            predicted_coarse_actions_sub = predicted_coarse_actions[:num_frames_to_grab]
            predicted_coarse_actions_per_video_sub.append(predicted_coarse_actions_sub)
            unobserved_coarse_actions_sub = unobserved_coarse_actions[:num_frames_to_grab]
            unobserved_coarse_actions_per_video_sub.append(unobserved_coarse_actions_sub)
            if do_error_analysis:
                if baseline_type == 0:
                    # Baseline 0 predicts a single level of granularity.
                    if action_level == 'coarse':
                        observed_actions = observed_coarse_actions_per_video[i]
                        predicted_steps = predicted_coarse_steps_per_video[i]
                        unobserved_actions = unobserved_coarse_actions_sub.tolist()
                        predicted_actions_sub = predicted_coarse_actions_sub
                        unobserved_actions_sub = unobserved_coarse_actions_sub
                        action_to_id = coarse_action_to_id
                    else:
                        observed_actions = observed_fine_actions_per_video[i]
                        predicted_steps = predicted_fine_steps_per_video[i]
                        unobserved_actions = unobserved_fine_actions_sub.tolist()
                        predicted_actions_sub = predicted_fine_actions_sub
                        unobserved_actions_sub = unobserved_fine_actions_sub
                        action_to_id = fine_action_to_id
                    steps_to_grab = compute_steps_to_grab(predicted_steps,
                                                          num_frames_to_grab)
                    predicted_steps = predicted_steps[:steps_to_grab]
                    analyse_single_level_observations_and_predictions_per_step(
                        predicted_steps, observed_actions, unobserved_actions,
                        num_frames=num_frames_per_video[i],
                        save_path=save_analysis_path,
                        save_file_name=label_files[i])
                    _, f1_scores = compute_metrics([predicted_actions_sub],
                                                   [unobserved_actions_sub],
                                                   action_to_id=action_to_id)
                    if action_level == 'coarse':
                        f1_per_video_coarse.append([label_files[i], f1_scores[-1]])
                    else:
                        f1_per_video_fine.append([label_files[i], f1_scores[-1]])
                    precision, recall, f1 = action_sequence_metrics(
                        aggregate_actions_and_lengths(predicted_actions_sub.tolist())[0],
                        aggregate_actions_and_lengths(unobserved_actions_sub.tolist())[0])
                    if action_level == 'coarse':
                        sequence_metrics_per_video_coarse.append(
                            [label_files[i], precision, recall, f1])
                    else:
                        sequence_metrics_per_video_fine.append(
                            [label_files[i], precision, recall, f1])
                else:
                    # Baselines 1 and 2 predict both levels; analyse them as
                    # joint 'coarse/fine' labels.
                    observed_actions = [
                        coarse_action + '/' + fine_action
                        for coarse_action, fine_action in zip(
                            observed_coarse_actions_per_video[i],
                            observed_fine_actions_per_video[i])
                    ]
                    predicted_fine_steps = predicted_fine_steps_per_video[i]
                    steps_to_grab = compute_steps_to_grab(predicted_fine_steps,
                                                          num_frames_to_grab)
                    predicted_fine_steps = predicted_fine_steps[:steps_to_grab]
                    predicted_coarse_steps = \
                        predicted_coarse_steps_per_video[i][:steps_to_grab]
                    predicted_steps = [
                        (coarse_step[0] + '/' + fine_step[0], fine_step[1])
                        for coarse_step, fine_step in zip(
                            predicted_coarse_steps, predicted_fine_steps)
                    ]
                    unobserved_actions = [
                        coarse_action + '/' + fine_action
                        for coarse_action, fine_action in zip(
                            unobserved_coarse_actions_sub.tolist(),
                            unobserved_fine_actions_sub.tolist())
                    ]
                    analyse_single_level_observations_and_predictions_per_step(
                        predicted_steps, observed_actions, unobserved_actions,
                        num_frames=num_frames_per_video[i],
                        save_path=save_analysis_path,
                        save_file_name=label_files[i])
                    _, f1_scores_fine = compute_metrics(
                        [predicted_fine_actions_sub],
                        [unobserved_fine_actions_sub],
                        action_to_id=fine_action_to_id)
                    f1_per_video_fine.append([label_files[i], f1_scores_fine[-1]])
                    _, f1_scores_coarse = compute_metrics(
                        [predicted_coarse_actions_sub],
                        [unobserved_coarse_actions_sub],
                        action_to_id=coarse_action_to_id)
                    f1_per_video_coarse.append([label_files[i], f1_scores_coarse[-1]])
                    precision, recall, f1 = action_sequence_metrics(
                        aggregate_actions_and_lengths(predicted_fine_actions_sub.tolist())[0],
                        aggregate_actions_and_lengths(unobserved_fine_actions_sub.tolist())[0])
                    sequence_metrics_per_video_fine.append(
                        [label_files[i], precision, recall, f1])
                    precision, recall, f1 = action_sequence_metrics(
                        aggregate_actions_and_lengths(predicted_coarse_actions_sub.tolist())[0],
                        aggregate_actions_and_lengths(unobserved_coarse_actions_sub.tolist())[0])
                    sequence_metrics_per_video_coarse.append(
                        [label_files[i], precision, recall, f1])
        if do_error_analysis:
            if f1_per_video_fine:
                write_results_per_video(f1_per_video_fine, order_by=None,
                                        metric_name='f1-0.5-fine',
                                        save_path=save_analysis_path)
                write_sequence_results_per_video(sequence_metrics_per_video_fine,
                                                 save_analysis_path,
                                                 level='fine')
            if f1_per_video_coarse:
                write_results_per_video(f1_per_video_coarse, order_by=None,
                                        metric_name='f1-0.5-coarse',
                                        save_path=save_analysis_path)
                write_sequence_results_per_video(sequence_metrics_per_video_coarse,
                                                 save_analysis_path,
                                                 level='coarse')
        print('\nObserved fraction: %.2f | Unobserved fraction: %.2f' %
              (fraction_observed, unobserved_fraction))
        if baseline_type == 0 and action_level == 'coarse':
            predicted_actions_per_video_sub = predicted_coarse_actions_per_video_sub
            unobserved_actions_per_video_sub = unobserved_coarse_actions_per_video_sub
            action_to_id = coarse_action_to_id
            print('Coarse')
        else:
            predicted_actions_per_video_sub = predicted_fine_actions_per_video_sub
            unobserved_actions_per_video_sub = unobserved_fine_actions_per_video_sub
            action_to_id = fine_action_to_id
            print('Fine')
        moc, _, _ = compute_moc(np.concatenate(predicted_actions_per_video_sub),
                                np.concatenate(unobserved_actions_per_video_sub),
                                action_to_id=action_to_id)
        if baseline_type == 0 and action_level == 'coarse':
            moc_results_dict[f'coarse-moc-{fraction_observed}_{unobserved_fraction}'] = moc
        else:
            moc_results_dict[f'fine-moc-{fraction_observed}_{unobserved_fraction}'] = moc
        overlaps, f1_overlap_scores = compute_metrics(
            predicted_actions_per_video_sub, unobserved_actions_per_video_sub,
            action_to_id=action_to_id)
        for overlap, overlap_f1_score in zip(overlaps, f1_overlap_scores):
            print('F1@%.2f: %.4f' % (overlap, overlap_f1_score))
            if baseline_type == 0 and action_level == 'coarse':
                f1_results_dict[f'coarse-{fraction_observed}_{unobserved_fraction}_{overlap}'] = overlap_f1_score
            else:
                f1_results_dict[f'fine-{fraction_observed}_{unobserved_fraction}_{overlap}'] = overlap_f1_score
        if baseline_type > 0:
            # Baselines 1 and 2 also predict coarse actions; report them too.
            moc, _, _ = compute_moc(
                np.concatenate(predicted_coarse_actions_per_video_sub),
                np.concatenate(unobserved_coarse_actions_per_video_sub),
                action_to_id=coarse_action_to_id)
            moc_results_dict[f'coarse-moc-{fraction_observed}_{unobserved_fraction}'] = moc
            overlaps, f1_overlap_scores = compute_metrics(
                predicted_coarse_actions_per_video_sub,
                unobserved_coarse_actions_per_video_sub,
                action_to_id=coarse_action_to_id)
            if print_coarse_results:
                print('Coarse')
            for overlap, overlap_f1_score in zip(overlaps, f1_overlap_scores):
                f1_results_dict[f'coarse-{fraction_observed}_{unobserved_fraction}_{overlap}'] = overlap_f1_score
                if print_coarse_results:
                    print('F1@%.2f: %.4f' % (overlap, overlap_f1_score))
    results_dict = {**f1_results_dict, **moc_results_dict}
    return results_dict
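
# Usage sketch (hypothetical file names; `args` mirrors the attributes read
# above, as an argparse Namespace would provide them):
#
#   from argparse import Namespace
#   args = Namespace(checkpoint='checkpoints/baseline2_split1.tar',
#                    fine_labels_path='data/test/fine_labels',
#                    coarse_labels_path='data/test/coarse_labels',
#                    fine_action_to_id='data/fine_action_to_id.txt',
#                    coarse_action_to_id='data/coarse_action_to_id.txt',
#                    observed_fraction=0.2, ignore_silence_action='SIL',
#                    do_error_analysis=False, print_coarse_results=True)
#   results = test_baselines(args)  # maps metric names to scores
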
def _generate_baselines_training_data(fine_labels_path, coarse_labels_path,
                                      fine_action_to_id, coarse_action_to_id,
                                      seq_len, ignore_silence_action,
                                      add_final_action, is_validation):
    xs_enc_fine, xs_enc_coarse = [], []
    ys_enc_fine, ys_enc_coarse = [], []
    effective_num_steps_per_video = []
    tensors = [
        xs_enc_fine, xs_enc_coarse, ys_enc_fine, ys_enc_coarse,
        effective_num_steps_per_video
    ]
    # Only process videos that have both fine and coarse annotations.
    fine_label_files = set(os.listdir(fine_labels_path))
    coarse_label_files = set(os.listdir(coarse_labels_path))
    label_files = sorted(fine_label_files & coarse_label_files)
    for label_file in label_files:
        with open(os.path.join(fine_labels_path, label_file), mode='r') as f:
            fine_actions_per_frame = [line.rstrip() for line in f]
        with open(os.path.join(coarse_labels_path, label_file), mode='r') as f:
            coarse_actions_per_frame = [line.rstrip() for line in f]
        fine_actions_per_frame, coarse_actions_per_frame = \
            extend_smallest_list(fine_actions_per_frame,
                                 coarse_actions_per_frame)
        if ignore_silence_action is not None:
            fine_actions_per_frame = [
                fine_action for fine_action in fine_actions_per_frame
                if fine_action != ignore_silence_action
            ]
            coarse_actions_per_frame = [
                coarse_action for coarse_action in coarse_actions_per_frame
                if coarse_action != ignore_silence_action
            ]
            error_msg = 'Action levels do not match after removing silence action.'
            assert len(fine_actions_per_frame) == len(coarse_actions_per_frame), error_msg
        if is_validation:
            single_video_tensors = _generate_baselines_validation_from_single_video(
                fine_actions_per_frame, coarse_actions_per_frame,
                fine_action_to_id=fine_action_to_id,
                coarse_action_to_id=coarse_action_to_id, seq_len=seq_len,
                add_final_action=add_final_action)
        else:
            single_video_tensors = _generate_baselines_training_data_from_single_video(
                fine_actions_per_frame, coarse_actions_per_frame,
                fine_action_to_id=fine_action_to_id,
                coarse_action_to_id=coarse_action_to_id, seq_len=seq_len,
                add_final_action=add_final_action)
        for tensor_list, single_video_tensor in zip(tensors,
                                                    single_video_tensors):
            tensor_list.append(single_video_tensor)
    tensors = [np.concatenate(tensor_list, axis=0) for tensor_list in tensors]
    tensors = shuffle(*tensors, random_state=42)
    names = [
        'x_enc_fine', 'x_enc_coarse', 'y_enc_fine', 'y_enc_coarse',
        'effective_num_steps'
    ]
    tensors_dict = dict(zip(names, tensors))
    return tensors_dict
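
# Usage sketch (hypothetical paths and values; same label-file layout as the
# HERA data generation above):
#
#   data = _generate_baselines_training_data(
#       'data/train/fine_labels', 'data/train/coarse_labels',
#       fine_action_to_id, coarse_action_to_id, seq_len=25,
#       ignore_silence_action='SIL', add_final_action=True,
#       is_validation=False)
#   x_enc_fine = data['x_enc_fine']
#   effective_num_steps = data['effective_num_steps']  # valid steps per sequence
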
def test_hera(args):
    checkpoint = torch.load(args.checkpoint)
    fine_labels_path = args.fine_labels_path
    coarse_labels_path = args.coarse_labels_path
    fine_action_to_id = read_action_dictionary(args.fine_action_to_id)
    fine_id_to_action = {action_id: action
                         for action, action_id in fine_action_to_id.items()}
    coarse_action_to_id = read_action_dictionary(args.coarse_action_to_id)
    coarse_id_to_action = {action_id: action
                           for action, action_id in coarse_action_to_id.items()}
    fraction_observed = args.observed_fraction
    ignore_silence_action = args.ignore_silence_action
    do_error_analysis = args.do_error_analysis
    do_future_performance_analysis = args.do_future_performance_analysis
    do_flush_analysis = args.do_flush_analysis
    input_seq_len = checkpoint['input_seq_len']
    scalers = checkpoint.get('scalers', None)
    disable_parent_input = checkpoint['disable_parent_input']
    # Load model
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = HERA(**checkpoint['model_creation_args']).to(device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    observed_fine_actions_per_video, observed_coarse_actions_per_video = [], []
    fine_transition_action_per_video, coarse_transition_action_per_video = [], []
    flushes_per_video, ground_truth_flushes_per_video = [], []
    predicted_fine_actions_per_video, predicted_coarse_actions_per_video = [], []
    predicted_fine_steps_per_video, predicted_coarse_steps_per_video = [], []
    unobserved_fine_actions_per_video, unobserved_coarse_actions_per_video = [], []
    fine_label_files = set(os.listdir(fine_labels_path))
    coarse_label_files = set(os.listdir(coarse_labels_path))
    label_files = sorted(fine_label_files & coarse_label_files)
    for label_file in label_files:
        with open(os.path.join(fine_labels_path, label_file), mode='r') as f:
            fine_actions_per_frame = [line.rstrip() for line in f]
        with open(os.path.join(coarse_labels_path, label_file), mode='r') as f:
            coarse_actions_per_frame = [line.rstrip() for line in f]
        if ignore_silence_action is not None:
            fine_actions_per_frame = [
                fine_action for fine_action in fine_actions_per_frame
                if fine_action != ignore_silence_action
            ]
            coarse_actions_per_frame = [
                coarse_action for coarse_action in coarse_actions_per_frame
                if coarse_action != ignore_silence_action
            ]
        fine_actions_per_frame, coarse_actions_per_frame = \
            extend_smallest_list(fine_actions_per_frame,
                                 coarse_actions_per_frame)
        observed_fine_actions, unobserved_fine_actions = \
            split_observed_actions(fine_actions_per_frame,
                                   fraction_observed=fraction_observed)
        observed_fine_actions_per_video.append(observed_fine_actions)
        # The transition action is the last observed action at each level.
        fine_transition_action_per_video.append(observed_fine_actions[-1])
        observed_coarse_actions, unobserved_coarse_actions = \
            split_observed_actions(coarse_actions_per_frame,
                                   fraction_observed=fraction_observed)
        observed_coarse_actions_per_video.append(observed_coarse_actions)
        coarse_transition_action_per_video.append(observed_coarse_actions[-1])
        tensors = generate_test_datum(observed_fine_actions,
                                      observed_coarse_actions,
                                      input_seq_len=input_seq_len,
                                      fine_action_to_id=fine_action_to_id,
                                      coarse_action_to_id=coarse_action_to_id,
                                      disable_parent_input=disable_parent_input,
                                      num_frames=len(fine_actions_per_frame),
                                      scalers=scalers,
                                      coarse_is_complete=False)
        tensors = [nan_to_value(tensor, value=0.0) for tensor in tensors]
        tensors = numpy_to_torch(*tensors, device=device)
        predicted_actions, predicted_steps, dx_dec_fine = \
            predict_future_actions(model, tensors,
                                   fine_id_to_action=fine_id_to_action,
                                   coarse_id_to_action=coarse_id_to_action,
                                   disable_parent_input=disable_parent_input,
                                   num_frames=len(fine_actions_per_frame),
                                   maximum_prediction_length=len(unobserved_fine_actions),
                                   observed_fine_actions=observed_fine_actions,
                                   observed_coarse_actions=observed_coarse_actions,
                                   fine_action_to_id=fine_action_to_id,
                                   coarse_action_to_id=coarse_action_to_id,
                                   scalers=scalers)
        flushes_per_video.append(dx_dec_fine)
        ground_truth_flushes = compute_ground_truth_flushes(
            observed_coarse_actions[-1], observed_fine_actions[-1],
            unobserved_coarse_actions, unobserved_fine_actions)
        ground_truth_flushes_per_video.append(ground_truth_flushes)
        predicted_fine_steps, predicted_coarse_steps = predicted_steps
        predicted_fine_steps_per_video.append(predicted_fine_steps)
        predicted_coarse_steps_per_video.append(predicted_coarse_steps)
        predicted_fine_actions, predicted_coarse_actions = predicted_actions
        if not predicted_fine_actions:
            predicted_fine_actions = ['FAILED_TO_PREDICT']
        # Align predictions with the ground-truth suffix before scoring.
        predicted_fine_actions = extend_or_trim_predicted_actions(
            predicted_fine_actions, unobserved_fine_actions)
        predicted_fine_actions = np.array(predicted_fine_actions)
        predicted_fine_actions_per_video.append(predicted_fine_actions)
        unobserved_fine_actions = np.array(unobserved_fine_actions)
        unobserved_fine_actions_per_video.append(unobserved_fine_actions)
        if not predicted_coarse_actions:
            predicted_coarse_actions = ['FAILED_TO_PREDICT']
        predicted_coarse_actions = extend_or_trim_predicted_actions(
            predicted_coarse_actions, unobserved_coarse_actions)
        predicted_coarse_actions = np.array(predicted_coarse_actions)
        predicted_coarse_actions_per_video.append(predicted_coarse_actions)
        unobserved_coarse_actions = np.array(unobserved_coarse_actions)
        unobserved_coarse_actions_per_video.append(unobserved_coarse_actions)
    # Performance and Error Analysis
    f1_results_dict = {}
    moc_results_dict = {}
    unobserved_fractions = [0.1, 0.2, 0.3, 0.5, 0.7, 0.8]
    unobserved_fractions = [
        unobserved_fraction for unobserved_fraction in unobserved_fractions
        if fraction_observed + unobserved_fraction <= 1.0
    ]
    for unobserved_fraction in unobserved_fractions:
        save_analysis_path = os.path.join(
            args.checkpoint[:-4],
            str(fraction_observed) + '_' + str(unobserved_fraction))
        global_fraction_unobserved = 1.0 - fraction_observed
        predicted_fine_actions_per_video_sub, unobserved_fine_actions_per_video_sub = [], []
        predicted_coarse_actions_per_video_sub, unobserved_coarse_actions_per_video_sub = [], []
        f1_per_video = []  # file_name, coarse_0.5_f1, fine_0.5_f1
        for i, (predicted_fine_actions, unobserved_fine_actions,
                predicted_coarse_actions, unobserved_coarse_actions) in \
                enumerate(zip(predicted_fine_actions_per_video,
                              unobserved_fine_actions_per_video,
                              predicted_coarse_actions_per_video,
                              unobserved_coarse_actions_per_video)):
            # Convert the unobserved fraction (relative to the full video)
            # into a number of frames within the unobserved suffix.
            num_frames_to_grab = round(
                (len(unobserved_fine_actions) / global_fraction_unobserved) *
                unobserved_fraction)
            predicted_fine_actions_sub = predicted_fine_actions[:num_frames_to_grab]
            predicted_fine_actions_per_video_sub.append(predicted_fine_actions_sub)
            unobserved_fine_actions_sub = unobserved_fine_actions[:num_frames_to_grab]
            unobserved_fine_actions_per_video_sub.append(unobserved_fine_actions_sub)
            predicted_coarse_actions_sub = predicted_coarse_actions[:num_frames_to_grab]
            predicted_coarse_actions_per_video_sub.append(predicted_coarse_actions_sub)
            unobserved_coarse_actions_sub = unobserved_coarse_actions[:num_frames_to_grab]
            unobserved_coarse_actions_per_video_sub.append(unobserved_coarse_actions_sub)
            if do_error_analysis:
                predicted_fine_steps = predicted_fine_steps_per_video[i]
                steps_to_grab = compute_steps_to_grab(predicted_fine_steps,
                                                      num_frames_to_grab)
                predicted_fine_steps = predicted_fine_steps[:steps_to_grab]
                predicted_coarse_steps = \
                    predicted_coarse_steps_per_video[i][:steps_to_grab]
                coarse_actions_per_frame = (
                    observed_coarse_actions_per_video[i] +
                    unobserved_coarse_actions_per_video[i].tolist())
                analyse_hierarchical_observations_and_predictions(
                    predicted_fine_steps, predicted_coarse_steps,
                    observed_fine_actions_per_video[i],
                    observed_coarse_actions_per_video[i],
                    unobserved_fine_actions_sub, unobserved_coarse_actions_sub,
                    coarse_actions_per_frame_full=coarse_actions_per_frame,
                    save_path=save_analysis_path,
                    save_file_name=label_files[i])
                _, f1_fine_scores = compute_metrics([predicted_fine_actions_sub],
                                                    [unobserved_fine_actions_sub],
                                                    action_to_id=fine_action_to_id)
                _, f1_coarse_scores = compute_metrics([predicted_coarse_actions_sub],
                                                      [unobserved_coarse_actions_sub],
                                                      action_to_id=coarse_action_to_id)
                f1_per_video.append([label_files[i], f1_coarse_scores[-1],
                                     f1_fine_scores[-1]])
        if do_error_analysis:
            write_results_per_video(f1_per_video, order_by='coarse',
                                    metric_name='f1-0.5',
                                    save_path=save_analysis_path)
            write_results_per_video(f1_per_video, order_by='fine',
                                    metric_name='f1-0.5',
                                    save_path=save_analysis_path)
        if do_future_performance_analysis:
            analyse_performance_per_future_action(
                predicted_coarse_actions_per_video_sub,
                unobserved_coarse_actions_per_video_sub,
                transition_action_per_video=coarse_transition_action_per_video,
                save_path=save_analysis_path, extra_str='Coarse')
            analyse_performance_per_future_action(
                predicted_fine_actions_per_video_sub,
                unobserved_fine_actions_per_video_sub,
                transition_action_per_video=fine_transition_action_per_video,
                save_path=save_analysis_path, mode='a', extra_str='Fine')
        print('\nObserved fraction: %.2f | Unobserved fraction: %.2f' %
              (fraction_observed, unobserved_fraction))
        print('-> Fine')
        overlaps, f1_overlap_scores = compute_metrics(
            predicted_fine_actions_per_video_sub,
            unobserved_fine_actions_per_video_sub,
            action_to_id=fine_action_to_id)
        for overlap, overlap_f1_score in zip(overlaps, f1_overlap_scores):
            print('F1@%.2f: %.4f' % (overlap, overlap_f1_score))
            f1_results_dict[f'fine-{fraction_observed}_{unobserved_fraction}_{overlap}'] = overlap_f1_score
        fine_moc, _, _ = compute_moc(
            np.concatenate(predicted_fine_actions_per_video_sub),
            np.concatenate(unobserved_fine_actions_per_video_sub),
            action_to_id=fine_action_to_id)
        print(f'MoC: {fine_moc:.4f}')
        moc_results_dict[f'fine-moc-{fraction_observed}_{unobserved_fraction}'] = fine_moc
        print('-> Coarse')
        overlaps, f1_overlap_scores = compute_metrics(
            predicted_coarse_actions_per_video_sub,
            unobserved_coarse_actions_per_video_sub,
            action_to_id=coarse_action_to_id)
        for overlap, overlap_f1_score in zip(overlaps, f1_overlap_scores):
            print('F1@%.2f: %.4f' % (overlap, overlap_f1_score))
            f1_results_dict[f'coarse-{fraction_observed}_{unobserved_fraction}_{overlap}'] = overlap_f1_score
        coarse_moc, _, _ = compute_moc(
            np.concatenate(predicted_coarse_actions_per_video_sub),
            np.concatenate(unobserved_coarse_actions_per_video_sub),
            action_to_id=coarse_action_to_id)
        print(f'MoC: {coarse_moc:.4f}')
        moc_results_dict[f'coarse-moc-{fraction_observed}_{unobserved_fraction}'] = coarse_moc
    if do_flush_analysis:
        analyse_flushes_hierarchical(flushes_per_video,
                                     ground_truth_flushes_per_video,
                                     label_files,
                                     model.decoder_net.output_seq_len,
                                     save_path=args.checkpoint[:-4],
                                     encoder=False)
    results_dict = {**f1_results_dict, **moc_results_dict}
    return results_dict
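
# Usage sketch (hypothetical file names; `args` mirrors the attributes read
# above):
#
#   from argparse import Namespace
#   args = Namespace(checkpoint='checkpoints/hera_split1.tar',
#                    fine_labels_path='data/test/fine_labels',
#                    coarse_labels_path='data/test/coarse_labels',
#                    fine_action_to_id='data/fine_action_to_id.txt',
#                    coarse_action_to_id='data/coarse_action_to_id.txt',
#                    observed_fraction=0.3, ignore_silence_action='SIL',
#                    do_error_analysis=False,
#                    do_future_performance_analysis=False,
#                    do_flush_analysis=True)
#   results = test_hera(args)  # F1@overlap and MoC keyed by level and fractions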