Code Example #1
def main(out_dir=None, data_dir=None, labels_dir=None):
    out_dir = os.path.expanduser(out_dir)
    data_dir = os.path.expanduser(data_dir)
    labels_dir = os.path.expanduser(labels_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    filenames = [
        utils.stripExtension(fn)
        for fn in glob.glob(os.path.join(labels_dir, '*.csv'))
    ]

    metadata = utils.loadMetadata(data_dir)
    metadata['seq_id'] = metadata.index
    metadata = metadata.set_index(
        'dir_name', drop=False).loc[filenames].set_index('seq_id')

    seq_ids = np.sort(metadata.index.to_numpy())
    logger.info(f"Loaded {len(seq_ids)} sequences from {labels_dir}")

    vocab = []
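    # Parse each sequence's annotation CSV into a frame-level label sequence.
    # `vocab` starts empty here and is saved as the shared vocabulary below.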
    for i, seq_id in enumerate(seq_ids):
        seq_id_str = f"seq={seq_id}"
        seq_dir_name = metadata['dir_name'].loc[seq_id]
        labels_fn = os.path.join(labels_dir, f'{seq_dir_name}.csv')
        event_labels = utils.loadVariable(f'{seq_id_str}_labels', data_dir)

        assembly_actions = pd.read_csv(labels_fn)
        label_seq = parseActions(assembly_actions, event_labels.shape[0],
                                 vocab)
        utils.saveVariable(label_seq, f'{seq_id_str}_label-seq', out_data_dir)

        plotLabels(os.path.join(fig_dir, f'{seq_id_str}_labels.png'),
                   label_seq)
        writeLabels(os.path.join(fig_dir, f'{seq_id_str}_labels.csv'),
                    label_seq, vocab)

    utils.saveMetadata(metadata, out_data_dir)
    utils.saveVariable(vocab, 'vocab', out_data_dir)
Code Example #2
def main(out_dir=None,
         data_dir=None,
         annotation_dir=None,
         frames_dir=None,
         col_format='standard',
         win_params={},
         slowfast_csv_params={},
         label_types=('event', 'action', 'part')):
    out_dir = os.path.expanduser(out_dir)
    data_dir = os.path.expanduser(data_dir)
    annotation_dir = os.path.expanduser(annotation_dir)
    frames_dir = os.path.expanduser(frames_dir)

    annotation_dir = os.path.join(annotation_dir, 'action_annotations')

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_labels_dir = os.path.join(out_dir, 'labels')
    if not os.path.exists(out_labels_dir):
        os.makedirs(out_labels_dir)

    data_dirs = {
        name: os.path.join(out_dir, f"{name}-dataset")
        for name in label_types
    }
    for name, dir_ in data_dirs.items():
        if not os.path.exists(dir_):
            os.makedirs(dir_)

    event_vocab_df, part_vocab, action_vocab = load_vocabs(
        os.path.join(data_dir, 'ANU_ikea_dataset', 'indexing_files',
                     'atomic_action_list.txt'))
    event_vocab_df.to_csv(os.path.join(out_labels_dir, 'event-vocab.csv'))
    event_vocab = event_vocab_df.index.tolist()
    vocabs = {'event': event_vocab, 'action': action_vocab, 'part': part_vocab}
    vocabs = {label_name: vocabs[label_name] for label_name in label_types}
    for name, vocab in vocabs.items():
        utils.saveVariable(vocab, 'vocab', data_dirs[name])

    label_fn = os.path.join(annotation_dir, 'gt_segments.json')
    seq_ids, event_labels, metadata = load_action_labels(
        label_fn, event_vocab_df)
    utils.saveMetadata(metadata, out_labels_dir)
    for name, dir_ in data_dirs.items():
        utils.saveMetadata(metadata, dir_)

    logger.info(f"Loaded {len(seq_ids)} sequences from {label_fn}")

    part_names = [name for name in part_vocab if name != '']
    col_names = [f"{name}_active" for name in part_names]
    integerizers = {
        label_name: {name: i
                     for i, name in enumerate(label_vocab)}
        for label_name, label_vocab in vocabs.items()
    }

    all_slowfast_labels_seg = collections.defaultdict(list)
    all_slowfast_labels_win = collections.defaultdict(list)
    counts = np.zeros((len(action_vocab), len(part_vocab)), dtype=int)
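    # For each sequence: build frame-level event data, write labels in several
    # formats (per-frame arrays, SlowFast-style CSVs, plots), and tally
    # action-part co-occurrence counts.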
    for i, seq_id in enumerate(seq_ids):
        seq_id_str = f"seq={seq_id}"
        seq_dir_name = metadata['dir_name'].loc[seq_id]

        event_segs = event_labels[i]
        if not event_segs.any(axis=None):
            logger.warning(f"No event labels for sequence {seq_id}")
            continue

        event_data = make_event_data(
            event_segs,
            sorted(glob.glob(os.path.join(frames_dir, seq_dir_name,
                                          '*.jpg'))), integerizers['event'],
            integerizers['action'], integerizers['part'],
            event_vocab.index('NA'), action_vocab.index('NA'), False)

        event_wins = make_window_clips(event_data, vocabs['event'],
                                       vocabs['action'], **win_params)

        event_data.to_csv(os.path.join(out_labels_dir,
                                       f"{seq_id_str}_data.csv"),
                          index=False)
        event_segs.to_csv(os.path.join(out_labels_dir,
                                       f"{seq_id_str}_segs.csv"),
                          index=False)

        filenames = event_data['fn'].to_list()
        label_indices = {}
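        # For each label type, collect frame-level label indices and write
        # segment- and window-level labels in SlowFast CSV format.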
        for name in label_types:
            if name == 'part':
                label_indices[name] = event_data[col_names].to_numpy()
                seg_labels_slowfast = make_slowfast_labels(
                    event_segs[['start', 'end']],
                    getActivePart(event_segs[col_names], part_names),
                    event_data['fn'],
                    integerizers[name],
                    col_format=col_format)
                win_labels_slowfast = make_slowfast_labels(
                    event_wins[['start', 'end']],
                    getActivePart(event_wins[col_names], part_names),
                    event_data['fn'],
                    integerizers[name],
                    col_format=col_format)
            else:
                label_indices[name] = event_data[name].to_numpy()
                seg_labels_slowfast = make_slowfast_labels(
                    event_segs[['start', 'end']],
                    event_segs[name],
                    event_data['fn'],
                    integerizers[name],
                    col_format=col_format)
                win_labels_slowfast = make_slowfast_labels(
                    event_wins[['start', 'end']],
                    event_wins[name],
                    event_data['fn'],
                    integerizers[name],
                    col_format=col_format)
            utils.saveVariable(filenames, f'{seq_id_str}_frame-fns',
                               data_dirs[name])
            utils.saveVariable(label_indices[name], f'{seq_id_str}_labels',
                               data_dirs[name])
            seg_labels_slowfast.to_csv(
                os.path.join(data_dirs[name],
                             f'{seq_id_str}_slowfast-labels_seg.csv'),
                **slowfast_csv_params)
            win_labels_slowfast.to_csv(
                os.path.join(data_dirs[name],
                             f'{seq_id_str}_slowfast-labels_win.csv'),
                **slowfast_csv_params)
            all_slowfast_labels_seg[name].append(seg_labels_slowfast)
            all_slowfast_labels_win[name].append(win_labels_slowfast)

        plot_event_labels(os.path.join(fig_dir, f"{seq_id_str}.png"),
                          label_indices['event'], label_indices['action'],
                          label_indices['part'], event_vocab, action_vocab,
                          part_names)

        # Tally action-part co-occurrences for this sequence.
        for part_activity_row, action_index in zip(label_indices['part'],
                                                   label_indices['action']):
            for col, is_active in enumerate(part_activity_row):
                part_index = integerizers['part'][part_names[col]]
                counts[action_index, part_index] += int(is_active)

    for name, labels in all_slowfast_labels_seg.items():
        pd.concat(labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_seg.csv'),
            **slowfast_csv_params)
    for name, labels in all_slowfast_labels_win.items():
        pd.concat(labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_win.csv'),
            **slowfast_csv_params)

    utils.saveVariable(counts, 'action-part-counts', out_labels_dir)

    plt.matshow(counts)
    plt.xticks(ticks=range(len(part_vocab)),
               labels=part_vocab,
               rotation='vertical')
    plt.yticks(ticks=range(len(action_vocab)), labels=action_vocab)
    plt.savefig(os.path.join(fig_dir, 'action-part-coocurrence.png'),
                bbox_inches='tight')
Code Example #3
def saveVariable(var, var_name, to_dir=out_data_dir):
    utils.saveVariable(var, var_name, to_dir)
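The wrapper above only pins the destination directory. Throughout these examples, anything written with utils.saveVariable is read back later with utils.loadVariable under the same variable name and directory. Below is a minimal sketch of that round trip; the directory and variable name are illustrative, and it assumes loadVariable returns exactly what saveVariable stored.

import os
import numpy as np
# `utils` is the project's own utility module, as imported in the examples above.

out_data_dir = os.path.expanduser('~/output/data')  # hypothetical location
os.makedirs(out_data_dir, exist_ok=True)
label_seq = np.zeros(10, dtype=int)  # hypothetical per-sequence labels

# Write the array under a per-sequence name, then read it back by the same name.
utils.saveVariable(label_seq, 'seq=1_label-seq', out_data_dir)
restored = utils.loadVariable('seq=1_label-seq', out_data_dir)
assert (restored == label_seq).all()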
Code Example #4
def main(
        out_dir=None, data_dir=None, use_vid_ids_from=None,
        output_data=None, magnitude_centering=None, resting_from_gt=None,
        remove_before_first_touch=None, include_signals=None, fig_type=None):

    data_dir = os.path.expanduser(data_dir)
    out_dir = os.path.expanduser(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    logger.info(f"Reading from: {data_dir}")
    logger.info(f"Writing to: {out_dir}")

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    def loadAll(seq_ids, var_name, from_dir=data_dir, prefix='trial='):
        all_data = tuple(
            utils.loadVariable(f"{prefix}{seq_id}_{var_name}", from_dir)
            for seq_id in seq_ids
        )
        return all_data

    def saveVariable(var, var_name, to_dir=out_data_dir):
        utils.saveVariable(var, var_name, to_dir)

    if fig_type is None:
        fig_type = 'multi'

    # Load data
    if use_vid_ids_from is None:
        trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
    else:
        use_vid_ids_from = os.path.expanduser(use_vid_ids_from)
        trial_ids = utils.getUniqueIds(use_vid_ids_from, prefix='trial-', to_array=True)

    accel_seqs = loadAll(trial_ids, 'accel-samples.pkl')
    gyro_seqs = loadAll(trial_ids, 'gyro-samples.pkl')
    action_seqs = loadAll(trial_ids, 'action-seq.pkl')
    rgb_timestamp_seqs = loadAll(trial_ids, 'rgb-frame-timestamp-seq.pkl')

    def validate_imu(seqs):
        def is_valid(d):
            return not any(np.isnan(x).any() for x in d.values())
        return np.array([is_valid(d) for d in seqs])

    imu_is_valid = validate_imu(accel_seqs) & validate_imu(gyro_seqs)
    logger.info(
        f"Ignoring {(~imu_is_valid).sum()} IMU sequences with NaN-valued samples "
        f"(of {len(imu_is_valid)} total)"
    )

    def chooseValid(seq):
        return tuple(x for x, is_valid in zip(seq, imu_is_valid) if is_valid)
    trial_ids = np.array(list(chooseValid(trial_ids)))
    accel_seqs = chooseValid(accel_seqs)
    gyro_seqs = chooseValid(gyro_seqs)
    action_seqs = chooseValid(action_seqs)
    rgb_timestamp_seqs = chooseValid(rgb_timestamp_seqs)

    vocab = []
    metadata = utils.loadMetadata(data_dir, rows=trial_ids)
    utils.saveMetadata(metadata, out_data_dir)
    utils.saveVariable(vocab, 'vocab', out_data_dir)

    def norm(x):
        # Per-sample L2 norm of the IMU samples, as a column vector.
        return np.linalg.norm(imu.getImuSamples(x), axis=1)[:, None]
    accel_mag_seqs = tuple(map(lambda x: dictToArray(x, transform=norm), accel_seqs))
    gyro_mag_seqs = tuple(map(lambda x: dictToArray(x, transform=norm), gyro_seqs))

    imu_timestamp_seqs = utils.batchProcess(makeTimestamps, accel_seqs, gyro_seqs)

    if remove_before_first_touch:
        before_first_touch_seqs = utils.batchProcess(
            beforeFirstTouch, action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs
        )

        num_ignored = sum(b is None for b in before_first_touch_seqs)
        logger.info(
            f"Ignoring {num_ignored} sequences without first-touch annotations "
            f"(of {len(before_first_touch_seqs)} total)"
        )
        trials_missing_first_touch = [
            i for b, i in zip(before_first_touch_seqs, trial_ids)
            if b is None
        ]
        logger.info(f"Trials without first touch: {trials_missing_first_touch}")

        def clip(signal, bool_array):
            return signal[~bool_array, ...]
        accel_mag_seqs = tuple(
            clip(signal, b) for signal, b in zip(accel_mag_seqs, before_first_touch_seqs)
            if b is not None
        )
        gyro_mag_seqs = tuple(
            clip(signal, b) for signal, b in zip(gyro_mag_seqs, before_first_touch_seqs)
            if b is not None
        )
        imu_timestamp_seqs = tuple(
            clip(signal, b) for signal, b in zip(imu_timestamp_seqs, before_first_touch_seqs)
            if b is not None
        )
        trial_ids = tuple(
            x for x, b in zip(trial_ids, before_first_touch_seqs)
            if b is not None
        )
        action_seqs = tuple(
            x for x, b in zip(action_seqs, before_first_touch_seqs)
            if b is not None
        )
        rgb_timestamp_seqs = tuple(
            x for x, b in zip(rgb_timestamp_seqs, before_first_touch_seqs)
            if b is not None
        )

    assembly_seqs = utils.batchProcess(
        parseActions,
        action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs
    )

    if output_data == 'components':
        accel_feat_seqs = accel_mag_seqs
        gyro_feat_seqs = gyro_mag_seqs
        unique_components = {frozenset(): 0}
        imu_label_seqs = zip(
            *tuple(
                labels.componentLabels(*args, unique_components)
                for args in zip(action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs)
            )
        )
        saveVariable(unique_components, 'unique_components')
    elif output_data == 'pairwise components':
        imu_label_seqs = utils.batchProcess(
            labels.pairwiseComponentLabels, assembly_seqs,
            static_kwargs={'lower_tri_only': True, 'include_action_labels': False}
        )
        accel_feat_seqs = tuple(map(imu.pairwiseFeats, accel_mag_seqs))
        gyro_feat_seqs = tuple(map(imu.pairwiseFeats, gyro_mag_seqs))
    else:
        raise AssertionError(f"Unrecognized output_data: {output_data}")

    signals = {'accel': accel_feat_seqs, 'gyro': gyro_feat_seqs}
    if include_signals is None:
        include_signals = tuple(signals.keys())
    signals = tuple(signals[key] for key in include_signals)
    imu_feature_seqs = tuple(np.stack(x, axis=-1).squeeze(axis=-1) for x in zip(*signals))

    video_seqs = tuple(zip(imu_feature_seqs, imu_label_seqs, trial_ids))
    imu.plot_prediction_eg(video_seqs, fig_dir, fig_type=fig_type, output_data=output_data)

    video_seqs = tuple(
        zip(assembly_seqs, imu_feature_seqs, imu_timestamp_seqs, imu_label_seqs, trial_ids)
    )
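    # Save per-trial assembly, feature, timestamp, and label sequences.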
    for assembly_seq, feature_seq, timestamp_seq, label_seq, trial_id in video_seqs:
        id_string = f"trial={trial_id}"
        saveVariable(assembly_seq, f'{id_string}_assembly-seq')
        saveVariable(feature_seq, f'{id_string}_feature-seq')
        saveVariable(timestamp_seq, f'{id_string}_timestamp-seq')
        saveVariable(label_seq, f'{id_string}_label-seq')
Code Example #5
def main(out_dir=None,
         data_dir=None,
         event_scores_dir=None,
         action_scores_dir=None,
         part_scores_dir=None,
         as_atomic_events=False,
         only_fold=None,
         plot_io=None,
         prefix='seq=',
         results_file=None,
         sweep_param_name=None,
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    event_scores_dir = os.path.expanduser(event_scores_dir)
    action_scores_dir = os.path.expanduser(action_scores_dir)
    part_scores_dir = os.path.expanduser(part_scores_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    loader = DataLoader(
        {
            'event': os.path.join(data_dir, 'event-dataset'),
            'action': os.path.join(data_dir, 'action-dataset'),
            'part': os.path.join(data_dir, 'part-dataset')
        }, {
            'event': event_scores_dir,
            'action': action_scores_dir,
            'part': part_scores_dir
        },
        prefix=prefix)
    logger.info(
        f"Loaded ids for {len(loader.seq_ids)} sequences from {data_dir}")

    all_metrics = collections.defaultdict(list)

    if as_atomic_events:
        map_df = pd.read_csv(os.path.join(data_dir, 'labels',
                                          'event-vocab.csv'),
                             index_col=False,
                             keep_default_na=False)
    else:
        map_df = pd.DataFrame(
            [[f"{action}({part})", action] +
             [part == n for n in loader.vocabs['part'] if n]
             for action in loader.vocabs['action']
             for part in loader.vocabs['part']],
            columns=['event', 'action'] +
            [f"{n}_active" for n in loader.vocabs['part'] if n])
    event_vocab = map_df['event'].to_list()
    utils.saveVariable(event_vocab, 'vocab', out_data_dir)

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(loader.seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    for cv_index, cv_fold in enumerate(cv_folds):
        if only_fold is not None and cv_index != only_fold:
            continue

        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )

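        # Build the attribute classifier from action-part co-occurrence counts
        # accumulated over the training fold.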
        action_part_counts = sum(
            count_action_part_bigrams(loader.vocabs['action'],
                                      loader.vocabs['part'], la, lp)
            for la, lp in loader._getLabels(train_indices,
                                            label_name=['action', 'part'],
                                            load_from_data=True))
        model = AttributeClassifier(action_part_counts)

        plot_action_part_bigrams(
            loader.vocabs['action'], loader.vocabs['part'],
            (model.action_part_counts > 0).astype(int),
            os.path.join(fig_dir,
                         f'cvfold={cv_index}_action-part-coocurrence.png'))

        for i in test_indices:
            seq_id = loader.seq_ids[i]
            logger.info(f"  Processing sequence {seq_id}...")

            action_score_seq, part_score_seq, true_action_seq, true_part_seq = loader[
                seq_id]
            event_score_seq = model.forward(action_score_seq, part_score_seq)
            pred_action_seq, pred_part_seq = model.predict(event_score_seq)

            true_event_seq = ap_to_event(true_action_seq, true_part_seq,
                                         map_df, loader.vocabs)
            pred_event_seq = ap_to_event(pred_action_seq, pred_part_seq,
                                         map_df, loader.vocabs)

            metric_dict = {}
            metric_dict = eval_metrics(pred_event_seq,
                                       true_event_seq,
                                       name_prefix='Event ',
                                       append_to=metric_dict)
            metric_dict = eval_metrics(pred_action_seq,
                                       true_action_seq,
                                       name_prefix='Action ',
                                       append_to=metric_dict)
            metric_dict = eval_metrics(pred_part_seq,
                                       true_part_seq,
                                       name_prefix='Part ',
                                       append_to=metric_dict)
            for name, value in metric_dict.items():
                logger.info(f"    {name}: {value * 100:.2f}%")
                all_metrics[name].append(value)

            seq_id_str = f"seq={seq_id}"
            utils.saveVariable(event_score_seq, f'{seq_id_str}_score-seq',
                               out_data_dir)
            utils.saveVariable(true_event_seq, f'{seq_id_str}_true-label-seq',
                               out_data_dir)
            utils.saveVariable(pred_event_seq, f'{seq_id_str}_pred-label-seq',
                               out_data_dir)
            utils.writeResults(results_file, metric_dict, sweep_param_name,
                               model_params)

            if plot_io:
                utils.plot_array(
                    event_score_seq.reshape(event_score_seq.shape[0], -1).T,
                    (true_event_seq, pred_event_seq), ('true', 'pred'),
                    fn=os.path.join(fig_dir, f"seq={seq_id:03d}_event.png"))
                utils.plot_array(action_score_seq.T,
                                 (true_action_seq, pred_action_seq),
                                 ('true', 'pred'),
                                 fn=os.path.join(
                                     fig_dir, f"seq={seq_id:03d}_action.png"))
                utils.plot_array(part_score_seq.T,
                                 (true_part_seq, pred_part_seq),
                                 ('true', 'pred'),
                                 fn=os.path.join(fig_dir,
                                                 f"seq={seq_id:03d}_part.png"))
Code Example #6
def main(out_dir=None,
         data_dir=None,
         segs_dir=None,
         scores_dir=None,
         vocab_dir=None,
         label_type='edge',
         gpu_dev_id=None,
         start_from=None,
         stop_at=None,
         num_disp_imgs=None,
         results_file=None,
         sweep_param_name=None,
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    segs_dir = os.path.expanduser(segs_dir)
    scores_dir = os.path.expanduser(scores_dir)
    vocab_dir = os.path.expanduser(vocab_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    io_dir_images = os.path.join(fig_dir, 'model-io_images')
    if not os.path.exists(io_dir_images):
        os.makedirs(io_dir_images)

    io_dir_plots = os.path.join(fig_dir, 'model-io_plots')
    if not os.path.exists(io_dir_plots):
        os.makedirs(io_dir_plots)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    seq_ids = utils.getUniqueIds(scores_dir,
                                 prefix='trial=',
                                 suffix='score-seq.*',
                                 to_array=True)

    logger.info(
        f"Loaded scores for {len(seq_ids)} sequences from {scores_dir}")

    link_vocab = {}
    joint_vocab = {}
    joint_type_vocab = {}
    vocab, parts_vocab, part_labels = load_vocab(link_vocab, joint_vocab,
                                                 joint_type_vocab, vocab_dir)
    pred_vocab = []  # FIXME

    if label_type == 'assembly':
        logger.info("Converting assemblies -> edges")
        state_pred_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_pred-label-seq", scores_dir)
            for seq_id in seq_ids)
        state_true_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_true-label-seq", scores_dir)
            for seq_id in seq_ids)
        edge_pred_seqs = tuple(part_labels[seq] for seq in state_pred_seqs)
        edge_true_seqs = tuple(part_labels[seq] for seq in state_true_seqs)
    elif label_type == 'edge':
        logger.info("Converting edges -> assemblies (will take a few minutes)")
        edge_pred_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_pred-label-seq", scores_dir)
            for seq_id in seq_ids)
        edge_true_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_true-label-seq", scores_dir)
            for seq_id in seq_ids)
        state_pred_seqs = tuple(
            edges_to_assemblies(seq, pred_vocab, parts_vocab, part_labels)
            for seq in edge_pred_seqs)
        state_true_seqs = tuple(
            edges_to_assemblies(seq, vocab, parts_vocab, part_labels)
            for seq in edge_true_seqs)

    device = torchutils.selectDevice(gpu_dev_id)
    dataset = sim2real.LabeledConnectionDataset(
        utils.loadVariable('parts-vocab', vocab_dir),
        utils.loadVariable('part-labels', vocab_dir),
        utils.loadVariable('vocab', vocab_dir),
        device=device)

    all_metrics = collections.defaultdict(list)

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    for cv_index, cv_fold in enumerate(cv_folds):
        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )

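        # Compare edge-frequency statistics (unigram and bigram) between the
        # train and test folds; the plots below visualize both.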
        train_states = np.hstack(
            tuple(state_true_seqs[i] for i in (train_indices)))
        train_edges = part_labels[train_states]
        # state_train_vocab = np.unique(train_states)
        # edge_train_vocab = part_labels[state_train_vocab]
        train_freq_bigram, train_freq_unigram = edge_joint_freqs(train_edges)
        # state_probs = utils.makeHistogram(len(vocab), train_states, normalize=True)

        test_states = np.hstack(
            tuple(state_true_seqs[i] for i in (test_indices)))
        test_edges = part_labels[test_states]
        # state_test_vocab = np.unique(test_states)
        # edge_test_vocab = part_labels[state_test_vocab]
        test_freq_bigram, test_freq_unigram = edge_joint_freqs(test_edges)

        f, axes = plt.subplots(1, 2)
        axes[0].matshow(train_freq_bigram)
        axes[0].set_title('Train')
        axes[1].matshow(test_freq_bigram)
        axes[1].set_title('Test')
        plt.tight_layout()
        plt.savefig(
            os.path.join(fig_dir, f"edge-freqs-bigram_cvfold={cv_index}.png"))

        f, axis = plt.subplots(1)
        axis.stem(train_freq_unigram,
                  label='Train',
                  linefmt='C0-',
                  markerfmt='C0o')
        axis.stem(test_freq_unigram,
                  label='Test',
                  linefmt='C1--',
                  markerfmt='C1o')
        plt.legend()
        plt.tight_layout()
        plt.savefig(
            os.path.join(fig_dir, f"edge-freqs-unigram_cvfold={cv_index}.png"))

        for i in test_indices:
            seq_id = seq_ids[i]
            logger.info(f"  Processing sequence {seq_id}...")

            trial_prefix = f"trial={seq_id}"
            # I include the '.' to differentiate between 'rgb-frame-seq' and
            # 'rgb-frame-seq-before-first-touch'
            # rgb_seq = utils.loadVariable(f"{trial_prefix}_rgb-frame-seq.", data_dir)
            # seg_seq = utils.loadVariable(f"{trial_prefix}_seg-labels-seq", segs_dir)
            score_seq = utils.loadVariable(f"{trial_prefix}_score-seq",
                                           scores_dir)
            # if score_seq.shape[0] != rgb_seq.shape[0]:
            #     err_str = f"scores shape {score_seq.shape} != data shape {rgb_seq.shape}"
            #     raise AssertionError(err_str)

            edge_pred_seq = edge_pred_seqs[i]
            edge_true_seq = edge_true_seqs[i]
            state_pred_seq = state_pred_seqs[i]
            state_true_seq = state_true_seqs[i]

            num_types = np.unique(state_pred_seq).shape[0]
            num_samples = state_pred_seq.shape[0]
            num_total = len(pred_vocab)
            logger.info(
                f"    {num_types} assemblies predicted ({num_total} total); "
                f"{num_samples} samples")

            # edge_freq_bigram, edge_freq_unigram = edge_joint_freqs(edge_true_seq)
            # dist_shift = np.linalg.norm(train_freq_unigram - edge_freq_unigram)
            metric_dict = {
                # 'State OOV rate': oov_rate_state(state_true_seq, state_train_vocab),
                # 'Edge OOV rate': oov_rate_edges(edge_true_seq, edge_train_vocab),
                # 'State avg prob, true': state_probs[state_true_seq].mean(),
                # 'State avg prob, pred': state_probs[state_pred_seq].mean(),
                # 'Edge distribution shift': dist_shift
            }
            metric_dict = eval_edge_metrics(edge_pred_seq,
                                            edge_true_seq,
                                            append_to=metric_dict)
            metric_dict = eval_state_metrics(state_pred_seq,
                                             state_true_seq,
                                             append_to=metric_dict)
            for name, value in metric_dict.items():
                logger.info(f"    {name}: {value * 100:.2f}%")
                all_metrics[name].append(value)

            utils.writeResults(results_file, metric_dict, sweep_param_name,
                               model_params)

            if num_disp_imgs is not None:
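                # Render the predicted and true assembly segments as image grids.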
                pred_images = tuple(
                    render(dataset, vocab[seg_label])
                    for seg_label in utils.computeSegments(state_pred_seq)[0])
                imageprocessing.displayImages(
                    *pred_images,
                    file_path=os.path.join(
                        io_dir_images,
                        f"seq={seq_id:03d}_pred-assemblies.png"),
                    num_rows=None,
                    num_cols=5)
                true_images = tuple(
                    render(dataset, vocab[seg_label])
                    for seg_label in utils.computeSegments(state_true_seq)[0])
                imageprocessing.displayImages(
                    *true_images,
                    file_path=os.path.join(
                        io_dir_images,
                        f"seq={seq_id:03d}_true-assemblies.png"),
                    num_rows=None,
                    num_cols=5)

                utils.plot_array(score_seq.T,
                                 (edge_true_seq.T, edge_pred_seq.T),
                                 ('true', 'pred'),
                                 fn=os.path.join(io_dir_plots,
                                                 f"seq={seq_id:03d}.png"))
Code Example #7
File: decode.py Project: jd-jones/kinemparse
    def save_vocabs(self, out_dir):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        for name, vocab in self.vocabs.items():
            utils.saveVariable(vocab, name.replace('_', '-'), out_dir)
Code Example #8
def main(out_dir=None,
         data_dir=None,
         assembly_data_dir=None,
         scores_dir=None,
         event_attr_fn=None,
         connection_attr_fn=None,
         assembly_attr_fn=None,
         only_fold=None,
         plot_io=None,
         prefix='seq=',
         stop_after=None,
         background_action='',
         model_params={},
         cv_params={},
         stride=None,
         results_file=None,
         sweep_param_name=None):

    data_dir = os.path.expanduser(data_dir)
    assembly_data_dir = os.path.expanduser(assembly_data_dir)
    scores_dir = os.path.expanduser(scores_dir)
    event_attr_fn = os.path.expanduser(event_attr_fn)
    connection_attr_fn = os.path.expanduser(connection_attr_fn)
    assembly_attr_fn = os.path.expanduser(assembly_attr_fn)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    misc_dir = os.path.join(out_dir, 'misc')
    if not os.path.exists(misc_dir):
        os.makedirs(misc_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    seq_ids = utils.getUniqueIds(data_dir,
                                 prefix=prefix,
                                 suffix='labels.*',
                                 to_array=True)

    dataset = utils.FeaturelessCvDataset(seq_ids,
                                         data_dir,
                                         prefix=prefix,
                                         label_fn_format='labels')

    logger.info(
        f"Loaded scores for {len(seq_ids)} sequences from {scores_dir}")

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    # Load event, connection attributes
    assembly_vocab = tuple(
        tuple(sorted(tuple(sorted(joint)) for joint in a))
        for a in utils.loadVariable('vocab', assembly_data_dir))
    (*probs, assembly_transition_probs), vocabs = loadPartInfo(
        event_attr_fn,
        connection_attr_fn,
        assembly_attr_fn,
        background_action=background_action,
        assembly_vocab=assembly_vocab)
    event_assembly_scores = event_to_assembly_scores(*probs, vocabs)
    assembly_transition_scores = np.log(assembly_transition_probs)
    viz_transition_probs(os.path.join(fig_dir, 'action-transitions'),
                         np.exp(event_assembly_scores), vocabs['event_vocab'])
    write_transition_probs(os.path.join(misc_dir, 'action-transitions'),
                           np.exp(event_assembly_scores),
                           vocabs['event_vocab'], vocabs['assembly_vocab'])

    for cv_index, cv_fold in enumerate(cv_folds):
        if only_fold is not None and cv_index != only_fold:
            continue

        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )

        train_data, val_data, test_data = dataset.getFold(cv_fold)

        cv_str = f'cvfold={cv_index}'

        class_priors, event_dur_probs = count_priors(train_data[0],
                                                     len(dataset.vocab),
                                                     stride=stride,
                                                     approx_upto=0.95,
                                                     support_only=True)
        event_dur_scores = np.log(event_dur_probs)
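        # The duration scores are zeroed out on the next line, so event
        # durations do not influence decoding here.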
        event_dur_scores = np.zeros_like(event_dur_scores)
        scores = (event_dur_scores, event_assembly_scores,
                  assembly_transition_scores)

        model = decode.AssemblyActionRecognizer(scores, vocabs, model_params)

        viz_priors(os.path.join(fig_dir, f'{cv_str}_priors'), class_priors,
                   event_dur_probs)
        model.write_fsts(os.path.join(misc_dir, f'{cv_str}_fsts'))
        model.save_vocabs(os.path.join(out_data_dir, f'{cv_str}_model-vocabs'))

        for i, (_, seq_id) in enumerate(zip(*test_data)):
            if stop_after is not None and i >= stop_after:
                break

            trial_prefix = f"{prefix}{seq_id}"

            if model_params['return_label'] == 'input':
                true_seq = utils.loadVariable(f"{trial_prefix}_true-label-seq",
                                              scores_dir)
            elif model_params['return_label'] == 'output':
                try:
                    true_seq = utils.loadVariable(f"{trial_prefix}_label-seq",
                                                  assembly_data_dir)
                    true_seq = true_seq[::stride]
                except AssertionError:
                    # logger.info(f'  Skipping sequence {seq_id}: {e}')
                    continue

            logger.info(f"  Processing sequence {seq_id}...")

            event_score_seq = utils.loadVariable(f"{trial_prefix}_score-seq",
                                                 scores_dir)

            if event_score_seq.shape[0] != true_seq.shape[0]:
                err_str = (f'Event scores shape {event_score_seq.shape} '
                           f'!= labels shape {true_seq.shape}')
                raise AssertionError(err_str)

            # FIXME: the serialized variables are probs, not log-probs
            # event_score_seq = suppress_nonmax(event_score_seq)
            # event_score_seq = np.log(event_score_seq)

            decode_score_seq = model.forward(event_score_seq)
            pred_seq = model.predict(decode_score_seq)

            metric_dict = eval_metrics(pred_seq, true_seq)
            for name, value in metric_dict.items():
                logger.info(f"    {name}: {value * 100:.2f}%")
            utils.writeResults(results_file, metric_dict, sweep_param_name,
                               model_params)

            utils.saveVariable(decode_score_seq, f'{trial_prefix}_score-seq',
                               out_data_dir)
            utils.saveVariable(pred_seq, f'{trial_prefix}_pred-label-seq',
                               out_data_dir)
            utils.saveVariable(true_seq, f'{trial_prefix}_true-label-seq',
                               out_data_dir)

            if plot_io:
                utils.plot_array(event_score_seq.T, (pred_seq.T, true_seq.T),
                                 ('pred', 'true'),
                                 fn=os.path.join(fig_dir,
                                                 f"seq={seq_id:03d}.png"))
                write_labels(
                    os.path.join(misc_dir, f"seq={seq_id:03d}_pred-seq.txt"),
                    pred_seq, model.output_vocab.as_raw())
                write_labels(
                    os.path.join(misc_dir, f"seq={seq_id:03d}_true-seq.txt"),
                    true_seq, model.output_vocab.as_raw())
Code Example #9
def main(out_dir=None,
         data_dir=None,
         model_name=None,
         predict_mode='classify',
         gpu_dev_id=None,
         batch_size=None,
         learning_rate=None,
         independent_signals=None,
         active_only=None,
         output_dim_from_vocab=False,
         prefix='trial=',
         feature_fn_format='feature-seq.pkl',
         label_fn_format='label_seq.pkl',
         dataset_params={},
         model_params={},
         cv_params={},
         train_params={},
         viz_params={},
         metric_names=['Loss', 'Accuracy', 'Precision', 'Recall', 'F1'],
         plot_predictions=None,
         results_file=None,
         sweep_param_name=None):

    data_dir = os.path.expanduser(data_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    def saveVariable(var, var_name, to_dir=out_data_dir):
        return utils.saveVariable(var, var_name, to_dir)

    # Load data
    device = torchutils.selectDevice(gpu_dev_id)
    trial_ids = utils.getUniqueIds(data_dir,
                                   prefix=prefix,
                                   suffix=feature_fn_format,
                                   to_array=True)
    dataset = utils.CvDataset(
        trial_ids,
        data_dir,
        prefix=prefix,
        feature_fn_format=feature_fn_format,
        label_fn_format=label_fn_format,
    )
    utils.saveMetadata(dataset.metadata, out_data_dir)
    utils.saveVariable(dataset.vocab, 'vocab', out_data_dir)

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(trial_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

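    # Choose the loss function and label dtype according to the prediction mode.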
    if predict_mode == 'binary multiclass':
        # criterion = torchutils.BootstrappedCriterion(
        #     0.25, base_criterion=torch.nn.functional.binary_cross_entropy_with_logits,
        # )
        criterion = torch.nn.BCEWithLogitsLoss()
        labels_dtype = torch.float
    elif predict_mode == 'multiclass':
        criterion = torch.nn.CrossEntropyLoss()
        labels_dtype = torch.long
    elif predict_mode == 'classify':
        criterion = torch.nn.CrossEntropyLoss()
        labels_dtype = torch.long
    else:
        raise AssertionError(f"Unrecognized predict_mode: {predict_mode}")

    def make_dataset(feats, labels, ids, shuffle=True):
        dataset = torchutils.SequenceDataset(feats,
                                             labels,
                                             device=device,
                                             labels_dtype=labels_dtype,
                                             seq_ids=ids,
                                             **dataset_params)
        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=shuffle)
        return dataset, loader

    for cv_index, cv_fold in enumerate(cv_folds):
        train_data, val_data, test_data = dataset.getFold(cv_fold)
        if independent_signals:
            train_data = splitSeqs(*train_data, active_only=active_only)
            val_data = splitSeqs(*val_data, active_only=active_only)
            test_data = splitSeqs(*test_data, active_only=False)
        train_set, train_loader = make_dataset(*train_data, shuffle=True)
        test_set, test_loader = make_dataset(*test_data, shuffle=False)
        val_set, val_loader = make_dataset(*val_data, shuffle=True)

        logger.info(
            f'CV fold {cv_index + 1} / {len(cv_folds)}: {len(dataset.trial_ids)} total '
            f'({len(train_set)} train, {len(val_set)} val, {len(test_set)} test)'
        )

        logger.info(f'{train_set.num_label_types} unique labels in train set; '
                    f'vocab size is {len(dataset.vocab)}')

        input_dim = train_set.num_obsv_dims
        output_dim = train_set.num_label_types
        if output_dim_from_vocab:
            output_dim = len(dataset.vocab)

        if model_name == 'linear':
            model = torchutils.LinearClassifier(
                input_dim, output_dim, **model_params).to(device=device)
        elif model_name == 'conv':
            model = ConvClassifier(input_dim, output_dim,
                                   **model_params).to(device=device)
        elif model_name == 'TCN':
            if predict_mode == 'multiclass':
                num_multiclass = train_set[0][1].shape[-1]
                output_dim = max([
                    train_set.num_label_types, test_set.num_label_types,
                    val_set.num_label_types
                ])
            else:
                num_multiclass = None
            model = TcnClassifier(input_dim,
                                  output_dim,
                                  num_multiclass=num_multiclass,
                                  **model_params).to(device=device)
        elif model_name == 'LSTM':
            if predict_mode == 'multiclass':
                num_multiclass = train_set[0][1].shape[-1]
                output_dim = max([
                    train_set.num_label_types, test_set.num_label_types,
                    val_set.num_label_types
                ])
            else:
                num_multiclass = None
            model = LstmClassifier(input_dim,
                                   output_dim,
                                   num_multiclass=num_multiclass,
                                   **model_params).to(device=device)
        else:
            raise AssertionError(f"Unrecognized model_name: {model_name}")

        optimizer_ft = torch.optim.Adam(model.parameters(),
                                        lr=learning_rate,
                                        betas=(0.9, 0.999),
                                        eps=1e-08,
                                        weight_decay=0,
                                        amsgrad=False)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft,
                                                       step_size=1,
                                                       gamma=1.00)

        train_epoch_log = collections.defaultdict(list)
        val_epoch_log = collections.defaultdict(list)
        metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
        model, last_model_wts = torchutils.trainModel(
            model,
            criterion,
            optimizer_ft,
            lr_scheduler,
            train_loader,
            val_loader,
            device=device,
            metrics=metric_dict,
            train_epoch_log=train_epoch_log,
            val_epoch_log=val_epoch_log,
            **train_params)

        # Test model
        metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
        test_io_history = torchutils.predictSamples(
            model.to(device=device),
            test_loader,
            criterion=criterion,
            device=device,
            metrics=metric_dict,
            data_labeled=True,
            update_model=False,
            seq_as_batch=train_params['seq_as_batch'],
            return_io_history=True)
        if independent_signals:
            test_io_history = tuple(joinSeqs(test_io_history))

        logger.info('[TST]  ' +
                    '  '.join(str(m) for m in metric_dict.values()))
        utils.writeResults(results_file,
                           {k: v.value
                            for k, v in metric_dict.items()}, sweep_param_name,
                           model_params)

        if plot_predictions:
            io_fig_dir = os.path.join(fig_dir, 'model-io')
            if not os.path.exists(io_fig_dir):
                os.makedirs(io_fig_dir)

            label_names = ('gt', 'pred')
            preds, scores, inputs, gt_labels, ids = zip(*test_io_history)
            for batch in test_io_history:
                batch = tuple(
                    x.cpu().numpy() if isinstance(x, torch.Tensor) else x
                    for x in batch)
                for preds, _, inputs, gt_labels, seq_id in zip(*batch):
                    fn = os.path.join(io_fig_dir,
                                      f"{prefix}{seq_id}_model-io.png")
                    utils.plot_array(inputs, (gt_labels.T, preds.T),
                                     label_names,
                                     fn=fn)

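        # Save per-trial predictions, scores, and ground-truth labels.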
        for batch in test_io_history:
            batch = tuple(x.cpu().numpy() if isinstance(x, torch.Tensor) else x
                          for x in batch)
            for pred_seq, score_seq, feat_seq, label_seq, trial_id in zip(
                    *batch):
                saveVariable(pred_seq, f'{prefix}{trial_id}_pred-label-seq')
                saveVariable(score_seq, f'{prefix}{trial_id}_score-seq')
                saveVariable(label_seq, f'{prefix}{trial_id}_true-label-seq')

        saveVariable(model, f'cvfold={cv_index}_{model_name}-best')

        train_fig_dir = os.path.join(fig_dir, 'train-plots')
        if not os.path.exists(train_fig_dir):
            os.makedirs(train_fig_dir)

        if train_epoch_log:
            torchutils.plotEpochLog(train_epoch_log,
                                    subfig_size=(10, 2.5),
                                    title='Training performance',
                                    fn=os.path.join(
                                        train_fig_dir,
                                        f'cvfold={cv_index}_train-plot.png'))

        if val_epoch_log:
            torchutils.plotEpochLog(val_epoch_log,
                                    subfig_size=(10, 2.5),
                                    title='Heldout performance',
                                    fn=os.path.join(
                                        train_fig_dir,
                                        f'cvfold={cv_index}_val-plot.png'))
Code Example #10
def main(out_dir=None,
         data_dir=None,
         scores_dir=None,
         frames_dir=None,
         vocab_from_scores_dir=None,
         only_fold=None,
         plot_io=None,
         prefix='seq=',
         results_file=None,
         sweep_param_name=None,
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    scores_dir = os.path.expanduser(scores_dir)
    frames_dir = os.path.expanduser(frames_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    io_dir_images = os.path.join(fig_dir, 'model-io_images')
    if not os.path.exists(io_dir_images):
        os.makedirs(io_dir_images)

    io_dir_plots = os.path.join(fig_dir, 'model-io_plots')
    if not os.path.exists(io_dir_plots):
        os.makedirs(io_dir_plots)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    seq_ids = utils.getUniqueIds(scores_dir,
                                 prefix=prefix,
                                 suffix='pred-label-seq.*',
                                 to_array=True)

    logger.info(
        f"Loaded scores for {len(seq_ids)} sequences from {scores_dir}")

    if vocab_from_scores_dir:
        vocab = utils.loadVariable('vocab', scores_dir)
    else:
        vocab = utils.loadVariable('vocab', data_dir)

    all_metrics = collections.defaultdict(list)

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    all_pred_seqs = []
    all_true_seqs = []

    for cv_index, cv_fold in enumerate(cv_folds):
        if only_fold is not None and cv_index != only_fold:
            continue

        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )

        for i in test_indices:
            seq_id = seq_ids[i]
            logger.info(f"  Processing sequence {seq_id}...")

            trial_prefix = f"{prefix}{seq_id}"
            score_seq = utils.loadVariable(f"{trial_prefix}_score-seq",
                                           scores_dir)
            pred_seq = utils.loadVariable(f"{trial_prefix}_pred-label-seq",
                                          scores_dir)
            true_seq = utils.loadVariable(f"{trial_prefix}_true-label-seq",
                                          scores_dir)

            metric_dict = eval_metrics(pred_seq, true_seq)
            for name, value in metric_dict.items():
                logger.info(f"    {name}: {value * 100:.2f}%")
                all_metrics[name].append(value)

            utils.writeResults(results_file, metric_dict, sweep_param_name,
                               model_params)

            all_pred_seqs.append(pred_seq)
            all_true_seqs.append(true_seq)

            if plot_io:
                utils.plot_array(score_seq.T, (true_seq, pred_seq),
                                 ('true', 'pred'),
                                 fn=os.path.join(io_dir_plots,
                                                 f"seq={seq_id:03d}.png"))

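    # The summary below (confusion matrix, per-class accuracy, and their plots)
    # is disabled by the `if False:` guard.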
    if False:
        confusions = metrics.confusionMatrix(all_pred_seqs, all_true_seqs,
                                             len(vocab))
        utils.saveVariable(confusions, "confusions", out_data_dir)

        per_class_acc, class_counts = metrics.perClassAcc(confusions,
                                                          return_counts=True)
        class_preds = confusions.sum(axis=1)
        logger.info(f"MACRO ACC: {np.nanmean(per_class_acc) * 100:.2f}%")

        metrics.plotConfusions(os.path.join(fig_dir, 'confusions.png'),
                               confusions, vocab)
        metrics.plotPerClassAcc(os.path.join(fig_dir,
                                             'per-class-results.png'), vocab,
                                per_class_acc, class_preds, class_counts)
Code Example #11
def main(out_dir=None,
         data_dir=None,
         actions_dir=None,
         parts_dir=None,
         events_dir=None,
         edges_dir=None,
         prefix='seq=',
         feature_fn_format='score-seq',
         label_fn_format='true-label-seq',
         stop_after=None,
         only_fold=None,
         plot_io=None,
         dataset_params={},
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    actions_dir = os.path.expanduser(actions_dir)
    parts_dir = os.path.expanduser(parts_dir)
    events_dir = os.path.expanduser(events_dir)
    edges_dir = os.path.expanduser(edges_dir)
    out_dir = os.path.expanduser(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    # vocab = utils.loadVariable(
    #     'assembly-action-vocab',
    #     os.path.join(data_dir, 'event-dataset')
    # )
    # vocab = [BlockAssembly()] + list(abs(x) for x in vocab)
    dataset = FusionDataset(
        actions_dir,
        parts_dir,
        events_dir,
        edges_dir,
        prefix=prefix,
        **dataset_params,
        # vocab=vocab,
    )
    utils.saveMetadata(dataset.metadata, out_data_dir)
    utils.saveVariable(dataset.vocab, 'vocab', out_data_dir)

    seq_ids = dataset.trial_ids
    logger.info(f"Loaded scores for {len(seq_ids)} sequences from {data_dir}")

    # Define cross-validation folds
    # cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    # utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    for i, seq_id in enumerate(seq_ids):
        try:
            labels = dataset.loadTargets(seq_id)
            features = dataset.loadInputs(seq_id)
        except AssertionError as e:
            logger.warning(f'Skipping sequence {seq_id}: {e}')
            continue

        logger.info(f"Processing sequence {seq_id}")

        if labels.shape[0] != features.shape[0]:
            message = f'Label shape {labels.shape} != feature shape {features.shape}'
            raise AssertionError(message)

        seq_prefix = f"seq={seq_id}"
        utils.saveVariable(features, f'{seq_prefix}_feature-seq', out_data_dir)
        utils.saveVariable(labels, f'{seq_prefix}_label-seq', out_data_dir)

        if plot_io:
            fn = os.path.join(fig_dir, f'{seq_prefix}.png')
            utils.plot_array(features.T, (labels.T, ), ('gt', ), fn=fn)
Code Example #12
def main(
        out_dir=None, data_dir=None, results_file=None, cv_file=None,
        take_log=False, col_format=None, win_params={}, slowfast_csv_params={}):
    out_dir = os.path.expanduser(out_dir)
    data_dir = os.path.expanduser(data_dir)
    results_file = os.path.expanduser(results_file)
    cv_file = os.path.expanduser(cv_file)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    vocab = utils.loadVariable('vocab', data_dir)
    metadata = utils.loadMetadata(data_dir)
    slowfast_labels = pd.read_csv(
        cv_file, keep_default_na=False, index_col=0,
        **slowfast_csv_params
    )
    seg_ids = slowfast_labels.index.to_numpy()
    vid_names = slowfast_labels['video_name'].unique().tolist()
    metadata['seq_id'] = metadata.index
    vid_ids = metadata.set_index('dir_name').loc[vid_names].set_index('seq_id').index
    metadata = metadata.drop('seq_id', axis=1)

    with open(results_file, 'rb') as file_:
        model_probs, gt_labels = pickle.load(file_)
        model_probs = model_probs.numpy()
        gt_labels = gt_labels.numpy()

    if len(model_probs) != len(seg_ids):
        err_str = f"{len(model_probs)} segment scores != {slowfast_labels.shape[0]} CSV rows"
        raise AssertionError(err_str)

    logger.info(f"Loaded {len(seg_ids)} segments, {len(vid_ids)} videos")

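    # Regroup the flat per-segment scores by video and save one score, label,
    # and prediction sequence per video.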
    for vid_id, vid_name in zip(vid_ids, vid_names):
        matches_video = (slowfast_labels['video_name'] == vid_name).to_numpy()
        win_labels = gt_labels[matches_video]
        win_probs = model_probs[matches_video, :]

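        # If the labels have the same shape as the scores, treat the task as
        # multi-label and threshold at 0.5; otherwise predict the argmax class.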
        if win_labels.shape == win_probs.shape:
            win_preds = (win_probs > 0.5).astype(int)
        else:
            win_preds = win_probs.argmax(axis=1)

        if take_log:
            win_probs = np.log(win_probs)

        seq_id_str = f"seq={vid_id}"
        utils.saveVariable(win_probs, f'{seq_id_str}_score-seq', out_data_dir)
        utils.saveVariable(win_labels, f'{seq_id_str}_true-label-seq', out_data_dir)
        utils.saveVariable(win_preds, f'{seq_id_str}_pred-label-seq', out_data_dir)
        utils.plot_array(
            win_probs.T, (win_labels.T, win_preds.T), ('true', 'pred'),
            tick_names=vocab,
            fn=os.path.join(fig_dir, f"{seq_id_str}.png"),
            subplot_width=12, subplot_height=5
        )
    utils.saveVariable(vocab, 'vocab', out_data_dir)
    utils.saveMetadata(metadata, out_data_dir)
Code example #13
def main(out_dir=None,
         data_dir=None,
         scores_dirs={},
         vocab_from_scores_dir=None,
         only_fold=None,
         plot_io=None,
         prefix='seq=',
         results_file=None,
         sweep_param_name=None,
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    scores_dirs = {
        name: os.path.expanduser(dir_)
        for name, dir_ in scores_dirs.items()
    }
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if len(scores_dirs) != 2:
        err_str = (
            f"scores_dirs has {len(scores_dirs)} entries, but this script "
            "compare exactly 2")
        raise NotImplementedError(err_str)

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    seq_ids = utils.getUniqueIds(data_dir,
                                 prefix=prefix,
                                 suffix='labels.*',
                                 to_array=True)

    logger.info(f"Loaded scores for {len(seq_ids)} sequences from {data_dir}")

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

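    # Load each experiment's saved vocabulary and confusion matrix, and compute
    # its per-class accuracy.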
    vocabs = {}
    confusions = {}
    accs = {}
    counts = {}
    for expt_name, scores_dir in scores_dirs.items():
        if vocab_from_scores_dir:
            vocabs[expt_name] = utils.loadVariable('vocab', scores_dir)
        else:
            vocabs[expt_name] = utils.loadVariable('vocab', data_dir)

        confusions[expt_name] = utils.loadVariable("confusions", scores_dir)
        per_class_accs, class_counts = metrics.perClassAcc(
            confusions[expt_name], return_counts=True)

        accs[expt_name] = per_class_accs
        counts[expt_name] = class_counts

    vocab = utils.reduce_all_equal(tuple(vocabs.values()))
    class_counts = utils.reduce_all_equal(tuple(counts.values()))

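    # Plot the differences (first minus second) between the two experiments'
    # confusion matrices and per-class accuracies.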
    first_name, second_name = scores_dirs.keys()
    confusions_diff = confusions[first_name] - confusions[second_name]
    acc_diff = accs[first_name] - accs[second_name]

    metrics.plotConfusions(
        os.path.join(fig_dir,
                     f'confusions_{first_name}-minus-{second_name}.png'),
        confusions_diff, vocab)
    metrics.plotPerClassAcc(
        os.path.join(fig_dir, f'accs_{first_name}-minus-{second_name}.png'),
        vocab, acc_diff, confusions_diff.sum(axis=1), class_counts)
Code example #14
def main(
        out_dir=None, modalities=['rgb', 'imu'], gpu_dev_id=None, plot_io=None,
        rgb_data_dir=None, rgb_attributes_dir=None, imu_data_dir=None, imu_attributes_dir=None):

    out_dir = os.path.expanduser(out_dir)
    rgb_data_dir = os.path.expanduser(rgb_data_dir)
    rgb_attributes_dir = os.path.expanduser(rgb_attributes_dir)
    imu_data_dir = os.path.expanduser(imu_data_dir)
    imu_attributes_dir = os.path.expanduser(imu_attributes_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    # Load data
    if modalities == ['rgb']:
        trial_ids = utils.getUniqueIds(rgb_data_dir, prefix='trial=', to_array=True)
        logger.info(f"Processing {len(trial_ids)} videos")
    else:
        rgb_trial_ids = utils.getUniqueIds(rgb_data_dir, prefix='trial=', to_array=True)
        imu_trial_ids = utils.getUniqueIds(imu_data_dir, prefix='trial=', to_array=True)
        trial_ids = np.array(sorted(set(rgb_trial_ids.tolist()) & set(imu_trial_ids.tolist())))
        logger.info(
            f"Processing {len(trial_ids)} videos common to "
            f"RGB ({len(rgb_trial_ids)} total) and IMU ({len(imu_trial_ids)} total)"
        )

    device = torchutils.selectDevice(gpu_dev_id)
    dataset = FusionDataset(
        trial_ids, rgb_attributes_dir, rgb_data_dir, imu_attributes_dir, imu_data_dir,
        device=device, modalities=modalities,
    )
    utils.saveMetadata(dataset.metadata, out_data_dir)
    utils.saveVariable(dataset.vocab, 'vocab', out_data_dir)

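    # Export each trial's fused attribute features and true labels as numpy arrays.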
    for i, trial_id in enumerate(trial_ids):
        logger.info(f"Processing sequence {trial_id}...")

        true_label_seq = dataset.loadTargets(trial_id)
        attribute_feats = dataset.loadInputs(trial_id)

        # (Process the samples here if we need to)

        attribute_feats = attribute_feats.cpu().numpy()
        true_label_seq = true_label_seq.cpu().numpy()

        trial_prefix = f"trial={trial_id}"
        utils.saveVariable(attribute_feats, f'{trial_prefix}_feature-seq', out_data_dir)
        utils.saveVariable(true_label_seq, f'{trial_prefix}_label-seq', out_data_dir)

        if plot_io:
            fn = os.path.join(fig_dir, f'{trial_prefix}.png')
            utils.plot_array(
                attribute_feats.T,
                (true_label_seq,),
                ('gt',),
                fn=fn
            )
Code example #15
def main(out_dir=None,
         metadata_file=None,
         corpus_name=None,
         default_annotator=None,
         metadata_criteria={},
         win_params={},
         slowfast_csv_params={},
         label_types=('event', 'action', 'part')):
    out_dir = os.path.expanduser(out_dir)
    metadata_file = os.path.expanduser(metadata_file)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_labels_dir = os.path.join(out_dir, 'labels')
    if not os.path.exists(out_labels_dir):
        os.makedirs(out_labels_dir)

    data_dirs = {
        name: os.path.join(out_dir, f"{name}-dataset")
        for name in label_types
    }
    for name, dir_ in data_dirs.items():
        if not os.path.exists(dir_):
            os.makedirs(dir_)
    assembly_data_dir = os.path.join(out_dir, 'assembly-dataset')
    if not os.path.exists(assembly_data_dir):
        os.makedirs(assembly_data_dir)

    (seq_ids, event_labels, assembly_seqs, frame_fn_seqs, frame_fn_idx_seqs,
     vocabs, assembly_action_vocab,
     metadata) = load_all_labels(corpus_name,
                                 default_annotator,
                                 metadata_file,
                                 metadata_criteria,
                                 start_video_from_first_touch=True,
                                 subsample_period=None,
                                 use_coarse_actions=True)

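    # Build a shared assembly vocabulary and map each assembly state sequence to
    # indices into it.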
    assembly_vocab = []
    assembly_idx_seqs = tuple(
        tuple(labels.gen_eq_classes(state_seq, assembly_vocab))
        for state_seq in assembly_seqs)
    utils.saveVariable(assembly_vocab, 'vocab', assembly_data_dir)
    utils.saveVariable(assembly_action_vocab, 'assembly-action-vocab',
                       data_dirs['event'])
    vocabs = {label_name: vocabs[label_name] for label_name in label_types}
    for name, vocab in vocabs.items():
        utils.saveVariable(vocab, 'vocab', data_dirs[name])

    utils.saveMetadata(metadata, out_labels_dir)
    for name, dir_ in data_dirs.items():
        utils.saveMetadata(metadata, dir_)

    assembly_attrs = labels.blockConnectionsSeq(assembly_vocab)
    utils.saveVariable(assembly_attrs, 'assembly-attrs', assembly_data_dir)
    plt.matshow(assembly_attrs.T)
    plt.savefig(os.path.join(fig_dir, 'assembly-attrs.png'))

    logger.info(
        f"Loaded {len(seq_ids)} sequence labels from {corpus_name} dataset")

    part_names = [name for name in vocabs['part'] if name != '']
    col_names = [f"{name}_active" for name in part_names]
    integerizers = {
        label_name: {name: i
                     for i, name in enumerate(label_vocab)}
        for label_name, label_vocab in vocabs.items()
    }

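    # Accumulate SlowFast-style label tables and action/part co-occurrence counts
    # across all sequences.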
    all_slowfast_labels_seg = collections.defaultdict(list)
    all_slowfast_labels_win = collections.defaultdict(list)
    counts = np.zeros((len(vocabs['action']), len(vocabs['part'])), dtype=int)
    for i, seq_id in enumerate(seq_ids):
        logger.info(f"Processing sequence {i + 1} / {len(seq_ids)}")

        seq_id_str = f"seq={seq_id}"

        event_segs = event_labels[i]
        frame_fns = frame_fn_seqs[i]
        frame_fn_idxs = frame_fn_idx_seqs[i]
        assembly_seq = assembly_seqs[i]
        assembly_label_seq = make_assembly_labels(assembly_seq,
                                                  assembly_idx_seqs[i],
                                                  **win_params)

        # video_dir = os.path.dirname(frame_fns[0]).split('/')[-1]
        video_dir = f"{seq_id}"

        event_data = make_event_data(event_segs, frame_fns, frame_fn_idxs,
                                     integerizers['event'],
                                     integerizers['action'],
                                     integerizers['part'],
                                     vocabs['event'].index(''),
                                     vocabs['action'].index(''), False)

        # Redefining event segments from the sequence catches background segments
        # that are not annotated in the source labels
        event_segs = make_clips(event_data,
                                vocabs['event'],
                                vocabs['action'],
                                clip_type='segment')
        event_wins = make_clips(event_data,
                                vocabs['event'],
                                vocabs['action'],
                                clip_type='window',
                                **win_params)

        for name in ('event', 'action'):
            event_segs[f'{name}_id'] = [
                integerizers[name][n] for n in event_segs[name]
            ]
            event_wins[f'{name}_id'] = [
                integerizers[name][n] for n in event_wins[name]
            ]

        event_data.to_csv(os.path.join(out_labels_dir,
                                       f"{seq_id_str}_data.csv"),
                          index=False)
        event_segs.to_csv(os.path.join(out_labels_dir,
                                       f"{seq_id_str}_segs.csv"),
                          index=False)
        event_wins.to_csv(os.path.join(out_labels_dir,
                                       f"{seq_id_str}_wins.csv"),
                          index=False)

        utils.saveVariable(assembly_label_seq, f'seq={seq_id}_label-seq',
                           assembly_data_dir)

        filenames = event_data['fn'].to_list()
        label_indices = {}
        bound_keys = ['start', 'end']
        for name in label_types:
            if name == 'part':
                label_indices[name] = event_data[col_names].to_numpy()
                label_keys = col_names
            else:
                label_indices[name] = event_data[name].to_numpy()
                label_keys = [f'{name}_id']

            seg_labels_slowfast = make_slowfast_labels(
                event_segs[bound_keys], event_segs[label_keys],
                [video_dir for _ in range(event_segs.shape[0])])
            win_labels_slowfast = make_slowfast_labels(
                event_wins[bound_keys], event_wins[label_keys],
                [video_dir for _ in range(event_wins.shape[0])])

            utils.saveVariable(filenames, f'{seq_id_str}_frame-fns',
                               data_dirs[name])
            utils.saveVariable(label_indices[name], f'{seq_id_str}_labels',
                               data_dirs[name])
            seg_labels_slowfast.to_csv(os.path.join(
                data_dirs[name], f'{seq_id_str}_slowfast-labels_seg.csv'),
                                       index=False,
                                       **slowfast_csv_params)
            win_labels_slowfast.to_csv(os.path.join(
                data_dirs[name], f'{seq_id_str}_slowfast-labels_win.csv'),
                                       index=False,
                                       **slowfast_csv_params)

            all_slowfast_labels_seg[name].append(seg_labels_slowfast)
            all_slowfast_labels_win[name].append(win_labels_slowfast)

        plot_event_labels(os.path.join(fig_dir, f"{seq_id_str}.png"),
                          label_indices['event'], label_indices['action'],
                          label_indices['part'], vocabs['event'],
                          vocabs['action'], part_names)
        plot_assembly_labels(
            os.path.join(fig_dir, f"{seq_id_str}_assembly.png"),
            assembly_label_seq, label_indices['event'], vocabs['event'])

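        # Tally how often each part is active during each action label.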
        for part_activity_row, action_index in zip(label_indices['part'],
                                                   label_indices['action']):
            for part_pos, is_active in enumerate(part_activity_row):
                part_index = integerizers['part'][part_names[part_pos]]
                counts[action_index, part_index] += int(is_active)

    for name, sf_labels in all_slowfast_labels_seg.items():
        pd.concat(sf_labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_seg.csv'),
            **slowfast_csv_params)
    for name, sf_labels in all_slowfast_labels_win.items():
        pd.concat(sf_labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_win.csv'),
            **slowfast_csv_params)

    utils.saveVariable(counts, 'action-part-counts', out_labels_dir)

    plt.matshow(counts)
    plt.xticks(ticks=range(len(vocabs['part'])),
               labels=vocabs['part'],
               rotation='vertical')
    plt.yticks(ticks=range(len(vocabs['action'])), labels=vocabs['action'])
    plt.savefig(os.path.join(fig_dir, 'action-part-coocurrence.png'),
                bbox_inches='tight')
Code example #16
def main(
        out_dir=None, annotation_dir=None, frames_dir=None,
        win_params={}, slowfast_csv_params={}, label_types=('event', 'action', 'part')):
    out_dir = os.path.expanduser(out_dir)
    annotation_dir = os.path.expanduser(annotation_dir)
    frames_dir = os.path.expanduser(frames_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    out_labels_dir = os.path.join(out_dir, 'labels')
    if not os.path.exists(out_labels_dir):
        os.makedirs(out_labels_dir)

    data_dirs = {name: os.path.join(out_dir, f"{name}-dataset") for name in label_types}
    for name, dir_ in data_dirs.items():
        if not os.path.exists(dir_):
            os.makedirs(dir_)

    seq_ids, event_labels, vocabs, metadata = load_all_labels(annotation_dir)
    vocabs = {label_name: vocabs[label_name] for label_name in label_types}
    for name, vocab in vocabs.items():
        utils.saveVariable(vocab, 'vocab', data_dirs[name])

    utils.saveMetadata(metadata, out_labels_dir)
    for name, dir_ in data_dirs.items():
        utils.saveMetadata(metadata, dir_)

    logger.info(f"Loaded {len(seq_ids)} sequence labels from {annotation_dir}")

    part_names = [name for name in vocabs['part'] if name != '']
    col_names = [f"{name}_active" for name in part_names]
    integerizers = {
        label_name: {name: i for i, name in enumerate(label_vocab)}
        for label_name, label_vocab in vocabs.items()
    }

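    # Accumulate SlowFast-style label tables and action/part co-occurrence counts
    # across all sequences.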
    all_slowfast_labels_seg = collections.defaultdict(list)
    all_slowfast_labels_win = collections.defaultdict(list)
    counts = np.zeros((len(vocabs['action']), len(vocabs['part'])), dtype=int)
    for i, seq_id in enumerate(seq_ids):
        logger.info(f"Processing sequence {i + 1} / {len(seq_ids)}")

        seq_id_str = f"seq={seq_id}"

        event_segs = event_labels[i]

        # Ignore 'check booklet' events because they don't have an impact on construction
        event_segs = event_segs.loc[event_segs['event'] != 'check_booklet']

        event_data = make_event_data(
            event_segs, sorted(glob.glob(os.path.join(frames_dir, f'{seq_id}', '*.jpg'))),
            integerizers['event'], integerizers['action'], integerizers['part'],
            vocabs['event'].index(''), vocabs['action'].index(''), False
        )

        # Redefining event segments from the sequence catches background segments
        # that are not annotated in the source labels
        event_segs = make_clips(
            event_data, vocabs['event'], vocabs['action'],
            clip_type='segment'
        )
        event_wins = make_clips(
            event_data, vocabs['event'], vocabs['action'],
            clip_type='window', **win_params
        )

        for name in ('event', 'action'):
            event_segs[f'{name}_id'] = [integerizers[name][n] for n in event_segs[name]]
            event_wins[f'{name}_id'] = [integerizers[name][n] for n in event_wins[name]]

        event_data.to_csv(os.path.join(out_labels_dir, f"{seq_id_str}_data.csv"), index=False)
        event_segs.to_csv(os.path.join(out_labels_dir, f"{seq_id_str}_segs.csv"), index=False)
        event_wins.to_csv(os.path.join(out_labels_dir, f"{seq_id_str}_wins.csv"), index=False)

        filenames = event_data['fn'].to_list()
        label_indices = {}
        bound_keys = ['start', 'end']
        for name in label_types:
            if name == 'part':
                label_indices[name] = event_data[col_names].to_numpy()
                label_keys = col_names
            else:
                label_indices[name] = event_data[name].to_numpy()
                label_keys = [f'{name}_id']

            seg_labels_slowfast = make_slowfast_labels(
                event_segs[bound_keys], event_segs[label_keys], event_data['fn']
            )
            win_labels_slowfast = make_slowfast_labels(
                event_wins[bound_keys], event_wins[label_keys], event_data['fn']
            )

            utils.saveVariable(filenames, f'{seq_id_str}_frame-fns', data_dirs[name])
            utils.saveVariable(label_indices[name], f'{seq_id_str}_labels', data_dirs[name])
            seg_labels_slowfast.to_csv(
                os.path.join(data_dirs[name], f'{seq_id_str}_slowfast-labels_seg.csv'),
                index=False, **slowfast_csv_params
            )
            win_labels_slowfast.to_csv(
                os.path.join(data_dirs[name], f'{seq_id_str}_slowfast-labels_win.csv'),
                index=False, **slowfast_csv_params
            )

            all_slowfast_labels_seg[name].append(seg_labels_slowfast)
            all_slowfast_labels_win[name].append(win_labels_slowfast)

        plot_event_labels(
            os.path.join(fig_dir, f"{seq_id_str}.png"),
            label_indices['event'], label_indices['action'], label_indices['part'],
            vocabs['event'], vocabs['action'], part_names
        )

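        # Tally how often each part is active during each action label.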
        for part_activity_row, action_index in zip(label_indices['part'], label_indices['action']):
            for part_pos, is_active in enumerate(part_activity_row):
                part_index = integerizers['part'][part_names[part_pos]]
                counts[action_index, part_index] += int(is_active)

    for name, labels in all_slowfast_labels_seg.items():
        pd.concat(labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_seg.csv'),
            **slowfast_csv_params
        )
    for name, labels in all_slowfast_labels_win.items():
        pd.concat(labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_win.csv'),
            **slowfast_csv_params
        )

    utils.saveVariable(counts, 'action-part-counts', out_labels_dir)

    plt.matshow(counts)
    plt.xticks(ticks=range(len(vocabs['part'])), labels=vocabs['part'], rotation='vertical')
    plt.yticks(ticks=range(len(vocabs['action'])), labels=vocabs['action'])
    plt.savefig(os.path.join(fig_dir, 'action-part-coocurrence.png'), bbox_inches='tight')