Example #1
def count_priors(label_seqs,
                 num_classes,
                 stride=None,
                 approx_upto=None,
                 support_only=False):
    dur_counts = {}
    class_counts = {}
    for label_seq in label_seqs:
        for label, dur in zip(*utils.computeSegments(label_seq[::stride])):
            class_counts[label] = class_counts.get(label, 0) + 1
            dur_counts[label, dur] = dur_counts.get((label, dur), 0) + 1

    class_priors = np.zeros(num_classes)
    for label, count in class_counts.items():
        class_priors[label] = count
    class_priors /= class_priors.sum()

    max_dur = max(dur for label, dur in dur_counts.keys())
    dur_priors = np.zeros((num_classes, max_dur))
    for (label, dur), count in dur_counts.items():
        assert dur
        dur_priors[label, dur - 1] = count
    dur_priors /= dur_priors.sum(axis=1, keepdims=True)

    if approx_upto is not None:
        # Keep the shortest duration prefix whose CDF reaches approx_upto;
        # argmax finds the first index where it does, hence the + 1
        cdf = dur_priors.cumsum(axis=1)
        approx_bounds = (cdf >= approx_upto).argmax(axis=1)
        dur_priors = dur_priors[:, :approx_bounds.max() + 1]

    if support_only:
        dur_priors = (dur_priors > 0).astype(float)

    return class_priors, dur_priors
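All of these examples lean on utils.computeSegments, which is never shown. Judging from how its outputs are consumed (segment durations sum to the sequence length in Example #2, and utils.fromSegments inverts it in Example #3), it appears to be a run-length encoder. A minimal sketch of that presumed behavior, with compute_segments as a hypothetical stand-in:

def compute_segments(label_seq):
    """Run-length encode a sequence into (segment_labels, segment_lengths)."""
    seg_labels, seg_lens = [], []
    for label in label_seq:
        if seg_labels and label == seg_labels[-1]:
            seg_lens[-1] += 1
        else:
            seg_labels.append(label)
            seg_lens.append(1)
    return seg_labels, seg_lens

# compute_segments([0, 0, 1, 1, 1, 2]) -> ([0, 1, 2], [2, 3, 1])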
Example #2
def write_labels(fn, label_seq, vocab):
    seg_label_idxs, seg_durs = utils.computeSegments(label_seq)

    seg_durs = np.array(seg_durs)
    seg_ends = np.cumsum(seg_durs) - 1
    seg_starts = seg_ends - seg_durs + 1
    seg_labels = tuple(vocab[i] for i in seg_label_idxs)
    d = {'start': seg_starts, 'end': seg_ends, 'label': seg_labels}
    pd.DataFrame(d).to_csv(fn, index=False)
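A hypothetical round trip, assuming the run-length convention sketched above:

# write_labels('labels.csv', [0, 0, 2, 2, 2], vocab=('a', 'b', 'c'))
# produces a CSV like:
#   start,end,label
#   0,1,a
#   2,4,c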
Example #3
def actionsFromAssemblies(assembly_index_seq, assembly_vocab, action_vocab):
    assembly_index_segs, seg_lens = utils.computeSegments(assembly_index_seq)
    assembly_segs = tuple(assembly_vocab[i] for i in assembly_index_segs)
    # Each action is the difference between consecutive assemblies; a null
    # action pads the front so action_segs aligns with assembly_segs.
    action_segs = (
        (lib_assembly.AssemblyAction(),)
        + tuple(n - c for c, n in zip(assembly_segs[:-1], assembly_segs[1:]))
    )
    action_index_segs = tuple(utils.getIndex(a, action_vocab) for a in action_segs)
    action_index_seq = np.array(utils.fromSegments(action_index_segs, seg_lens))

    return action_segs, action_index_seq
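utils.fromSegments is evidently the inverse of utils.computeSegments: it expands per-segment values back into a frame-level sequence. A minimal sketch of that presumed behavior (from_segments is a hypothetical stand-in):

def from_segments(seg_labels, seg_lens):
    """Invert run-length encoding: repeat each segment label for its length."""
    return [label for label, seg_len in zip(seg_labels, seg_lens) for _ in range(seg_len)]

# from_segments([0, 1, 2], [2, 3, 1]) -> [0, 0, 1, 1, 1, 2]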
Example #4
    def _viterbiArcs(self, pred_state_idxs, arc_labels):
        pred_state_idxs = pred_state_idxs[0]  # batch dimension is assumed to be 1
        state_segs, _ = utils.computeSegments(pred_state_idxs.tolist())

        # Label every frame of a segment with the arc leaving that segment's state.
        # Note: frames are matched by state value, so this assumes a state index
        # does not recur in a later segment.
        pred_arc_idxs = -torch.ones_like(pred_state_idxs)
        for cur_state_idx, next_state_idx in zip(state_segs[:-1],
                                                 state_segs[1:]):
            pred_arc_idx = arc_labels[next_state_idx, cur_state_idx]
            is_cur_state = pred_state_idxs == cur_state_idx
            pred_arc_idxs[is_cur_state] = pred_arc_idx
        pred_arc_idxs = pred_arc_idxs[:-1]

        if (pred_arc_idxs < 0).any():
            err_str = f"Impossible transition decoded: {pred_arc_idxs}"
            raise AssertionError(err_str)

        return pred_arc_idxs
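Here arc_labels acts as a transition table indexed by (next_state, cur_state). A hypothetical 3-state table, with -1 marking impossible transitions:

import torch

# arc_labels[next_state, cur_state] = arc index, or -1 if the transition is impossible
arc_labels = torch.tensor([[-1, 1, -1],
                           [0, -1, -1],
                           [-1, 2, -1]])
# A decoded transition 0 -> 1 is labeled arc_labels[1, 0] == 0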
Example #5
def segmentDetections(detections, kmeans, return_means=False):
    nan_rows = np.isnan(detections[:, 0]) | np.isnan(detections[:, 1])
    non_nan_detections = detections[~nan_rows, :]
    closest_bins = kmeans.predict(non_nan_detections)
    bin_segs, durations = utils.computeSegments(closest_bins)

    if not return_means:
        return bin_segs

    i = 0
    mean_detections = []
    for bin_label, duration in zip(bin_segs, durations):
        # Average the detections in each segment (durations are segment lengths)
        mean_detection = np.mean(non_nan_detections[i:i + duration, :], axis=0)
        i += duration
        mean_detections.append(mean_detection)
    mean_detections = np.vstack(mean_detections)

    return bin_segs, mean_detections
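A hedged usage sketch with scikit-learn's KMeans (toy data; assumes utils.computeSegments follows the run-length convention above):

import numpy as np
from sklearn.cluster import KMeans

# Two spatial clusters, plus one NaN detection that segmentDetections drops
detections = np.array([[0.0, 0.1], [0.1, 0.0], [np.nan, 1.0], [5.0, 5.1], [5.1, 5.0]])
valid = detections[~np.isnan(detections).any(axis=1)]
kmeans = KMeans(n_clusters=2, n_init=10).fit(valid)
bin_segs, mean_detections = segmentDetections(detections, kmeans, return_means=True)
# bin_segs: one bin label per segment; mean_detections: per-segment mean position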
Example #6
def make_clips(event_data,
               event_vocab,
               action_vocab,
               clip_type='window',
               stride=1,
               win_size=1):
    if clip_type == 'window':
        samples = range(0, event_data.shape[0], stride)
        clip_slices = utils.slidingWindowSlices(event_data,
                                                stride=stride,
                                                win_size=win_size,
                                                samples=samples)

        def get_clip_labels(arr):
            return [arr[i] for i in samples]
    elif clip_type == 'segment':
        _, seg_lens = utils.computeSegments(event_data['event'].to_numpy())
        clip_slices = tuple(utils.genSegSlices(seg_lens))

        def get_clip_labels(arr):
            return [utils.majorityVote(arr[sl]) for sl in clip_slices]
    else:
        err_str = (f"Unrecognized argument: clip_type={clip_type} "
                   "(accepted values are 'window' or 'segment')")
        raise ValueError(err_str)

    d = {
        name: get_clip_labels(event_data[name].to_numpy())
        for name in event_data.columns if name != 'fn'
    }
    d['event_id'] = d['event']
    d['event'] = [event_vocab[i] for i in d['event']]
    d['action_id'] = d['action']
    d['action'] = [action_vocab[i] for i in d['action']]

    fn_indices = event_data['fn_index'].to_numpy()
    d['start'] = fn_indices[[sl.start for sl in clip_slices]]
    d['end'] = fn_indices[[
        min(sl.stop, event_data.shape[0]) - 1 for sl in clip_slices
    ]]

    window_clips = pd.DataFrame(d)
    return window_clips
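utils.genSegSlices is also unshown; given how clip_slices is consumed, it presumably yields one slice per run-length segment. A minimal sketch (gen_seg_slices is a hypothetical stand-in):

def gen_seg_slices(seg_lens):
    """Yield one slice per segment, assuming seg_lens are segment lengths."""
    start = 0
    for seg_len in seg_lens:
        yield slice(start, start + seg_len)
        start += seg_len

# tuple(gen_seg_slices([2, 3, 1])) -> (slice(0, 2), slice(2, 5), slice(5, 6))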
Example #7
def edges_to_assemblies(edge_label_seq, assembly_vocab, edge_vocab,
                        assembly_vocab_edges):
    # 8 parts -> 28 unordered (row > col) part pairs
    rows, cols = np.tril_indices(8, k=-1)
    keys = {
        i: frozenset((int(r), int(c)))
        for i, (r, c) in enumerate(zip(rows, cols))
    }

    link_vocab = edge_vocab[keys[0]][0].link_vocab
    joint_vocab = edge_vocab[keys[0]][0].joint_vocab
    joint_type_vocab = edge_vocab[keys[0]][0].joint_type_vocab

    def get_assembly_label(edge_labels):
        all_edges_match = (edge_labels == assembly_vocab_edges).all(axis=1)
        if all_edges_match.any():
            assembly_labels = all_edges_match.nonzero()[0]
            if edge_labels.any() and assembly_labels.size != 1:
                raise AssertionError(
                    f"{assembly_labels.size} assemblies match these edges!")
            return assembly_labels[0]

        edges = tuple(edge_vocab[keys[edge_index]][edge_label - 1]
                      for edge_index, edge_label in enumerate(edge_labels)
                      if edge_label)
        assembly = lib_assembly.union(*edges,
                                      link_vocab=link_vocab,
                                      joint_vocab=joint_vocab,
                                      joint_type_vocab=joint_type_vocab)
        assembly_label = utils.getIndex(assembly, assembly_vocab)
        return assembly_label

    edge_segs, seg_lens = utils.computeSegments(edge_label_seq)
    assembly_segs = tuple(
        get_assembly_label(edge_labels) for edge_labels in edge_segs)
    assembly_label_seq = np.array(utils.fromSegments(assembly_segs, seg_lens))

    return assembly_label_seq
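The keys table maps a flat edge index to an unordered part pair: for 8 parts, np.tril_indices(8, k=-1) enumerates all 28 pairs below the diagonal. A quick check:

import numpy as np

rows, cols = np.tril_indices(8, k=-1)
assert len(rows) == 8 * 7 // 2  # 28 unordered part pairs
# list(zip(rows, cols)) starts [(1, 0), (2, 0), (2, 1), ...]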
Example #8
def main(out_dir=None,
         data_dir=None,
         segs_dir=None,
         scores_dir=None,
         vocab_dir=None,
         label_type='edge',
         gpu_dev_id=None,
         start_from=None,
         stop_at=None,
         num_disp_imgs=None,
         results_file=None,
         sweep_param_name=None,
         model_params={},
         cv_params={}):

    data_dir = os.path.expanduser(data_dir)
    segs_dir = os.path.expanduser(segs_dir)
    scores_dir = os.path.expanduser(scores_dir)
    vocab_dir = os.path.expanduser(vocab_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    io_dir_images = os.path.join(fig_dir, 'model-io_images')
    if not os.path.exists(io_dir_images):
        os.makedirs(io_dir_images)

    io_dir_plots = os.path.join(fig_dir, 'model-io_plots')
    if not os.path.exists(io_dir_plots):
        os.makedirs(io_dir_plots)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    seq_ids = utils.getUniqueIds(scores_dir,
                                 prefix='trial=',
                                 suffix='score-seq.*',
                                 to_array=True)

    logger.info(
        f"Loaded scores for {len(seq_ids)} sequences from {scores_dir}")

    link_vocab = {}
    joint_vocab = {}
    joint_type_vocab = {}
    vocab, parts_vocab, part_labels = load_vocab(link_vocab, joint_vocab,
                                                 joint_type_vocab, vocab_dir)
    pred_vocab = []  # FIXME

    if label_type == 'assembly':
        logger.info("Converting assemblies -> edges")
        state_pred_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_pred-label-seq", scores_dir)
            for seq_id in seq_ids)
        state_true_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_true-label-seq", scores_dir)
            for seq_id in seq_ids)
        edge_pred_seqs = tuple(part_labels[seq] for seq in state_pred_seqs)
        edge_true_seqs = tuple(part_labels[seq] for seq in state_true_seqs)
    elif label_type == 'edge':
        logger.info("Converting edges -> assemblies (will take a few minutes)")
        edge_pred_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_pred-label-seq", scores_dir)
            for seq_id in seq_ids)
        edge_true_seqs = tuple(
            utils.loadVariable(f"trial={seq_id}_true-label-seq", scores_dir)
            for seq_id in seq_ids)
        state_pred_seqs = tuple(
            edges_to_assemblies(seq, pred_vocab, parts_vocab, part_labels)
            for seq in edge_pred_seqs)
        state_true_seqs = tuple(
            edges_to_assemblies(seq, vocab, parts_vocab, part_labels)
            for seq in edge_true_seqs)
    else:
        err_str = (f"Unrecognized argument: label_type={label_type} "
                   "(accepted values are 'assembly' or 'edge')")
        raise ValueError(err_str)

    device = torchutils.selectDevice(gpu_dev_id)
    dataset = sim2real.LabeledConnectionDataset(
        utils.loadVariable('parts-vocab', vocab_dir),
        utils.loadVariable('part-labels', vocab_dir),
        utils.loadVariable('vocab', vocab_dir),
        device=device)

    all_metrics = collections.defaultdict(list)

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    for cv_index, cv_fold in enumerate(cv_folds):
        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )

        train_states = np.hstack(
            tuple(state_true_seqs[i] for i in train_indices))
        train_edges = part_labels[train_states]
        # state_train_vocab = np.unique(train_states)
        # edge_train_vocab = part_labels[state_train_vocab]
        train_freq_bigram, train_freq_unigram = edge_joint_freqs(train_edges)
        # state_probs = utils.makeHistogram(len(vocab), train_states, normalize=True)

        test_states = np.hstack(
            tuple(state_true_seqs[i] for i in test_indices))
        test_edges = part_labels[test_states]
        # state_test_vocab = np.unique(test_states)
        # edge_test_vocab = part_labels[state_test_vocab]
        test_freq_bigram, test_freq_unigram = edge_joint_freqs(test_edges)

        f, axes = plt.subplots(1, 2)
        axes[0].matshow(train_freq_bigram)
        axes[0].set_title('Train')
        axes[1].matshow(test_freq_bigram)
        axes[1].set_title('Test')
        plt.tight_layout()
        plt.savefig(
            os.path.join(fig_dir, f"edge-freqs-bigram_cvfold={cv_index}.png"))
        plt.close()

        f, axis = plt.subplots(1)
        axis.stem(train_freq_unigram,
                  label='Train',
                  linefmt='C0-',
                  markerfmt='C0o')
        axis.stem(test_freq_unigram,
                  label='Test',
                  linefmt='C1--',
                  markerfmt='C1o')
        plt.legend()
        plt.tight_layout()
        plt.savefig(
            os.path.join(fig_dir, f"edge-freqs-unigram_cvfold={cv_index}.png"))
        plt.close()

        for i in test_indices:
            seq_id = seq_ids[i]
            logger.info(f"  Processing sequence {seq_id}...")

            trial_prefix = f"trial={seq_id}"
            # I include the '.' to differentiate between 'rgb-frame-seq' and
            # 'rgb-frame-seq-before-first-touch'
            # rgb_seq = utils.loadVariable(f"{trial_prefix}_rgb-frame-seq.", data_dir)
            # seg_seq = utils.loadVariable(f"{trial_prefix}_seg-labels-seq", segs_dir)
            score_seq = utils.loadVariable(f"{trial_prefix}_score-seq",
                                           scores_dir)
            # if score_seq.shape[0] != rgb_seq.shape[0]:
            #     err_str = f"scores shape {score_seq.shape} != data shape {rgb_seq.shape}"
            #     raise AssertionError(err_str)

            edge_pred_seq = edge_pred_seqs[i]
            edge_true_seq = edge_true_seqs[i]
            state_pred_seq = state_pred_seqs[i]
            state_true_seq = state_true_seqs[i]

            num_types = np.unique(state_pred_seq).shape[0]
            num_samples = state_pred_seq.shape[0]
            num_total = len(pred_vocab)
            logger.info(
                f"    {num_types} assemblies predicted ({num_total} total); "
                f"{num_samples} samples")

            # edge_freq_bigram, edge_freq_unigram = edge_joint_freqs(edge_true_seq)
            # dist_shift = np.linalg.norm(train_freq_unigram - edge_freq_unigram)
            metric_dict = {
                # 'State OOV rate': oov_rate_state(state_true_seq, state_train_vocab),
                # 'Edge OOV rate': oov_rate_edges(edge_true_seq, edge_train_vocab),
                # 'State avg prob, true': state_probs[state_true_seq].mean(),
                # 'State avg prob, pred': state_probs[state_pred_seq].mean(),
                # 'Edge distribution shift': dist_shift
            }
            metric_dict = eval_edge_metrics(edge_pred_seq,
                                            edge_true_seq,
                                            append_to=metric_dict)
            metric_dict = eval_state_metrics(state_pred_seq,
                                             state_true_seq,
                                             append_to=metric_dict)
            for name, value in metric_dict.items():
                logger.info(f"    {name}: {value * 100:.2f}%")
                all_metrics[name].append(value)

            utils.writeResults(results_file, metric_dict, sweep_param_name,
                               model_params)

            if num_disp_imgs is not None:
                pred_images = tuple(
                    render(dataset, vocab[seg_label])
                    for seg_label in utils.computeSegments(state_pred_seq)[0])
                imageprocessing.displayImages(
                    *pred_images,
                    file_path=os.path.join(
                        io_dir_images,
                        f"seq={seq_id:03d}_pred-assemblies.png"),
                    num_rows=None,
                    num_cols=5)
                true_images = tuple(
                    render(dataset, vocab[seg_label])
                    for seg_label in utils.computeSegments(state_true_seq)[0])
                imageprocessing.displayImages(
                    *true_images,
                    file_path=os.path.join(
                        io_dir_images,
                        f"seq={seq_id:03d}_true-assemblies.png"),
                    num_rows=None,
                    num_cols=5)

                utils.plot_array(score_seq.T,
                                 (edge_true_seq.T, edge_pred_seq.T),
                                 ('true', 'pred'),
                                 fn=os.path.join(io_dir_plots,
                                                 f"seq={seq_id:03d}.png"))
Example #9
def main(
        out_dir=None, data_dir=None, part_symmetries=None,
        plot_output=None, results_file=None, sweep_param_name=None, start_from=None):

    if part_symmetries is None:
        symmetry_groups = [
            ['frontbeam_hole_1', 'frontbeam_hole_2', 'backbeam_hole_1', 'backbeam_hole_2'],
            ['cushion_hole_1', 'cushion_hole_2']
        ]
        # Map each part name to its full symmetry group
        part_symmetries = {
            name: group
            for group in symmetry_groups
            for name in group
        }

    data_dir = os.path.expanduser(data_dir)

    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    debug_dir = os.path.join(out_dir, 'debug')
    if not os.path.exists(debug_dir):
        os.makedirs(debug_dir)

    def saveVariable(var, var_name):
        joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    logger.info(f"Reading from: {data_dir}")
    logger.info(f"Writing to: {out_dir}")

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
        # write_mode = 'w'
    else:
        results_file = os.path.expanduser(results_file)
        # write_mode = 'a'

    labels_dir = os.path.join(data_dir, 'labels')

    with open(os.path.join(labels_dir, 'action_and_part_names.yaml'), 'rt') as f:
        names = yaml.safe_load(f)
    action_names = names['action_names']
    action_name_to_index = {name: i for i, name in enumerate(action_names)}
    part_names = names['part_names']
    part_name_to_index = {name: i for i, name in enumerate(part_names)}

    video_ids = []
    all_label_arrs = []
    for label_fn in glob.glob(os.path.join(labels_dir, "*.csv")):
        video_id = utils.stripExtension(label_fn)
        labels_arr = pd.read_csv(label_fn)
        all_label_arrs.append(labels_arr)
        video_ids.append(video_id)

    pose_dir = os.path.join(data_dir, 'poses')
    pose_ids = tuple(
        video_id
        for video_id in video_ids
        if os.path.exists(os.path.join(pose_dir, video_id))
    )
    keep_ids = tuple(v_id in pose_ids for v_id in video_ids)
    logger.info(
        f"Ignoring {len(keep_ids) - sum(keep_ids)} video(s) with missing data: "
        f"{', '.join([v_id for v_id, keep in zip(video_ids, keep_ids) if not keep])}"
    )

    def filterSeq(seq):
        return tuple(x for x, keep_id in zip(seq, keep_ids) if keep_id)
    all_label_arrs = filterSeq(all_label_arrs)
    video_ids = filterSeq(video_ids)

    assembly_vocab = []
    label_seqs = []
    for i, video_id in enumerate(video_ids):
        if start_from is not None and i < start_from:
            continue

        logger.info("PROCESSING VIDEO {0}: {1}".format(i, video_id))

        labels_arr = all_label_arrs[i]

        video_dir = os.path.join(pose_dir, video_id)

        def loadFile(part_name):
            path = os.path.join(video_dir, f'{part_name}.csv')
            arr = pd.read_csv(path)
            return arr

        part_data = tuple(loadFile(part_name) for part_name in part_names)

        poses_seq = np.stack(tuple(arr.values for arr in part_data), axis=-1)

        feature_seq = relativePose(poses_seq, lower_tri_only=True, magnitude_only=True)
        label_seq = actionLabels(
            labels_arr, feature_seq.shape[0],
            action_name_to_index, part_name_to_index
        )

        part_pair_names = makePairs(part_names, lower_tri_only=True)
        is_possible = possibleConnections(part_pair_names)
        feature_seq = feature_seq[:, is_possible, :]
        label_seq = label_seq[:, is_possible]
        part_pair_names = tuple(n for (b, n) in zip(is_possible, part_pair_names) if b)

        utils.plot_multi(
            np.moveaxis(feature_seq, (0, 1, 2), (-1, 0, 1)), label_seq.T,
            axis_names=part_pair_names, label_name='action',
            feature_names=('translation_dist', 'rotation_dist'),
            tick_names=[''] + action_names,
            fn=os.path.join(fig_dir, f"{video_id}_actions.png")
        )

        label_seq = assemblyLabels(
            labels_arr, feature_seq.shape[0],
            assembly_vocab=assembly_vocab, symmetries=part_symmetries
        )
        # expanded_label_seq = _assemblyLabels(
        #     labels_arr, feature_seq.shape[0],
        #     assembly_vocab=assembly_vocab, symmetries=part_symmetries
        # )
        utils.plot_array(
            feature_seq.sum(axis=-1).T, (label_seq,), ('assembly',),
            fn=os.path.join(fig_dir, f"{video_id}_assemblies.png")
        )
        label_seqs.append(label_seq)

        label_segments, __ = utils.computeSegments(label_seq)
        assembly_segments = [assembly_vocab[i] for i in label_segments]
        lib_assembly.writeAssemblies(
            os.path.join(debug_dir, f'trial={video_id}_assembly-seq.txt'),
            assembly_segments
        )

        video_id = video_id.replace('_', '-')
        saveVariable(feature_seq, f'trial={video_id}_feature-seq')
        saveVariable(label_seq, f'trial={video_id}_label-seq')
        # saveVariable(expanded_label_seq, f'trial={video_id}_expanded-label-seq')

    if False:
        from seqtools import utils as su
        transition_probs, start_probs, end_probs = su.smoothCounts(
            *su.countSeqs(label_seqs)
        )

    lib_assembly.writeAssemblies(
        os.path.join(debug_dir, 'assembly-vocab.txt'),
        assembly_vocab
    )

    saveVariable(assembly_vocab, 'assembly-vocab')
    with open(os.path.join(out_data_dir, 'action-vocab.yaml'), 'wt') as f:
        yaml.dump(action_names, f)
    with open(os.path.join(out_data_dir, 'part-vocab.yaml'), 'wt') as f:
        yaml.dump(part_names, f)