Code example #1
from seqtools import split


def test_split():
    arr = list(range(125))

    # an int is a number of evenly spaced cut points: 4 cuts give 5 chunks of 25
    y = split(arr, 4)
    assert y[-1] == list(range(100, 125))
    assert list(y) == [list(range(i, i + 25)) for i in range(0, 125, 25)]

    # a flat list of indices gives explicit cut points, sequence ends implicit
    y = split(arr, list(range(25, 125, 25)))
    assert y[-1] == list(range(100, 125))
    assert list(y) == [list(range(i, i + 25)) for i in range(0, 125, 25)]

    # a list of (start, stop) pairs gives the subsequence boundaries directly
    y = split(arr, [(i, i + 25) for i in range(0, 125, 25)])
    assert y[-1] == list(range(100, 125))
    assert list(y) == [list(range(i, i + 25)) for i in range(0, 125, 25)]

    # assigning to a subsequence writes back into the source sequence
    y[-1] = [0] * 25
    assert arr[100:] == [0] * 25

    # slice assignment on the view propagates as well
    y[-1:] = [list(range(0, -25, -1))]
    assert arr[:100] == list(range(100))
    assert arr[100:] == list(range(0, -25, -1))
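
For reference, the three edges conventions exercised above (an integer number of evenly spaced cut points, a flat list of cut indices, and explicit (start, stop) pairs) combine naturally with other seqtools views. A minimal sketch, assuming the lazy seqtools.smap helper used in the examples below; the signal and the per-window statistic are made up for illustration:

import seqtools

signal = list(range(1000))

# 4 evenly spaced cut points -> 5 windows of 200 samples each
windows = seqtools.split(signal, 4)

# lazily compute a per-window statistic; nothing is evaluated until indexed
means = seqtools.smap(lambda w: sum(w) / len(w), windows)
assert len(means) == 5
assert means[0] == 99.5 and means[-1] == 899.5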
Code example #2
File: a_data.py Project: nlgranger/code_these
def reload():
    global train_subset, val_subset, test_subset, \
        durations, gloss_seqs, pose2d_seqs, pose3d_seqs, frame_seqs

    with open(os.path.join(cachedir, "data.pkl"), 'rb') as f:
        durations, gloss_seqs, rec_mapping, transformations, \
            train_subset, val_subset, test_subset = pkl.load(f)

    # (start, stop) boundaries of each recording within the concatenated arrays
    segments = np.stack(
        [np.cumsum(durations) - durations,
         np.cumsum(durations)], axis=1)

    # split the memory-mapped pose arrays back into one sequence per recording
    pose2d_seqs = seqtools.split(
        np.load(os.path.join(cachedir, "pose2d_seqs.npy"), mmap_mode='r'),
        segments)

    pose3d_seqs = seqtools.split(
        np.load(os.path.join(cachedir, "pose3d_seqs.npy"), mmap_mode='r'),
        segments)

    # lazily load and transform the video frames of each recording
    frame_seqs = seqtools.smap(lambda r: dataset.bgr_frames(r), rec_mapping)
    frame_seqs = seqtools.smap(transform_frames, frame_seqs, transformations)
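
The durations-to-boundaries-to-split pattern above is easier to see on toy data. A minimal sketch with made-up numbers, assuming seqtools.split accepts the (N, 2) boundary array exactly as it is used in the snippet:

import numpy as np
import seqtools

# toy stand-ins for the per-recording durations and the concatenated pose array
durations = np.array([3, 5, 2])
flat_poses = np.arange(10)

# (start, stop) boundary of each recording inside the concatenated array
segments = np.stack(
    [np.cumsum(durations) - durations, np.cumsum(durations)], axis=1)

per_recording = seqtools.split(flat_poses, segments)
assert len(per_recording) == 3
assert list(per_recording[1]) == [3, 4, 5, 6, 7]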
Code example #3
File: dataset.py Project: nlgranger/code_these
def poses_3d(self) -> np.ndarray:
    # one 3-D pose array per recording, cut at the offsets stored in rec_info
    return seqtools.split(self._poses_3d,
                          self.rec_info['skel_data_off'][1:])
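
Here edges is a flat list of offsets rather than (start, stop) pairs: assuming the offset column starts at zero, dropping its first entry leaves only the interior cut points, the two ends of the sequence being implicit. A small sketch with made-up offsets:

import numpy as np
import seqtools

flat_skel = np.arange(10)   # concatenated skeleton frames of all recordings
offsets = [0, 3, 8]         # start offset of each recording in the flat array

per_recording = seqtools.split(flat_skel, offsets[1:])
assert [list(c) for c in per_recording] == [[0, 1, 2], [3, 4, 5, 6, 7], [8, 9]]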
Code example #4
def transfer_feat_seqs(transfer_from, freeze_at):
    import theano
    import theano.tensor as T
    import lasagne
    from sltools.nn_utils import adjust_length
    from experiments.utils import reload_best_hmm, reload_best_rnn

    report = shelve.open(os.path.join(cachedir, transfer_from))

    if report['meta']['modality'] == "skel":
        source_feat_seqs = [skel_feat_seqs]
    elif report['meta']['modality'] == "bgr":
        source_feat_seqs = [bgr_feat_seqs]
    elif report['meta']['modality'] == "fusion":
        source_feat_seqs = [skel_feat_seqs, bgr_feat_seqs]
    else:
        raise ValueError()

    # no computation required
    if freeze_at == "inputs":
        return source_feat_seqs

    # reuse cached features
    dump_file = os.path.join(
        cachedir,
        report['meta']['experiment_name'] + "_" + freeze_at + "feats.npy")
    if os.path.exists(dump_file):
        boundaries = np.stack(
            (np.cumsum(durations) - durations, np.cumsum(durations)), axis=1)
        return [split_seq(np.load(dump_file, mmap_mode='r'), boundaries)]

    # reload model
    if report['meta']['model'] == "hmm":
        _, recognizer, _ = reload_best_hmm(report)
        l_in = recognizer.posterior.l_in
        if freeze_at == "embedding":
            l_feats = recognizer.posterior.l_feats
        elif freeze_at == "logits":
            l_feats = recognizer.posterior.l_raw
        elif freeze_at == "posteriors":
            l_feats = lasagne.layers.NonlinearityLayer(
                recognizer.posterior.l_out, T.exp)
        else:
            raise ValueError()
        batch_size, max_time, *_ = l_in[0].output_shape  # TODO: fragile
        warmup = recognizer.posterior.warmup

    else:
        _, model_dict, _ = reload_best_rnn(report)
        l_in = model_dict['l_in']
        l_feats = model_dict['l_feats']
        batch_size, max_time, *_ = l_in[0].output_shape  # TODO: fragile
        warmup = model_dict['warmup']

    feats_var = lasagne.layers.get_output(l_feats, deterministic=True)
    predict_batch_fn = theano.function([l.input_var for l in l_in], feats_var)

    step = max_time - 2 * warmup

    # turn sequences into chunks
    chunks = [(i, k, min(d, k + max_time)) for i, d in enumerate(durations)
              for k in range(0, d - warmup, step)]
    chunked_sequences = []
    for feat in source_feat_seqs:

        def get_chunk(i, t1, t2, feat_=feat):
            return adjust_length(feat_[i][t1:t2], size=max_time, pad=0)

        chunked_sequences.append(seqtools.starmap(get_chunk, chunks))
    chunked_sequences = seqtools.collate(chunked_sequences)

    # turn into minibatches
    null_sample = chunked_sequences[0]
    n_features = len(null_sample)

    def collate(b):
        return [
            np.array([b[i][c] for i in range(batch_size)])
            for c in range(n_features)
        ]

    minibatches = seqtools.batch(chunked_sequences,
                                 batch_size,
                                 pad=null_sample,
                                 collate_fn=collate)
    # minibatches = seqtools.prefetch(minibatches, nworkers=2, max_buffered=10)

    # process
    batched_predictions = seqtools.starmap(predict_batch_fn, minibatches)
    batched_predictions = seqtools.add_cache(batched_predictions)
    chunked_predictions = seqtools.unbatch(batched_predictions, batch_size)

    # recompose
    feat_size = l_feats.output_shape[2:]
    storage = open_memmap(dump_file,
                          'w+',
                          dtype=np.float32,
                          shape=(sum(durations), ) + feat_size)
    subsequences = np.stack(
        [np.cumsum(durations) - durations,
         np.cumsum(durations)], axis=1)
    out_view = seqtools.split(storage, subsequences)

    for v, (s, start, stop) in zip(chunked_predictions, chunks):
        skip = warmup if start > 0 else 0
        out_view[s][start + skip:stop] = v[skip:stop - start]

    return [out_view]
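
Taken end to end, the function follows a chunk, batch, predict, unbatch, write-back pipeline built from seqtools views. Below is a minimal, self-contained sketch of that pipeline on toy data, with a doubling function standing in for the Theano network; the zero padding in get_chunk and the absence of warmup overlap are simplifications rather than the project's exact behaviour, and the seqtools helpers are assumed to behave as they are used in the code above.

import numpy as np
import seqtools

# toy stand-ins: three "recordings" of different lengths, one scalar feature per frame
durations = [7, 5, 9]
max_time, batch_size = 4, 2
source = np.arange(sum(durations), dtype=np.float32)
boundaries = np.stack(
    [np.cumsum(durations) - durations, np.cumsum(durations)], axis=1)
in_view = seqtools.split(source, boundaries)

# fixed-size chunks covering each recording (no warmup overlap in this toy version)
chunks = [(i, k, min(d, k + max_time))
          for i, d in enumerate(durations)
          for k in range(0, d, max_time)]

def get_chunk(i, t1, t2):
    # pad the last chunk of a recording up to max_time
    out = np.zeros(max_time, dtype=np.float32)
    out[:t2 - t1] = in_view[i][t1:t2]
    return out

chunked = seqtools.starmap(get_chunk, chunks)

# assemble fixed-size minibatches, padding the last one with a dummy sample
minibatches = seqtools.batch(chunked, batch_size, pad=chunked[0], collate_fn=np.stack)

# stand-in for the network: doubles every value, one minibatch at a time
batched_out = seqtools.smap(lambda b: 2 * b, minibatches)
chunk_out = seqtools.unbatch(batched_out, batch_size)

# write the per-chunk outputs back into a flat buffer through split views
flat_out = np.zeros_like(source)
out_view = seqtools.split(flat_out, boundaries)
for o, (i, t1, t2) in zip(chunk_out, chunks):
    out_view[i][t1:t2] = o[:t2 - t1]

assert np.array_equal(flat_out, 2 * source)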