Example #1
from seqtools import batch, unbatch


def test_batching():
    arr = list(range(137))

    # drop_last=True: keep only complete batches of 5 (the trailing 2 items are dropped)
    chunked = list(batch(arr, 5, True))
    expected = [[i + k for k in range(5)] for i in range(0, 135, 5)]
    assert chunked == expected

    unchunked = list(unbatch(chunked, 5, 0))
    assert unchunked == [x for b in expected for x in b]

    # drop_last=False: the incomplete final batch [135, 136] is kept
    chunked = list(batch(arr, 5, False))
    expected = [[i + k for k in range(5)] for i in range(0, 135, 5)] \
        + [[135, 136]]
    assert chunked == expected

    unchunked = list(unbatch(chunked, 5, len(arr) % 5))
    assert unchunked == [x for b in expected for x in b]

    # pad=0: the incomplete final batch is filled with zeros up to size 5
    chunked = list(batch(arr, 5, pad=0, collate_fn=list))
    expected = [[i + k for k in range(5)] for i in range(0, 135, 5)] \
        + [[135, 136, 0, 0, 0]]
    assert chunked == expected

    # without list(), batch returns a writable view: assigning to it propagates
    # back to the underlying sequence
    chunked = batch(arr, 5, pad=0, collate_fn=list)
    chunked[:1] = [[-1, -2, -3, -4, -5]]
    chunked[-1] = [-135, -136]
    assert arr == [-1, -2, -3, -4, -5] + list(range(5, 135)) + [-135, -136]
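
For reference, here is a minimal sketch of the batch/unbatch semantics the test exercises, on a toy list (the values below are made up):

from seqtools import batch, unbatch

toy = list(range(7))

# drop_last=True keeps only complete batches; drop_last=False keeps the remainder
assert list(batch(toy, 3, True)) == [[0, 1, 2], [3, 4, 5]]
assert list(batch(toy, 3, False)) == [[0, 1, 2], [3, 4, 5], [6]]

# pad fills the incomplete final batch up to the batch size
assert list(batch(toy, 3, pad=-1, collate_fn=list)) == \
    [[0, 1, 2], [3, 4, 5], [6, -1, -1]]

# unbatch flattens batches back, given the batch size and the size of the last batch
assert list(unbatch(batch(toy, 3, False), 3, len(toy) % 3)) == toy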
Example #2
    def predict_fn(feature_sequences):
        durations = np.array([len(s) for s in feature_sequences[0]])
        step = max_time - 2 * warmup

        # turn sequences into chunks
        chunks = [(i, k, min(d, k + max_time)) for i, d in enumerate(durations)
                  for k in range(0, d - warmup, step)]
        chunked_sequences = []
        for feat in feature_sequences:

            def get_chunk(i, t1, t2, feat_=feat):
                return adjust_length(feat_[i][t1:t2], size=max_time, pad=0)

            chunked_sequences.append(seqtools.starmap(get_chunk, chunks))
        chunked_sequences.append([np.int32(t2 - t1) for _, t1, t2 in chunks])
        chunked_sequences = seqtools.collate(chunked_sequences)

        # turn into minibatches
        null_sample = chunked_sequences[0]
        n_features = len(null_sample)

        def collate(b):
            return [
                np.array([b[i][c] for i in range(batch_size)])
                for c in range(n_features)
            ]

        minibatches = seqtools.batch(chunked_sequences,
                                     batch_size,
                                     pad=null_sample,
                                     collate_fn=collate)
        # minibatches = seqtools.prefetch(
        #     minibatches, max_cached=nworkers * 5, nworkers=nworkers)

        # process
        batched_predictions = seqtools.starmap(predict_batch_fn, minibatches)
        batched_predictions = seqtools.add_cache(batched_predictions)
        chunked_predictions = seqtools.unbatch(batched_predictions, batch_size)

        # recompose
        out = [
            np.empty((d, ) + l_out.output_shape[2:], dtype=np.float32)
            for d in durations
        ]

        for v, (s, start, stop) in zip(chunked_predictions, chunks):
            skip = warmup if start > 0 else 0
            out[s][start + skip:stop] = v[skip:stop - start]

        return out
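
The chunk list above covers each sequence with windows of max_time frames, stepped by max_time - 2 * warmup so that consecutive windows overlap; on recomposition, every chunk except the first of a sequence drops its first warmup frames. A minimal sketch of that arithmetic with toy values (durations, max_time and warmup below are made up, not taken from the model):

durations = [23, 7]
max_time = 10
warmup = 2
step = max_time - 2 * warmup  # 6

chunks = [(i, k, min(d, k + max_time))
          for i, d in enumerate(durations)
          for k in range(0, d - warmup, step)]

print(chunks)
# [(0, 0, 10), (0, 6, 16), (0, 12, 22), (0, 18, 23), (1, 0, 7)]
# after skipping the warmup frames of the non-initial chunks, sequence 0 is
# written as 0:10, 8:16, 14:22 and 20:23, so every frame receives a prediction
# (overlapping frames are simply overwritten by the later chunk).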
Example #3
    def make_sequence(self):
        """Build a sequence that looks like a dataloader when iterated over."""
        # index sampling / shuffling
        if self.batch_sampler:
            batch_indices = list(self.batch_sampler)
            out = seqtools.smap(lambda bi: [self.dataset[i] for i in bi],
                                batch_indices)
        elif self.sampler:
            shuffle_indices = list(self.sampler)
            out = seqtools.gather(self.dataset, shuffle_indices)
        elif self.shuffle:
            shuffle_indices = np.random.permutation(len(self.dataset))
            out = seqtools.gather(self.dataset, shuffle_indices)
        else:
            out = self.dataset

        # batch
        if not self.batch_sampler and self.batch_size is not None:
            out = seqtools.batch(out,
                                 k=self.batch_size,
                                 drop_last=self.drop_last,
                                 collate_fn=self.collate_fn)
        elif self.batch_sampler:
            out = seqtools.smap(self.collate_fn, out)

        # prefetch
        if self.num_workers > 0:
            out = seqtools.prefetch(out,
                                    max_buffered=self.num_workers *
                                    self.prefetch_factor,
                                    nworkers=self.num_workers,
                                    method='process',
                                    start_hook=self.worker_init_fn,
                                    shm_size=self.shm_size)

        # convert into tensors
        out = seqtools.smap(into_tensors, out)

        # pin memory
        if self.pin_memory:
            out = seqtools.smap(pin_tensors_memory, out)
            out = seqtools.prefetch(out,
                                    nworkers=1,
                                    method='thread',
                                    max_buffered=1)

        return out
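
Outside the class, the same building blocks compose directly. A minimal sketch on a toy in-memory dataset (the dataset and batch size below are made up for illustration):

import numpy as np
import seqtools

dataset = [np.full((3,), i) for i in range(10)]      # ten toy samples
shuffle_indices = np.random.permutation(len(dataset))

out = seqtools.gather(dataset, shuffle_indices)      # lazy shuffling
out = seqtools.batch(out, k=4, drop_last=True, collate_fn=list)
out = seqtools.smap(np.stack, out)                   # stand-in for into_tensors

for minibatch in out:  # items are computed on demand, one minibatch at a time
    assert minibatch.shape == (4, 3)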
Example #4

import time

import seqtools

# `files` and `load` are assumed to be defined earlier in the original example:
# a list of data files and a function that loads one file as a batch of samples.
loaded_files = seqtools.smap(load, files)
loaded_files = seqtools.add_cache(loaded_files, 2)
all_samples = seqtools.unbatch(loaded_files, 200, 10)


def preprocess(x):
    t = time.perf_counter()  # time.clock() was removed in Python 3.8
    while time.perf_counter() - t < 0.005:
        pass  # busy waiting
    return x


preprocessed_samples = seqtools.smap(preprocess, all_samples)
minibatches = seqtools.batch(preprocessed_samples, 64, collate_fn=list)

t1 = time.time()
for batch in minibatches:
    pass
t2 = time.time()
print("sequential read took {:.1f}\"".format(t2 - t1))

t1 = time.time()
for batch in seqtools.prefetch(minibatches,
                               max_cached=100,
                               method="thread",
                               nworkers=2):
    pass
t2 = time.time()
print("threaded read took {:.1f}\"".format(t2 - t1))
Example #5
def transfer_feat_seqs(transfer_from, freeze_at):
    import theano
    import theano.tensor as T
    import lasagne
    from sltools.nn_utils import adjust_length
    from experiments.utils import reload_best_hmm, reload_best_rnn

    report = shelve.open(os.path.join(cachedir, transfer_from))

    if report['meta']['modality'] == "skel":
        source_feat_seqs = [skel_feat_seqs]
    elif report['meta']['modality'] == "bgr":
        source_feat_seqs = [bgr_feat_seqs]
    elif report['meta']['modality'] == "fusion":
        source_feat_seqs = [skel_feat_seqs, bgr_feat_seqs]
    else:
        raise ValueError("unknown modality: {}".format(report['meta']['modality']))

    # no computation required
    if freeze_at == "inputs":
        return source_feat_seqs

    # reuse cached features
    dump_file = os.path.join(
        cachedir,
        report['meta']['experiment_name'] + "_" + freeze_at + "feats.npy")
    if os.path.exists(dump_file):
        boundaries = np.stack(
            (np.cumsum(durations) - durations, np.cumsum(durations)), axis=1)
        return [split_seq(np.load(dump_file, mmap_mode='r'), boundaries)]

    # reload model
    if report['meta']['model'] == "hmm":
        _, recognizer, _ = reload_best_hmm(report)
        l_in = recognizer.posterior.l_in
        if freeze_at == "embedding":
            l_feats = recognizer.posterior.l_feats
        elif freeze_at == "logits":
            l_feats = recognizer.posterior.l_raw
        elif freeze_at == "posteriors":
            l_feats = lasagne.layers.NonlinearityLayer(
                recognizer.posterior.l_out, T.exp)
        else:
            raise ValueError("unsupported freeze_at value: {}".format(freeze_at))
        batch_size, max_time, *_ = l_in[0].output_shape  # TODO: fragile
        warmup = recognizer.posterior.warmup

    else:
        _, model_dict, _ = reload_best_rnn(report)
        l_in = model_dict['l_in']
        l_feats = model_dict['l_feats']
        batch_size, max_time, *_ = l_in[0].output_shape  # TODO: fragile
        warmup = model_dict['warmup']

    feats_var = lasagne.layers.get_output(l_feats, deterministic=True)
    predict_batch_fn = theano.function([l.input_var for l in l_in], feats_var)

    step = max_time - 2 * warmup

    # turn sequences into chunks
    chunks = [(i, k, min(d, k + max_time)) for i, d in enumerate(durations)
              for k in range(0, d - warmup, step)]
    chunked_sequences = []
    for feat in source_feat_seqs:

        def get_chunk(i, t1, t2, feat_=feat):
            return adjust_length(feat_[i][t1:t2], size=max_time, pad=0)

        chunked_sequences.append(seqtools.starmap(get_chunk, chunks))
    chunked_sequences = seqtools.collate(chunked_sequences)

    # turn into minibatches
    null_sample = chunked_sequences[0]
    n_features = len(null_sample)

    def collate(b):
        return [
            np.array([b[i][c] for i in range(batch_size)])
            for c in range(n_features)
        ]

    minibatches = seqtools.batch(chunked_sequences,
                                 batch_size,
                                 pad=null_sample,
                                 collate_fn=collate)
    # minibatches = seqtools.prefetch(minibatches, nworkers=2, max_buffered=10)

    # process
    batched_predictions = seqtools.starmap(predict_batch_fn, minibatches)
    batched_predictions = seqtools.add_cache(batched_predictions)
    chunked_predictions = seqtools.unbatch(batched_predictions, batch_size)

    # recompose
    feat_size = l_feats.output_shape[2:]
    storage = open_memmap(dump_file,
                          'w+',
                          dtype=np.float32,
                          shape=(sum(durations), ) + feat_size)
    subsequences = np.stack(
        [np.cumsum(durations) - durations,
         np.cumsum(durations)], axis=1)
    out_view = seqtools.split(storage, subsequences)

    for v, (s, start, stop) in zip(chunked_predictions, chunks):
        skip = warmup if start > 0 else 0
        out_view[s][start + skip:stop] = v[skip:stop - start]

    return [out_view]
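
The boundary arithmetic used twice above (for the cached path and for the memmap views) is a plain cumulative sum over the sequence durations. A minimal sketch with toy numbers (not from the experiment):

import numpy as np

durations = np.array([4, 2, 3])
starts = np.cumsum(durations) - durations   # [0, 4, 6]
stops = np.cumsum(durations)                # [4, 6, 9]
boundaries = np.stack((starts, stops), axis=1)

storage = np.arange(9)                      # three sequences stored back to back
views = [storage[a:b] for a, b in boundaries]
print(views)  # [array([0, 1, 2, 3]), array([4, 5]), array([6, 7, 8])]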