Example #1
    def test_augmentations_with_seed_match_for_images_and_keypoints(self):
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        # keypoints here will not be changed by augseq, but they will induce
        # deterministic mode to start in augment_batches() as each batch
        # contains images AND keypoints
        kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
        batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
        batches = [batch.deepcopy() for _ in sm.xrange(60)]

        # seed=1
        with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
                            seed=1) as pool:
            batches_aug1 = pool.map_batches(batches, chunksize=2)
        # seed=1
        with multicore.Pool(augseq, processes=2, seed=1) as pool:
            batches_aug2 = pool.map_batches(batches, chunksize=1)
        # seed=2
        with multicore.Pool(augseq, processes=2, seed=2) as pool:
            batches_aug3 = pool.map_batches(batches, chunksize=1)

        assert len(batches_aug1) == 60
        assert len(batches_aug2) == 60
        assert len(batches_aug3) == 60

        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            for batch in batches_aug:
                for keypoints_aug in batch.keypoints_aug:
                    assert keypoints_aug.keypoints[0].x == 2
                    assert keypoints_aug.keypoints[0].y == 0

        for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
            # images were augmented
            assert not np.array_equal(b1.images_unaug, b1.images_aug)
            assert not np.array_equal(b2.images_unaug, b2.images_aug)
            assert not np.array_equal(b3.images_unaug, b3.images_aug)

            # original images still the same
            assert np.array_equal(b1.images_unaug, batch.images_unaug)
            assert np.array_equal(b2.images_unaug, batch.images_unaug)
            assert np.array_equal(b3.images_unaug, batch.images_unaug)

            # augmentations for same seed are the same
            assert np.array_equal(b1.images_aug, b2.images_aug)

            # augmentations for different seeds are different
            assert not np.array_equal(b1.images_aug, b3.images_aug)

        # make sure that batches did not repeat within each result set
        # (repeats may only occur between the results of the two pools
        # that used the same seed)
        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            self._assert_each_augmentation_not_more_than_once(batches_aug)
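The helper _assert_each_augmentation_not_more_than_once() called at the end is referenced throughout these examples but never shown. A minimal sketch of the check it performs (the actual helper in imgaug's test suite may be implemented differently):

    def _assert_each_augmentation_not_more_than_once(self, batches_aug):
        # no augmentation result may repeat verbatim: every pair of
        # distinct batches must differ in its augmented images
        for i, batch1 in enumerate(batches_aug):
            for batch2 in batches_aug[i+1:]:
                assert not np.array_equal(batch1.images_aug,
                                          batch2.images_aug)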
Example #2
    def test_augmentations_without_seed_differ(self):
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        batch = ia.Batch(images=np.uint8([image, image]))
        batches = [batch.deepcopy() for _ in sm.xrange(20)]
        with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
            batches_aug = pool.map_batches(batches, chunksize=2)
        with multicore.Pool(augseq, processes=2) as pool:
            batches_aug.extend(pool.map_batches(batches, chunksize=1))

        assert len(batches_aug) == 2 * 20

        self._assert_each_augmentation_not_more_than_once(batches_aug)
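Without a fixed seed, each pool draws fresh random states, so two separate runs should differ with overwhelming probability. A standalone sketch demonstrating that behavior:

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug import multicore

aug = iaa.AddElementwise((0, 255))
image = np.zeros((4, 4, 1), dtype=np.uint8)
batch = ia.Batch(images=np.uint8([image]))
with multicore.Pool(aug, processes=2) as pool:
    run1 = pool.map_batches([batch.deepcopy()])
with multicore.Pool(aug, processes=2) as pool:
    run2 = pool.map_batches([batch.deepcopy()])
# almost certainly False, as the two unseeded runs differ
print(np.array_equal(run1[0].images_aug, run2[0].images_aug))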
Example #3
    def test_inputs_not_lost(self):
        """Test to make sure that inputs (e.g. images) are never lost."""
        def _assert_contains_all_ids(batches_aug):
            # batch.images_unaug
            ids = set()
            for batch_aug in batches_aug:
                ids.add(int(batch_aug.images_unaug.flat[0]))
                ids.add(int(batch_aug.images_unaug.flat[1]))
            for idx in sm.xrange(2 * 100):
                assert idx in ids
            assert len(ids) == 200

            # batch.images_aug
            ids = set()
            for batch_aug in batches_aug:
                ids.add(int(batch_aug.images_aug.flat[0]))
                ids.add(int(batch_aug.images_aug.flat[1]))
            for idx in sm.xrange(2 * 100):
                assert idx in ids
            assert len(ids) == 200

        augseq = iaa.Identity()
        image = np.zeros((1, 1, 1), dtype=np.uint8)
        # creates batches containing images with ids from 0 to 199 (one pair
        # of consecutive ids per batch)
        batches = [
            ia.Batch(
                images=np.uint8([image + b_idx * 2, image + b_idx * 2 + 1]))
            for b_idx in sm.xrange(100)
        ]

        with multicore.Pool(augseq, processes=2, maxtasksperchild=25) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)

        with multicore.Pool(augseq, processes=2, maxtasksperchild=25,
                            seed=1) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)

        with multicore.Pool(augseq, processes=3, seed=2) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)

        with multicore.Pool(augseq, processes=2, seed=None) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)

            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
Example #4
    def test_processes(self):
        augseq = iaa.Identity()
        mock_Pool = mock.MagicMock()
        mock_cpu_count = mock.Mock()

        patch_pool = mock.patch("multiprocessing.Pool", mock_Pool)
        patch_cpu_count = mock.patch("multiprocessing.cpu_count",
                                     mock_cpu_count)
        with patch_pool, patch_cpu_count:
            # (cpu cores available, processes requested, processes started)
            combos = [(1, 1, 1), (2, 1, 1), (3, 1, 1), (1, 2, 2), (3, 2, 2),
                      (1, None, None), (2, None, None), (3, None, None),
                      (1, -1, 1), (2, -1, 1), (3, -1, 2), (4, -2, 2)]

            for cores_available, processes_req, expected in combos:
                with self.subTest(cpu_count_available=cores_available,
                                  processes_requested=processes_req):
                    mock_cpu_count.return_value = cores_available
                    with multicore.Pool(augseq,
                                        processes=processes_req) as _pool:
                        pass

                    if expected is None:
                        assert mock_Pool.call_args[0][0] is None
                    else:
                        assert mock_Pool.call_args[0][0] == expected
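The combos above encode how the requested processes value is mapped to the worker count handed to multiprocessing: None is passed through, positive values are taken verbatim, and negative values mean "all cores minus N". A sketch of the implied resolution rule (illustrative, not imgaug's actual code):

def _resolve_processes(processes, cpu_count):
    # None lets multiprocessing choose; positive values are used verbatim
    if processes is None or processes > 0:
        return processes
    # negative values: all cores minus abs(processes), at least one
    return max(cpu_count + processes, 1)

assert _resolve_processes(-1, 3) == 2
assert _resolve_processes(-2, 4) == 2
assert _resolve_processes(-1, 1) == 1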
Example #5
    def test_cpu_count_does_not_exist(self, mock_pool):
        def _side_effect():
            raise NotImplementedError

        old_method = multicore._get_context().cpu_count
        mock_cpu_count = mock.Mock()
        mock_cpu_count.side_effect = _side_effect
        multicore._get_context().cpu_count = mock_cpu_count

        augseq = iaa.Identity()
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            with multicore.Pool(augseq, processes=-1):
                pass

        assert mock_cpu_count.call_count == 1
        assert mock_pool.call_count == 1
        # the 'processes' arg to Pool is expected to be None, as
        # cpu_count() raised an error
        assert mock_pool.call_args_list[0][0][0] is None

        assert len(caught_warnings) == 1
        assert ("Could not find method multiprocessing.cpu_count(). "
                in str(caught_warnings[-1].message))

        multicore._get_context().cpu_count = old_method
Example #6
    def test_property_pool(self):
        mock_Pool = mock.MagicMock()
        mock_Pool.return_value = mock_Pool
        mock_Pool.close.return_value = None
        mock_Pool.join.return_value = None

        # We cannot just mock multiprocessing.Pool here, because of using
        # a custom context. We would have to mock each possible context's
        # Pool() method or overwrite here the Pool() method of the
        # actually used context.
        with mock.patch("multiprocessing.pool.Pool", mock_Pool):
            augseq = iaa.Identity()
            pool_config = multicore.Pool(augseq,
                                         processes=1,
                                         maxtasksperchild=4,
                                         seed=123)
            with pool_config as pool:
                assert pool.processes == 1
            assert pool._pool is None
        assert mock_Pool.call_count == 1
        assert mock_Pool.close.call_count == 1
        assert mock_Pool.join.call_count == 1
        # see
        # https://github.com/
        # python/cpython/blob/master/Lib/multiprocessing/context.py
        # L119 (method Pool()) for an example of how Pool() is called
        # internally.
        assert mock_Pool.call_args[0][0] == 1  # processes
        assert mock_Pool.call_args[0][1] is multicore._Pool_initialize_worker
        assert mock_Pool.call_args[0][2] == (augseq, 123)
        assert mock_Pool.call_args[0][3] == 4
Example #7
    def _test_map_batches_both(self, call_async):
        augseq = iaa.Noop()
        mock_Pool = mock.MagicMock()
        mock_Pool.return_value = mock_Pool
        mock_Pool.map.return_value = "X"
        mock_Pool.map_async.return_value = "X"
        with mock.patch("multiprocessing.Pool", mock_Pool):
            batches = [
                ia.Batch(images=[ia.quokka()]),
                ia.Batch(images=[ia.quokka() + 1])
            ]
            with multicore.Pool(augseq, processes=1) as pool:
                if call_async:
                    _ = pool.map_batches_async(batches)
                else:
                    _ = pool.map_batches(batches)

            if call_async:
                to_check = mock_Pool.map_async
            else:
                to_check = mock_Pool.map

            assert to_check.call_count == 1
            # args, arg 0
            assert to_check.call_args[0][0] == multicore._Pool_starworker
            # args, arg 1 (batches with ids), tuple 0, entry 0 in tuple (=> batch id)
            assert to_check.call_args[0][1][0][0] == 0
            # args, arg 1 (batches with ids), tuple 0, entry 1 in tuple (=> batch)
            assert np.array_equal(to_check.call_args[0][1][0][1].images_unaug,
                                  batches[0].images_unaug)
            # args, arg 1 (batches with ids), tuple 1, entry 0 in tuple (=> batch id)
            assert to_check.call_args[0][1][1][0] == 1
            # args, arg 1 (batches with ids), tuple 1, entry 1 in tuple (=> batch)
            assert np.array_equal(to_check.call_args[0][1][1][1].images_unaug,
                                  batches[1].images_unaug)
Example #8
def test_fast(processes, chunksize):
    augseq = iaa.Dropout(0.1)
    with multicore.Pool(augseq, processes=processes) as pool:
        batches = list(load_images(n_batches=10000, draw_text=False))
        time_start = time.time()
        batches_aug = pool.map_batches(batches, chunksize=chunksize)
        assert len(batches_aug) == 10000
        print("chunksize=%d, worker=%s, time=%.4fs" %
              (chunksize, processes, time.time() - time_start))
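load_images() is assumed by this benchmark and the ones below. A minimal stand-in that yields ia.Batch objects (names and parameters are illustrative; imgaug's actual check-script helper differs):

import time
import numpy as np
import imgaug as ia

def load_images(n_batches=10, sleep=0.0, draw_text=True):
    # yield batches of identical quokka images with one keypoint each;
    # draw_text and sleep only mimic the parameters used in the examples
    image = ia.quokka(size=(64, 64))
    kps = ia.KeypointsOnImage([ia.Keypoint(x=15, y=25)], shape=image.shape)
    for _ in range(n_batches):
        yield ia.Batch(images=np.uint8([image] * 4), keypoints=[kps] * 4)
        if sleep > 0:
            time.sleep(sleep)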
Example #9
def test_heavy(processes, chunksize):
    augseq_heavy = iaa.PiecewiseAffine(scale=0.2, nb_cols=8, nb_rows=8)
    with multicore.Pool(augseq_heavy, processes=processes) as pool:
        batches = list(load_images(n_batches=500, draw_text=False))
        time_start = time.time()
        batches_aug = pool.map_batches(batches, chunksize=chunksize)
        assert len(batches_aug) == 500
        print("chunksize=%d, worker=%s, time=%.4fs" %
              (chunksize, processes, time.time() - time_start))
Example #10
    def test_augmentations_with_seed_match(self):
        nb_batches = 60
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        batch = ia.Batch(images=np.uint8([image, image]))
        batches = [batch.deepcopy() for _ in sm.xrange(nb_batches)]

        # seed=1
        with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
                            seed=1) as pool:
            batches_aug1 = pool.map_batches(batches, chunksize=2)

        # seed=1
        with multicore.Pool(augseq, processes=2, seed=1) as pool:
            batches_aug2 = pool.map_batches(batches, chunksize=1)
        # seed=2
        with multicore.Pool(augseq, processes=2, seed=2) as pool:
            batches_aug3 = pool.map_batches(batches, chunksize=1)

        assert len(batches_aug1) == nb_batches
        assert len(batches_aug2) == nb_batches
        assert len(batches_aug3) == nb_batches

        for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
            # images were augmented
            assert not np.array_equal(b1.images_unaug, b1.images_aug)
            assert not np.array_equal(b2.images_unaug, b2.images_aug)
            assert not np.array_equal(b3.images_unaug, b3.images_aug)

            # original images still the same
            assert np.array_equal(b1.images_unaug, batch.images_unaug)
            assert np.array_equal(b2.images_unaug, batch.images_unaug)
            assert np.array_equal(b3.images_unaug, batch.images_unaug)

            # augmentations for same seed are the same
            assert np.array_equal(b1.images_aug, b2.images_aug)

            # augmentations for different seeds are different
            assert not np.array_equal(b1.images_aug, b3.images_aug)

        # make sure that batches did not repeat within each result set
        # (repeats may only occur between the results of the two pools
        # that used the same seed)
        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            self._assert_each_augmentation_not_more_than_once(batches_aug)
Example #11
    def test_augmentations_without_seed_differ_for_images_and_keypoints(self):
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        # keypoints here will not be changed by augseq, but they will
        # induce deterministic mode to start in augment_batches() as each
        # batch contains images AND keypoints
        kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
        batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
        batches = [batch.deepcopy() for _ in sm.xrange(20)]
        with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
            batches_aug = pool.map_batches(batches, chunksize=2)
        with multicore.Pool(augseq, processes=2) as pool:
            batches_aug.extend(pool.map_batches(batches, chunksize=1))

        assert len(batches_aug) == 2 * 20

        for batch in batches_aug:
            for keypoints_aug in batch.keypoints_aug:
                assert keypoints_aug.keypoints[0].x == 2
                assert keypoints_aug.keypoints[0].y == 0

        self._assert_each_augmentation_not_more_than_once(batches_aug)
Example #12
    def _test_imap_batches_both(cls, call_unordered):
        for clazz in [Batch, UnnormalizedBatch]:
            batches = [
                clazz(images=[ia.quokka()]),
                clazz(images=[ia.quokka() + 1])
            ]

            def _generate_batches():
                for batch in batches:
                    yield batch

            augseq = iaa.Identity()
            mock_Pool = mock.MagicMock()
            mock_Pool.return_value = mock_Pool
            mock_Pool.imap.return_value = batches
            mock_Pool.imap_unordered.return_value = batches
            with mock.patch("multiprocessing.pool.Pool", mock_Pool):
                with multicore.Pool(augseq, processes=1) as pool:
                    gen = _generate_batches()
                    if call_unordered:
                        _ = list(pool.imap_batches_unordered(gen))
                    else:
                        _ = list(pool.imap_batches(gen))

                if call_unordered:
                    to_check = mock_Pool.imap_unordered
                else:
                    to_check = mock_Pool.imap

                assert to_check.call_count == 1

                assert to_check.call_args[0][0] == multicore._Pool_starworker

                # convert generator to list, make it subscriptable
                arg_batches = list(to_check.call_args[0][1])

                # args, arg 1 (batches with ids), tuple 0,
                # entry 0 in tuple (=> batch id)
                assert arg_batches[0][0] == 0

                # tuple 0, entry 1 in tuple (=> batch)
                assert np.array_equal(arg_batches[0][1].images_unaug,
                                      batches[0].images_unaug)

                # tuple 1, entry 0 in tuple (=> batch id)
                assert arg_batches[1][0] == 1

                # tuple 1, entry 1 in tuple (=> batch)
                assert np.array_equal(arg_batches[1][1].images_unaug,
                                      batches[1].images_unaug)
Example #13
    def test_processes(self):
        augseq = iaa.Identity()
        mock_Pool = mock.MagicMock()
        mock_cpu_count = mock.Mock()

        # We cannot just mock multiprocessing.Pool here, because of using
        # a custom context. We would have to mock each possible context's
        # Pool() method or overwrite here the Pool() method of the
        # actually used context.
        patch_pool = mock.patch("multiprocessing.pool.Pool", mock_Pool)

        # Multiprocessing seems to always access os.cpu_count to get the
        # current count of cpu cores.
        # See
        # https://github.com/
        # python/cpython/blob/master/Lib/multiprocessing/context.py
        # L41.
        fname = ("os.cpu_count" if IS_SUPPORTING_CONTEXTS
                 else "multiprocessing.cpu_count")
        patch_cpu_count = mock.patch(fname, mock_cpu_count)

        with patch_pool, patch_cpu_count:
            # (cpu cores available, processes requested, processes started)
            combos = [
                (1, 1, 1),
                (2, 1, 1),
                (3, 1, 1),
                (1, 2, 2),
                (3, 2, 2),
                (1, None, None),
                (2, None, None),
                (3, None, None),
                (1, -1, 1),
                (2, -1, 1),
                (3, -1, 2),
                (4, -2, 2)
            ]

            for cores_available, processes_req, expected in combos:
                with self.subTest(cpu_count_available=cores_available,
                                  processes_requested=processes_req):
                    mock_cpu_count.return_value = cores_available
                    with multicore.Pool(augseq,
                                        processes=processes_req) as _pool:
                        pass

                    if expected is None:
                        assert mock_Pool.call_args[0][0] is None
                    else:
                        assert mock_Pool.call_args[0][0] == expected
Example #14
    def test_join_via_mock(self, mock_pool):
        # According to codecov, the join() does not get beyond its initial
        # if statement in the test_join() test, even though it should.
        # Might be a simple Travis multicore problem?
        # It is tested here again via some mocking.
        mock_pool.return_value = mock_pool
        mock_pool.join.return_value = True
        with multicore.Pool(iaa.Identity(), processes=2) as pool:
            pool.join()

            # Make sure that __exit__ does not call close(), which would then
            # call join() again and we would get a call_count of 2
            pool._pool = None

        assert mock_pool.join.call_count == 1
Example #15
    def test_property_pool(self):
        mock_Pool = mock.MagicMock()
        mock_Pool.return_value = mock_Pool
        mock_Pool.close.return_value = None
        mock_Pool.join.return_value = None
        with mock.patch("multiprocessing.Pool", mock_Pool):
            augseq = iaa.Noop()
            with multicore.Pool(augseq,
                                processes=1,
                                maxtasksperchild=4,
                                seed=123) as pool:
                assert pool.processes == 1
            assert pool._pool is None
        assert mock_Pool.call_count == 1
        assert mock_Pool.close.call_count == 1
        assert mock_Pool.join.call_count == 1
        assert mock_Pool.call_args[0][0] == 1  # processes
        assert mock_Pool.call_args[1]["initargs"] == (augseq, 123)
        assert mock_Pool.call_args[1]["maxtasksperchild"] == 4
Example #16
    def test_processes(self):
        augseq = iaa.Noop()
        mock_Pool = mock.MagicMock()
        mock_cpu_count = mock.Mock()
        with mock.patch("multiprocessing.Pool",
                        mock_Pool), mock.patch("multiprocessing.cpu_count",
                                               mock_cpu_count):
            combos = [(1, 1, 1), (2, 1, 1), (3, 1, 1), (1, 2, 2), (3, 2, 2),
                      (1, None, None), (2, None, None), (3, None, None),
                      (1, -1, 1), (2, -1, 1), (3, -1, 2), (4, -2, 2)]

            for ret_val, inputs, expected in combos:
                mock_cpu_count.return_value = ret_val
                with multicore.Pool(augseq, processes=inputs) as _pool:
                    pass

                if expected is None:
                    assert mock_Pool.call_args[0][0] is None
                else:
                    assert mock_Pool.call_args[0][0] == expected
Example #17
def main():
    augseq = iaa.Sequential(
        [iaa.Fliplr(0.5),
         iaa.CoarseDropout(p=0.1, size_percent=0.1)])

    def func_images(images, random_state, parents, hooks):
        time.sleep(0.2)
        return images

    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    augseq_slow = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Lambda(func_images=func_images,
                   func_heatmaps=func_heatmaps,
                   func_keypoints=func_keypoints)
    ])

    print("------------------")
    print(".pool()")
    print("------------------")
    with augseq.pool() as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.map_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images())
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=32)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=2)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=2)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=1)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=1)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.map_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.map_batches(list(load_images(n_batches=1000)),
                                       chunksize=32)
        assert len(batches_aug) == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.map_batches chunksize with fast aug")
    print("------------------")

    def test_fast(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            batches = list(load_images(n_batches=10000, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_fast(-4, 1)
    test_fast(1, 1)
    test_fast(None, 1)
    test_fast(1, 4)
    test_fast(None, 4)
    test_fast(1, 32)
    test_fast(None, 32)

    print("------------------")
    print("Pool.imap_batches chunksize with fast aug")
    print("------------------")

    def test_fast_imap(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            time_start = time.time()
            batches_aug = pool.imap_batches(load_images(n_batches=10000,
                                                        draw_text=False),
                                            chunksize=chunksize)
            batches_aug = list(batches_aug)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_fast_imap(-4, 1)
    test_fast_imap(1, 1)
    test_fast_imap(None, 1)
    test_fast_imap(1, 4)
    test_fast_imap(None, 4)
    test_fast_imap(1, 32)
    test_fast_imap(None, 32)

    print("------------------")
    print("Pool.map_batches with computationally expensive aug")
    print("------------------")

    def test_heavy(processes, chunksize):
        augseq_heavy = iaa.PiecewiseAffine(scale=0.2, nb_cols=8, nb_rows=8)
        with multicore.Pool(augseq_heavy, processes=processes) as pool:
            batches = list(load_images(n_batches=500, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 500
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_heavy(-4, 1)
    test_heavy(1, 1)
    test_heavy(None, 1)
    test_heavy(1, 4)
    test_heavy(None, 4)
    test_heavy(1, 32)
    test_heavy(None, 32)

    print("------------------")
    print("Pool.imap_batches(batches), slow loading")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100, sleep=0.2))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches), seed=1")
    print("------------------")
    # color the images of the first worker so that we can see in the
    # grids which images were processed by that worker
    with PoolWithMarkedWorker(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_a = draw_grid(images_aug, keypoints_aug)

    with multicore.Pool(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_b = draw_grid(images_aug, keypoints_aug)

    grid_b[:, 0:2, 0] = 0
    grid_b[:, 0:2, 1] = 255
    grid_b[:, 0:2, 2] = 0
    ia.imshow(np.hstack([grid_a, grid_b]))

    print("------------------")
    print("Pool.imap_batches(batches), seed=None")
    print("------------------")
    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_a = draw_grid(images_aug, keypoints_aug)

    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_b = draw_grid(images_aug, keypoints_aug)

    ia.imshow(np.hstack([grid_a, grid_b]))

    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4, seed=1")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    for augseq_i in [augseq, augseq_slow]:
        print("------------------")
        print("Many very small runs (batches=1)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=1)))
            print("Done in %.4fs" % (time.time() - time_start, ))

        print("------------------")
        print("Many very small runs (batches=2)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=2)))
            print("Done in %.4fs" % (time.time() - time_start, ))
Example #18
    def test_close(self):
        augseq = iaa.Noop()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.close()
Example #19
    def test_join(self):
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.close()
            pool.join()
Example #20
    def test_terminate(self):
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.terminate()
Example #21
    def test___init___seed_out_of_bounds(self):
        augseq = iaa.Identity()
        with self.assertRaises(AssertionError) as context:
            _ = multicore.Pool(augseq, seed=iarandom.SEED_MAX_VALUE + 100)
        assert "Expected `seed` to be" in str(context.exception)
Example #22
    def _test_imap_batches_both_output_buffer_size(cls,
                                                   call_unordered,
                                                   timeout=0.075):
        batches = [
            ia.Batch(images=[np.full((1, 1), i, dtype=np.uint8)])
            for i in range(8)
        ]

        def _generate_batches(times):
            for batch in batches:
                yield batch
                times.append(time.time())

        def callfunc(pool, gen, output_buffer_size):
            func = (pool.imap_batches_unordered
                    if call_unordered else pool.imap_batches)

            for v in func(gen, output_buffer_size=output_buffer_size):
                yield v

        def contains_all_ids(inputs):
            arrs = np.uint8([batch.images_aug for batch in inputs])
            ids_uq = np.unique(arrs)
            return (len(ids_uq) == len(batches) and np.all(0 <= ids_uq)
                    and np.all(ids_uq < len(batches)))

        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=1) as pool:
            # no output buffer limit, there should be no noteworthy lag
            # for any batch requested from _generate_batches()
            times = []
            gen = callfunc(pool, _generate_batches(times), None)
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            assert np.all(times_diffs < timeout * 1.01)
            assert contains_all_ids(result)

            # with output buffer limit, but set to the number of batches,
            # i.e. should again not lead to any lag
            times = []
            gen = callfunc(pool, _generate_batches(times), len(batches))
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            assert np.all(times_diffs < timeout * 1.01)
            assert contains_all_ids(result)

            # With output buffer limit of #batches/2 (=4), followed by a
            # timeout after starting the loading process. This should quickly
            # load batches until the buffer is full, then wait until the
            # batches are requested from the buffer (i.e. after the timeout
            # ended) and then proceed to produce batches at the speed at which
            # they are requested. This should lead to a measurable lag between
            # batch 4 and 5 (matching the timeout).
            times = []
            gen = callfunc(pool, _generate_batches(times), 4)
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            # use -1 here because there are N-1 time diffs for N batches
            # (each diff is between the Nth and the (N+1)th batch)
            assert np.all(times_diffs[0:4 - 1] < timeout * 1.01)
            assert np.all(times_diffs[4 - 1:4 - 1 + 1] >= timeout * 0.99)
            assert np.all(times_diffs[4 - 1 + 1:] < timeout * 1.01)
            assert contains_all_ids(result)
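The output_buffer_size argument exercised above caps how many augmented batches may pile up before the consumer reads them, which bounds memory use when loading outpaces consumption. A short usage sketch:

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug import multicore

def make_batches(n):
    for i in range(n):
        yield ia.Batch(images=[np.full((4, 4, 1), i, dtype=np.uint8)])

with multicore.Pool(iaa.Identity(), processes=1) as pool:
    # at most 4 augmented batches are buffered at any time
    for batch_aug in pool.imap_batches(make_batches(8),
                                       output_buffer_size=4):
        print(batch_aug.images_aug[0].flat[0])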