def test_starmap_async(self):
    """starmap_async over argument pairs should yield results in order."""
    pool = Pool(processes=2)
    pairs = [(t.ones([10]) * i, t.ones([10]) * i) for i in range(5)]
    results = pool.starmap_async(func2, pairs).get()
    assert all(got == want for got, want in zip(results, [0, 20, 40, 60, 80]))
    pool.close()
    pool.join()
def test_imap_unordered(self):
    """imap_unordered may return out of order; sorting restores the expected sequence."""
    pool = Pool(processes=2)
    inputs = [t.ones([10]) * i for i in range(5)]
    results = sorted(pool.imap_unordered(func, inputs))
    assert all(got == want for got, want in zip(results, [0, 20, 40, 60, 80]))
    pool.close()
    pool.join()
def test_lambda_and_local(self):
    """A global pool should accept closures and lambdas; also exercise is_copy_tensors=False."""
    singles = [t.ones([10]) * i for i in range(5)]
    y = t.ones([10])
    pairs = [(t.ones([10]) * i, t.ones([10]) * i) for i in range(5)]

    def local_func(xx):
        # Closure over y — only picklable/dispatchable when the pool is global.
        nonlocal y
        return t.sum(xx + y)

    pool = Pool(processes=2, is_global=True)
    assert all(got == want for got, want in
               zip(pool.map(local_func, singles), [10, 20, 30, 40, 50]))
    assert all(got == want for got, want in
               zip(pool.map(lambda xx: t.sum(xx[0] + xx[1]), pairs),
                   [0, 20, 40, 60, 80]))
    pool.close()
    pool.join()

    pool = Pool(processes=2, is_copy_tensors=False)
    assert all(got == want for got, want in
               zip(pool.map(func, singles), [0, 20, 40, 60, 80]))
    pool.close()
    pool.join()
def test_apply(self):
    """Synchronous apply on a single argument tuple."""
    # for pytest-cov to run on sub processes
    pool = Pool(processes=2)
    ones = t.ones([10])
    assert pool.apply(func, (ones, )) == 20
    pool.close()
    pool.join()
def test_gpu_tensor(self, pytestconfig):
    """Map over tensors placed on the configured GPU device, with and without tensor copying."""
    device = pytestconfig.getoption("gpu_device")
    inputs = [t.ones([10], device=device) * i for i in range(5)]
    expected = [0, 20, 40, 60, 80]

    for copy_flag in (True, False):
        pool = Pool(processes=2, is_copy_tensors=copy_flag)
        assert all(got == want
                   for got, want in zip(pool.map(func, inputs), expected))
        pool.close()
        pool.join()
def test_apply_async(self):
    """Asynchronous apply; .get() blocks until the result is ready."""
    pool = Pool(processes=2)
    ones = t.ones([10])
    result = pool.apply_async(func, (ones, ))
    assert result.get() == 20
    pool.close()
    pool.join()
def test_size(self):
    """size() should report the worker count the pool was created with."""
    pool = Pool(processes=2)
    assert pool.size() == 2
    pool.close()
    pool.join()