Example #1
        def test_func(self, *args, **kw):
            # save the global cuFFT multi-GPU settings so they can be
            # restored once the test finishes
            use_multi_gpus = config.use_multi_gpus
            _devices = config._devices

            try:
                for gpus in gpu_configs:
                    try:
                        nGPUs = len(gpus)
                        assert nGPUs >= 2, 'Must use at least two GPUs'
                        config.use_multi_gpus = True
                        config.set_cufft_gpus(gpus)

                        # run the wrapped test under this GPU configuration
                        impl(self, *args, **kw)
                    except Exception:
                        # report the failing configuration before re-raising
                        print('GPU config is:', gpus)
                        raise
            finally:
                # restore the saved settings regardless of the outcome
                config.use_multi_gpus = use_multi_gpus
                config._devices = _devices
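
The snippet above is the inner wrapper of a test decorator: impl and
gpu_configs come from an enclosing scope that is not shown. A minimal
sketch of what that enclosing decorator might look like (the
multi_gpu_config name and the usage example are assumptions, not part
of the original):

import functools


def multi_gpu_config(gpu_configs=None):
    # hypothetical enclosing decorator: runs the wrapped test once per
    # GPU configuration in gpu_configs
    def decorator(impl):
        @functools.wraps(impl)
        def test_func(self, *args, **kw):
            ...  # wrapper body as shown in Example #1
        return test_func
    return decorator

# hypothetical usage:
# @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
# def test_fft(self):
#     ...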
Example #2
    def test_LRU_cache7(self):
        # test accessing a multi-GPU plan
        cache0 = self.caches[0]
        cache1 = self.caches[1]

        # ensure a fresh state
        assert cache0.get_curr_size() == 0 <= cache0.get_size()
        assert cache1.get_curr_size() == 0 <= cache1.get_size()

        # do some computation on GPU 0
        with device.Device(0):
            a = testing.shaped_random((10, ), cupy, cupy.float32)
            cupy.fft.fft(a)
        assert cache0.get_curr_size() == 1 <= cache0.get_size()
        assert cache1.get_curr_size() == 0 <= cache1.get_size()

        # do a multi-GPU FFT
        config.use_multi_gpus = True
        config.set_cufft_gpus([0, 1])
        c = testing.shaped_random((128, ), cupy, cupy.complex64)
        cupy.fft.fft(c)
        assert cache0.get_curr_size() == 2 <= cache0.get_size()
        assert cache1.get_curr_size() == 1 <= cache1.get_size()

        # check that both devices' caches see the same multi-GPU plan
        plan0 = next(iter(cache0))[1].plan
        plan1 = next(iter(cache1))[1].plan
        assert plan0 is plan1

        # reset
        config.use_multi_gpus = False
        config._devices = None

        # do some computation on GPU 1
        with device.Device(1):
            e = testing.shaped_random((20, ), cupy, cupy.complex128)
            cupy.fft.fft(e)
        assert cache0.get_curr_size() == 2 <= cache0.get_size()
        assert cache1.get_curr_size() == 2 <= cache1.get_size()

        # at this point, the multi-GPU plan is still the most recently
        # used one on GPU 0, but not on GPU 1
        assert plan0 is next(iter(cache0))[1].plan
        assert plan1 is not next(iter(cache1))[1].plan

        # now use it again to make it the most recent
        config.use_multi_gpus = True
        config.set_cufft_gpus([0, 1])
        c = testing.shaped_random((128, ), cupy, cupy.complex64)
        cupy.fft.fft(c)
        assert cache0.get_curr_size() == 2 <= cache0.get_size()
        assert cache1.get_curr_size() == 2 <= cache1.get_size()
        assert plan0 is next(iter(cache0))[1].plan
        assert plan1 is next(iter(cache1))[1].plan
        # reset
        config.use_multi_gpus = False
        config._devices = None

        # do two more distinct FFTs on one of the devices; the
        # multi-GPU plan should then be evicted from both caches
        with device.Device(1):
            x = testing.shaped_random((30, ), cupy, cupy.complex128)
            cupy.fft.fft(x)
            y = testing.shaped_random((40, 40), cupy, cupy.complex64)
            cupy.fft.fftn(y)
        for _, node in cache0:
            assert plan0 is not node.plan
        for _, node in cache1:
            assert plan1 is not node.plan
        assert cache0.get_curr_size() == 1 <= cache0.get_size()
        assert cache1.get_curr_size() == 2 <= cache1.get_size()
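
The per-device caches asserted above are presumably fetched through
CuPy's plan-cache accessor. A minimal sketch of a fixture that could
populate self.caches (the class name and the setUp body are
assumptions; device.Device(), config.get_plan_cache(), and the cache's
clear() method are existing CuPy APIs):

from cupy.cuda import device
from cupy.fft import config


class TestPlanCacheFixture:
    def setUp(self):
        # hypothetical fixture: grab each device's LRU plan cache and
        # clear it so every test starts from an empty state
        self.caches = []
        for dev in (0, 1):
            with device.Device(dev):
                cache = config.get_plan_cache()
                cache.clear()
                self.caches.append(cache)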