Example #1
0
    def __call__(self, batch, device=None, padding=None):
        """Concatenate data and transfer them to GPU asynchronously.

        See also :func:`chainer.dataset.concat_examples`.

        Args:
            batch (list): A list of examples.
            device (int): Device ID to which each array is sent.
            padding: Scalar value for extra elements.

        Returns:
            Array, a tuple of arrays, or a dictionary of arrays.
            The type depends on the type of each example in the batch.

        Raises:
            ValueError: If ``batch`` is empty, or if ``device`` differs from
                the one supplied on the first call.
        """
        if not batch:
            raise ValueError('batch is empty')
        first_elem = batch[0]

        if not self._conveyor:
            self._device = device  # device is set at first call
            if device is not None and device >= 0 and self._stream is None:
                with cuda.get_device_from_id(device):
                    self._stream = cuda.Stream(non_blocking=True)
        # Compare by value, not identity: ``is`` on int device IDs only
        # works by accident of CPython's small-integer caching and would
        # wrongly reject an equal device ID >= 257. ``!=`` also handles
        # the ``None`` (CPU) case correctly.
        if device != self._device:
            raise ValueError('device is different')

        with cuda.get_device_from_id(device):
            if isinstance(first_elem, tuple):
                result = []
                if not isinstance(padding, tuple):
                    # Broadcast a scalar padding to every tuple position.
                    padding = [padding] * len(first_elem)

                # Enqueue all transfers first so they overlap, then collect.
                for i in six.moves.range(len(first_elem)):
                    self._conveyor[i].put(
                        _concat_arrays([example[i] for example in batch],
                                       padding[i]))

                for i in six.moves.range(len(first_elem)):
                    result.append(self._conveyor[i].get())

                return tuple(result)

            elif isinstance(first_elem, dict):
                result = {}
                if not isinstance(padding, dict):
                    # Broadcast a scalar padding to every key.
                    padding = {key: padding for key in first_elem}

                # Enqueue all transfers first so they overlap, then collect.
                for key in first_elem:
                    self._conveyor[key].put(
                        _concat_arrays([example[key] for example in batch],
                                       padding[key]))

                for key in first_elem:
                    result[key] = self._conveyor[key].get()

                return result

            else:
                # Plain (non-structured) examples: concatenate and transfer
                # synchronously without the conveyor.
                return to_device(device, _concat_arrays(batch, padding))
Example #2
0
 def test_cupy_array_async1(self):
     """``to_gpu`` with a stream returns a GPU-resident array unchanged."""
     src = cuda.to_gpu(self.x)
     if not self.c_contiguous:
         src = cuda.cupy.asfortranarray(src)
     # Passing a stream to to_gpu is deprecated; the call must warn.
     with testing.assert_warns(DeprecationWarning):
         dst = cuda.to_gpu(src, stream=cuda.Stream())
     self.assertIsInstance(dst, cuda.ndarray)
     self.assertIs(src, dst)  # same object — no copy is made
     cuda.cupy.testing.assert_array_equal(src, dst)
Example #3
0
 def test_numpy_array_async(self):
     """``to_cpu`` with a stream hands back the host array itself."""
     result = cuda.to_cpu(self.x, stream=cuda.Stream())
     self.assertIsInstance(result, numpy.ndarray)
     self.assertIs(result, self.x)  # same object — no copy is made
Example #4
0
 def test_numpy_array_async(self):
     """``to_cpu`` with a stream hands back the host array itself."""
     result = cuda.to_cpu(self.x, stream=cuda.Stream())
     assert isinstance(result, numpy.ndarray)
     assert result is self.x  # same object — no copy is made