Code Example #1
    def call_to_device():
        # `queue` and `dedup_between_calls` come from the enclosing test's scope.
        logger = logging.getLogger()
        # Capture warning logs as elements in a queue.
        logger.addHandler(handlers.QueueHandler(queue))

        warned_types = set() if dedup_between_calls else None
        to_device(["string_data", "string_data"], "cpu", warned_types)
        to_device(["string_data", "string_data"], "cpu", warned_types)
Code Example #2
import numpy as np
import torch

from determined.pytorch import to_device


def test_to_device() -> None:
    """
    There doesn't seem to be an easy way to mock out PyTorch devices, so we
    skip testing that the data actually makes it *onto* the device.
    """
    data_structure = {
        "input_1": torch.Tensor(1),
        "input_3": "str",
        "input_4": 1,
    }

    assert to_device(data_structure, "cpu") == data_structure
    assert np.array_equal(to_device(np.array([0, 1, 2]), "cpu"), np.array([0, 1, 2]))
Code Example #3
import logging
import multiprocessing
from logging import handlers

import pytest

from determined.pytorch import to_device


@pytest.mark.parametrize("dedup_between_calls", [False, True])
def test_to_device_warnings(dedup_between_calls) -> None:
    queue = multiprocessing.Queue()

    logger = logging.getLogger()
    # Capture warning logs as elements in a queue.
    logger.addHandler(handlers.QueueHandler(queue))

    warned_types = set() if dedup_between_calls else None
    to_device(["string_data", "string_data"], "cpu", warned_types)
    to_device(["string_data", "string_data"], "cpu", warned_types)

    assert queue.qsize() == (1 if dedup_between_calls else 2)
    while queue.qsize():
        msg = queue.get().message
        assert "not able to move data" in msg
Code Example #4
    def to_device(self, data: pytorch._Data) -> pytorch.TorchData:
        """Map generated data to the device allocated by the Determined cluster.

        All the data in the data loader and the models are automatically moved to
        the allocated device. This method provides the same functionality for data
        generated on the fly.
        """
        return pytorch.to_device(data, self.device, self._to_device_warned_types)
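
In user code this method is reached through the trial context. The following is a hypothetical, abridged sketch (the class name, model, and shapes are illustrative, not from the project) of moving data generated on the fly inside train_batch:

import torch

from determined import pytorch


class GanTrial(pytorch.PyTorchTrial):  # hypothetical, abridged
    def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
        self.context = context
        self.generator = self.context.wrap_model(torch.nn.Linear(64, 64))

    def train_batch(self, batch, epoch_idx, batch_idx):
        # The data loader's batch arrives on-device automatically; noise
        # created on the fly must be moved explicitly.
        noise = self.context.to_device(torch.randn(16, 64))
        return {"loss": self.generator(noise).mean()}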
Code Example #5
import logging
import queue
from logging import handlers

import pytest

from determined.pytorch import to_device


@pytest.mark.parametrize("dedup_between_calls", [False, True])
def test_to_device_warnings(dedup_between_calls) -> None:
    # Capture warning logs as elements in a queue.
    logger = logging.getLogger()
    q = queue.Queue()
    handler = handlers.QueueHandler(q)
    logger.addHandler(handler)
    try:
        warned_types = set() if dedup_between_calls else None
        to_device(["string_data", "string_data"], "cpu", warned_types)
        to_device(["string_data", "string_data"], "cpu", warned_types)

        assert q.qsize() == (1 if dedup_between_calls else 2)
        while q.qsize():
            msg = q.get().message
            assert "not able to move data" in msg
    finally:
        # Restore logging as it was before.
        logger.removeHandler(handler)
Code Example #6
    def to_device(self, data: pytorch._Data) -> pytorch.TorchData:
        """Map data to the device allocated by the Determined cluster.

        Since DeepSpeedTrial passes an iterator over the data loader to ``train_batch``
        and ``evaluate_batch``, the user is responsible for moving data to the GPU
        when needed. This helper makes that easier.
        """
        with self._record_timing("to_device", accumulate=True):
            return pytorch.to_device(data, self.device,
                                     self._to_device_warned_types)
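
Because DeepSpeedTrial hands the data loader's iterator directly to train_batch, a typical first step is to fetch a batch and move it with this helper. A minimal, hypothetical sketch (class and method body are illustrative only, not from the project):

from determined.pytorch import deepspeed


class MyDeepSpeedTrial(deepspeed.DeepSpeedTrial):  # hypothetical, abridged
    def train_batch(self, dataloader_iter, epoch_idx, batch_idx):
        # Batches are not moved automatically for DeepSpeedTrial, so fetch
        # one from the iterator and move it to the device by hand.
        batch = self.context.to_device(next(dataloader_iter))
        ...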
Code Example #7
    def to_device(self, data: pytorch._Data) -> pytorch.TorchData:
        """Map generated data to the device allocated by the Determined cluster.

        All the data in the data loader and the models are automatically moved to
        the allocated device. This method provides the same functionality for data
        generated on the fly.
        """
        return pytorch.to_device(
            data, self.device,
            self.warning_logged[_WarningLogs.FAILED_MOVING_TO_DEVICE])
Code Example #8
File: _pytorch_trial.py Project: hugokce/determined
    def _to_device(self, data: _Data) -> TorchData:
        return to_device(
            data, self.device,
            self.warning_logged[_WarningLogs.FAILED_MOVING_TO_DEVICE])