Example No. 1
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device

        Returns:
            Self with the underlying C++ tile moved to CUDA memory.

        Raises:
            CudaError: if the library has not been compiled with CUDA.
        """
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        device = torch_device('cuda', cuda_device(device).idx)

        if self.is_cuda and device != self.device:
            raise CudaError(
                'Cannot switch CUDA devices of existing Cuda tiles')

        if isinstance(self.tile, tiles.AnalogTile):
            with cuda_device(device):
                self.tile = tiles.CudaAnalogTile(self.tile)
                self.is_cuda = True
                self.device = device
                self.analog_ctx.cuda(device)

        return self
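A minimal usage sketch for the in-place variant above (the tile sizes and the top-level `AnalogTile` constructor are assumptions, not taken from this snippet):

from aihwkit.simulator.rpu_base import cuda
from aihwkit.simulator.tiles import AnalogTile

tile = AnalogTile(10, 5)      # 10 outputs, 5 inputs; starts as a CPU tile
if cuda.is_compiled():
    tile = tile.cuda()        # swaps the underlying C++ tile and returns self
    assert tile.is_cuda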
Example No. 2
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> BaseTile:
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device

        Returns:
            A copy of this tile in CUDA memory.

        Raises:
            CudaError: if the library has not been compiled with CUDA.
        """
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaInferenceTile(self)

        # need also to copy construct!
        tile.alpha = self.alpha.cuda(device)
        if self.reference_combined_weights is not None:
            tile.reference_combined_weights = self.reference_combined_weights.to(device)
        if self.programmed_weights is not None:
            tile.programmed_weights = self.programmed_weights.to(device)
        if self.nu_drift_list is not None:
            tile.nu_drift_list = [nu.to(device) for nu in self.nu_drift_list]

        return tile
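Unlike Example No. 1, this variant copy-constructs a new CUDA tile and copies the inference-related state onto it, so the caller should keep the returned object. A hedged sketch (the tile sizes are placeholders, and `InferenceTile` is assumed to be the CPU counterpart of `CudaInferenceTile`):

from aihwkit.simulator.rpu_base import cuda
from aihwkit.simulator.tiles import InferenceTile

inference_tile = InferenceTile(10, 5)
if cuda.is_compiled():
    cuda_tile = inference_tile.cuda()   # new CudaInferenceTile copy
    # inference_tile itself remains a CPU tile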
Example No. 3
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'CudaFloatingPointTile':
        """Return self, as this tile already resides in CUDA memory."""
        if self.stream != current_stream(device):
            raise CudaError('Cannot switch streams of existing Cuda tiles')

        return self
Example No. 4
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'CudaAnalogTile':
        """Return self, as this tile already resides in CUDA memory."""
        if self.stream != current_stream(device):
            raise CudaError('Cannot switch CUDA devices of existing Cuda tiles')

        return self
Example No. 5
    def __init__(self, source_tile: FloatingPointTile):
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        # Create a new instance of the rpu config.
        new_rpu_config = deepcopy(source_tile.rpu_config)

        # Create the tile, replacing the simulator tile.
        super().__init__(source_tile.out_size, source_tile.in_size, new_rpu_config,
                         source_tile.bias, source_tile.in_trans, source_tile.out_trans)
        self.cuda(self.device)
Example No. 6
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory."""

        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaFloatingPointTile(self)

        return tile
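As with the inference tile, the floating-point tile returns a fresh CUDA copy rather than mutating itself. A sketch under the same constructor assumptions:

from aihwkit.simulator.rpu_base import cuda
from aihwkit.simulator.tiles import FloatingPointTile

fp_tile = FloatingPointTile(10, 5)
if cuda.is_compiled():
    cuda_fp_tile = fp_tile.cuda()   # CudaFloatingPointTile backed by a CUDA simulator tile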
Example No. 7
    def __init__(self, source_tile: FloatingPointTile):
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        # Create a new instance of the rpu config.
        new_rpu_config = deepcopy(source_tile.rpu_config)

        # Create the tile, replacing the simulator tile.
        super().__init__(source_tile.out_size, source_tile.in_size, new_rpu_config,
                         source_tile.bias, source_tile.in_trans, source_tile.out_trans)
        self.tile = tiles.CudaFloatingPointTile(source_tile.tile)

        # Set the cuda properties
        self.stream = current_stream()
        self.device = torch_device(current_device())
Example No. 8
    def cpu(self) -> 'BaseTile':
        """Return a copy of this tile in CPU memory.

        Note:
            The weights of a CUDA tile can still be accessed via the
            `get_weights` and related methods, so there is no need to move
            the tile to CPU; doing so is currently not supported.

        Returns:
            self, if this tile is already in CPU memory.

        Raises:
            CudaError: if a CUDA tile is moved to CPU.
        """
        if self.is_cuda:
            raise CudaError('Currently it is not possible to move CUDA tile to cpu.')

        return self
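The `cpu()` counterpart is effectively a no-op for CPU tiles and rejects CUDA tiles. A short sketch (again assuming the `AnalogTile` constructor used above):

from aihwkit.exceptions import CudaError
from aihwkit.simulator.rpu_base import cuda
from aihwkit.simulator.tiles import AnalogTile

tile = AnalogTile(4, 3)
assert tile.cpu() is tile          # CPU tile: returned unchanged

if cuda.is_compiled():
    tile = tile.cuda()
    try:
        tile.cpu()
    except CudaError:
        pass                       # moving a CUDA tile back to CPU is unsupported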
Example No. 9
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device

        Returns:
            A copy of this tile in CUDA memory.

        Raises:
            CudaError: if the library has not been compiled with CUDA.
        """
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaAnalogTile(self)

        return tile
Example No. 10
    def cpu(self) -> 'BaseTile':
        """Raise an error, as CUDA tiles cannot be moved to CPU memory."""
        raise CudaError('CUDA tiles cannot be moved to CPU')