Example #1
    def test_sequential_move_to_cuda_via_to(self):
        """Test moving AnalogSequential to cuda (from CPU), using ``.to()``."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', current_device())

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.to(device('cuda'))

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
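A minimal, assertion-free sketch of the pattern this test exercises, assuming `layer` is an analog layer such as the one returned by `self.get_layer()`:

from torch import device

from aihwkit.nn import AnalogSequential
from aihwkit.simulator.rpu_base import cuda

if cuda.is_compiled():
    model = AnalogSequential(layer)   # wrap the analog layer in a container
    model.to(device('cuda'))          # moves the underlying analog tile to CUDA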
Example #2
    def cuda(
            self,
            device: Optional[Union[torch_device, str,
                                   int]] = None) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device

        Returns:
            Self with the underlying C++ tile moved to CUDA memory.

        Raises:
            CudaError: if the library has not been compiled with CUDA.
        """
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        device = torch_device('cuda', cuda_device(device).idx)

        if self.is_cuda and device != self.device:
            raise CudaError(
                'Cannot switch CUDA devices of existing Cuda tiles')

        if isinstance(self.tile, tiles.AnalogTile):
            with cuda_device(device):
                self.tile = tiles.CudaAnalogTile(self.tile)
                self.is_cuda = True
                self.device = device
                self.analog_ctx.cuda(device)

        return self
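A hedged usage sketch of the `cuda()` method above; the tile constructor follows the `AnalogTile(out_size, in_size, rpu_config)` signature seen in the other examples, and the import paths are assumed to match the aihwkit version used here:

from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
from aihwkit.simulator.rpu_base import cuda
from aihwkit.simulator.tiles import AnalogTile

tile = AnalogTile(2, 3, SingleRPUConfig(device=ConstantStepDevice()))
if cuda.is_compiled():
    tile = tile.cuda()  # moves the underlying C++ tile to CUDA and returns self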
Example #3
    def cuda(
            self,
            device: Optional[Union[torch_device, str,
                                   int]] = None) -> BaseTile:
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device
        """
        if not cuda.is_compiled():
            raise RuntimeError(
                'aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaInferenceTile(self)

        # Copy-construct the additional tile attributes as well.
        tile.alpha = self.alpha.cuda(device)
        if self.reference_combined_weights is not None:
            tile.reference_combined_weights = self.reference_combined_weights.to(
                device)
        if self.programmed_weights is not None:
            tile.programmed_weights = self.programmed_weights.to(device)
        if self.nu_drift_list is not None:
            tile.nu_drift_list = [nu.to(device) for nu in self.nu_drift_list]

        return tile
Example #4
    def test_cuda_instantiation(self):
        """Test whether cuda weights are copied correctly."""
        if not self.use_cuda and not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        python_tile = self.get_tile(10, 12)
        init_weights = python_tile.tile.get_weights()

        cuda_python_tile = python_tile.cuda()
        init_weights_cuda = cuda_python_tile.tile.get_weights()
        assert_array_almost_equal(init_weights, init_weights_cuda)
Example #5
    def __init__(self, source_tile: FloatingPointTile):
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        # Create a new instance of the rpu config.
        new_rpu_config = deepcopy(source_tile.rpu_config)

        # Create the tile, replacing the simulator tile.
        super().__init__(source_tile.out_size, source_tile.in_size, new_rpu_config,
                         source_tile.bias, source_tile.in_trans, source_tile.out_trans)
        self.cuda(self.device)
Example #6
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory."""

        if not cuda.is_compiled():
            raise RuntimeError('aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaFloatingPointTile(self)

        return tile
Example #7
    def test_save_load_model_cross_device(self):
        """Test saving and loading a model directly."""

        if not cuda.is_compiled():
            raise SkipTest('CUDA not available.')

        model = self.get_layer()

        map_location = 'cuda'
        if self.use_cuda:
            map_location = 'cpu'

        # Keep track of the current weights and biases for comparing.
        (model_weights, model_biases, tile_weights, tile_biases,
         _) = self.get_layer_and_tile_weights(model)
        assert_array_almost_equal(model_weights, tile_weights)
        if self.bias:
            assert_array_almost_equal(model_biases, tile_biases)

        # Save the model to a file.
        with TemporaryFile() as file:
            save(model, file)
            # Load the model.
            file.seek(0)
            new_model = load(file, map_location=device(map_location))

        # Compare the new model weights and biases.
        (new_model_weights, new_model_biases, new_tile_weights,
         new_tile_biases, _) = self.get_layer_and_tile_weights(new_model)

        assert_array_almost_equal(model_weights, new_model_weights)
        assert_array_almost_equal(tile_weights, new_tile_weights)
        if self.bias:
            assert_array_almost_equal(model_biases, new_model_biases)
            assert_array_almost_equal(tile_biases, new_tile_biases)

        # Asserts over the AnalogContext of the new model.
        new_analog_tile = self.get_analog_tile(new_model)
        analog_tile = self.get_analog_tile(model)

        self.assertTrue(hasattr(new_analog_tile.analog_ctx, 'analog_tile'))
        self.assertIsInstance(new_analog_tile.analog_ctx.analog_tile,
                              analog_tile.__class__)

        self.assertNotEqual(new_analog_tile.is_cuda, analog_tile.is_cuda)

        if analog_tile.shared_weights is not None:
            self.assertEqual(
                new_analog_tile.shared_weights.device.type, map_location)
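The cross-device loading in this test boils down to `torch.load` with an explicit `map_location`; a condensed sketch, assuming `model` is an analog model that may live on either device:

from tempfile import TemporaryFile
from torch import device, load, save

with TemporaryFile() as file:
    save(model, file)
    file.seek(0)
    # An explicit map_location places the loaded copy on the other device.
    new_model = load(file, map_location=device('cpu'))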
Example #8
    def __init__(self, source_tile: FloatingPointTile):
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        # Create a new instance of the rpu config.
        new_rpu_config = deepcopy(source_tile.rpu_config)

        # Create the tile, replacing the simulator tile.
        super().__init__(source_tile.out_size, source_tile.in_size, new_rpu_config,
                         source_tile.bias, source_tile.in_trans, source_tile.out_trans)
        self.tile = tiles.CudaFloatingPointTile(source_tile.tile)

        # Set the cuda properties
        self.stream = current_stream()
        self.device = torch_device(current_device())
Example #9
    def __init__(self,
                 out_size: int,
                 in_size: int,
                 resistive_device: Optional[BaseResistiveDevice] = None,
                 bias: bool = False,
                 in_trans: bool = False,
                 out_trans: bool = False):
        if not cuda.is_compiled():
            raise RuntimeError(
                'aihwkit has not been compiled with CUDA support')
        super().__init__(out_size, in_size, resistive_device, bias, in_trans,
                         out_trans)

        self.tile = tiles.CudaAnalogTile(self.tile)
        self.stream = current_stream()
        self.device = torch_device(current_device())
Example #10
    def cuda(
        self,
        device: Optional[Union[torch_device, str, int]] = None
    ) -> 'CudaIndexedAnalogTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device
        """
        if not cuda.is_compiled():
            raise RuntimeError(
                'aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaIndexedAnalogTile(self)

        return tile
Example #11
    def __init__(self, source_tile: AnalogTile):
        if not cuda.is_compiled():
            raise RuntimeError(
                'aihwkit has not been compiled with CUDA support')

        # Create a new instance of the resistive device.
        new_resistive_device = deepcopy(source_tile.resistive_device)

        # Create the tile, replacing the simulator tile.
        super().__init__(source_tile.out_size, source_tile.in_size,
                         new_resistive_device, source_tile.bias,
                         source_tile.in_trans, source_tile.out_trans)
        self.tile = tiles.CudaAnalogTile(source_tile.tile)

        # Set the cuda properties
        self.stream = current_stream()
        self.device = torch_device(current_device())
Example #12
    def cuda(
        self,
        device: Optional[Union[torch_device, str, int]] = None
    ) -> 'CudaIndexedFloatingPointTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device
        """
        if not cuda.is_compiled():
            raise RuntimeError(
                'aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaIndexedFloatingPointTile(self.out_size, self.in_size,
                                                self.resistive_device,
                                                self.bias, self.in_trans,
                                                self.out_trans)
        return tile
Example #13
    def test_sequential_move_to_cuda_multiple_gpus(self):
        """Test moving AnalogSequential to cuda (from CPU), using ``.to()``."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')
        if device_count() < 2:
            raise SkipTest('Need at least two devices for this test')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        # Test whether it can move to GPU with index 1
        expected_device_num = 1

        layer = self.get_layer()
        if isinstance(layer.analog_tile.tile,
                      (tiles.CudaAnalogTile, tiles.CudaFloatingPointTile)):
            raise SkipTest('Layer is already on CUDA')

        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', expected_device_num)

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda(device('cuda', expected_device_num))

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
Example #14
    def test_sequential_move_to_cuda(self):
        """Test sequential cuda."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            AnalogTile: CudaAnalogTile,
            CudaAnalogTile: CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.__class__]

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda()

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile, expected_class)
Example #15
class TestLocalRunner(AihwkitTestCase):
    """Test LocalRunner."""

    def setUp(self) -> None:
        if not os.getenv('TEST_DATASET'):
            raise SkipTest('TEST_DATASET not set')

    def test_run_example_cpu(self):
        """Test running the example using a local runner."""
        training_experiment = self.get_experiment()
        local_runner = LocalRunner(device=torch_device('cpu'))

        with patch('sys.stdout', new=StringIO()) as captured_stdout:
            result = local_runner.run(training_experiment, max_elements_train=10)

        # Asserts over stdout.
        self.assertIn('Epoch: ', captured_stdout.getvalue())

        # Asserts over the returned results.
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['epoch'], 0)
        self.assertIn('train_loss', result[0])
        self.assertIn('accuracy', result[0])

    @skipIf(not cuda.is_compiled(), 'not compiled with CUDA support')
    def test_run_example_gpu(self):
        """Test running the example using a local runner."""
        training_experiment = self.get_experiment()
        local_runner = LocalRunner(device=torch_device('cuda'))

        with patch('sys.stdout', new=StringIO()) as captured_stdout:
            result = local_runner.run(training_experiment, max_elements_train=10)

        # Asserts over stdout.
        self.assertIn('Epoch: ', captured_stdout.getvalue())

        # Asserts over the returned results.
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['epoch'], 0)
        self.assertIn('train_loss', result[0])
        self.assertIn('accuracy', result[0])
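A condensed sketch of the runner calls exercised by both tests, assuming `experiment` has already been built; the LocalRunner import path is an assumption here, not taken from the snippet:

from torch import device as torch_device

from aihwkit.experiments.runners import LocalRunner  # import path assumed
from aihwkit.simulator.rpu_base import cuda

runner = LocalRunner(device=torch_device('cuda' if cuda.is_compiled() else 'cpu'))
results = runner.run(experiment, max_elements_train=10)
print(results[0]['train_loss'], results[0]['accuracy'])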
Example #16
    def cuda(
            self,
            device: Optional[Union[torch_device, str, int]] = None
    ) -> 'BaseTile':
        """Return a copy of this tile in CUDA memory.

        Args:
            device: CUDA device

        Returns:
            A copy of this tile in CUDA memory.

        Raises:
            CudaError: if the library has not been compiled with CUDA.
        """
        if not cuda.is_compiled():
            raise CudaError('aihwkit has not been compiled with CUDA support')

        with cuda_device(device):
            tile = CudaFloatingPointTile(self)

        return tile
Example #17
def get_tile_for_plotting(rpu_config: SingleRPUConfig,
                          n_traces: int,
                          use_cuda: bool = False,
                          noise_free: bool = False) -> BaseTile:
    """Returns an analog tile for plotting the response curve.

    Args:
        rpu_config: RPU Configuration to use for plotting
        n_traces: Number of traces to plot
        use_cuda: Whether to use the CUDA implementation (if available)
        noise_free: Whether to turn off cycle-to-cycle noise

    Returns:
        Instantiated tile.
    """
    config = deepcopy(rpu_config)

    # Make sure we use single pulses for the overview.
    config.update.update_bl_management = False
    config.update.update_management = False
    config.update.desired_bl = 1

    if noise_free:
        config.forward.is_perfect = True

        config.device.dw_min_std = 0.0  # Noise free.
        if (hasattr(config.device, 'write_noise_std')
                and getattr(config.device, 'write_noise_std') > 0.0):
            # Just make very small to avoid hidden parameter mismatch.
            setattr(config.device, 'write_noise_std', 1e-6)

    analog_tile = AnalogTile(n_traces, 1, config)  # type: BaseTile
    analog_tile.set_learning_rate(1)
    weights = config.device.as_bindings().w_min * ones((n_traces, 1))
    analog_tile.set_weights(weights)

    if use_cuda and cuda.is_compiled():
        return analog_tile.cuda()
    return analog_tile
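A hedged usage sketch for the helper above; the device choice and trace count are illustrative only:

from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice

analog_tile = get_tile_for_plotting(
    SingleRPUConfig(device=ConstantStepDevice()),
    n_traces=5, use_cuda=True, noise_free=True)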
Example #18
    def test_save_with_cuda(self):
        """Whether model is correctly reconstructed after saving"""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        model = AnalogSequential(layer)
        model.cuda()
        with TemporaryFile() as file:
            save(model.state_dict(), file)
            # Create a new model and load its state dict.
            file.seek(0)
            checkpoint = load(file)
        model.load_state_dict(checkpoint)

        expected_device = device('cuda', current_device())
        expected_class = tile_classes[layer.analog_tile.tile.__class__]

        analog_tile = model[0].analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
Example #19
# Imports from Python.
import os

# Imports from PyTorch.
import torch
from torch import nn
from torchvision import datasets, transforms

# Imports from aihwkit.
from aihwkit.nn import AnalogConv2d, AnalogLinear, AnalogSequential
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import SingleRPUConfig, FloatingPointRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice, FloatingPointDevice
from aihwkit.simulator.rpu_base import cuda

# Check device
USE_CUDA = 0
if cuda.is_compiled():
    USE_CUDA = 1
DEVICE = torch.device('cuda' if USE_CUDA else 'cpu')

# Path to store datasets
PATH_DATASET = os.path.join('data', 'DATASET')

# Path to store results
RESULTS = os.path.join(os.getcwd(), 'results', 'LENET5')

# Training parameters
SEED = 1
N_EPOCHS = 30
BATCH_SIZE = 8
LEARNING_RATE = 0.01
N_CLASSES = 10
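Continuing the setup above, a small sketch of an analog model placed on the selected DEVICE; layer sizes are illustrative only and the regroup call mirrors the pattern used in the aihwkit examples:

rpu_config = SingleRPUConfig(device=ConstantStepDevice())
model = AnalogSequential(
    AnalogLinear(28 * 28, N_CLASSES, bias=True, rpu_config=rpu_config)
).to(DEVICE)
optimizer = AnalogSGD(model.parameters(), lr=LEARNING_RATE)
optimizer.regroup_param_groups(model)  # register the analog tiles (assumed API)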
Example #20
NUM_LAYERS = 1
INPUT_SIZE = 1
EMBED_SIZE = 20
HIDDEN_SIZE = 50
OUTPUT_SIZE = 1
DROPOUT_RATIO = 0.0
NOISE = 0.0

EPOCHS = 50
BATCH_SIZE = 5
SEQ_LEN = 501
RNN_CELL = AnalogGRUCell  # type of RNN cell
WITH_EMBEDDING = True  # RNN with embedding
WITH_BIDIR = False
USE_ANALOG_TRAINING = False  # or hardware-aware training
DEVICE = torch.device('cuda') if cuda.is_compiled() else torch.device('cpu')

if USE_ANALOG_TRAINING:
    # Define a RPU configuration for analog training
    rpu_config = GokmenVlasovPreset()

else:
    # Define an RPU configuration using inference/hardware-aware training tile
    rpu_config = InferenceRPUConfig()
    rpu_config.forward.out_res = -1.  # Turn off (output) ADC discretization.
    rpu_config.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
    rpu_config.forward.w_noise = 0.02  # Short-term w-noise.

    rpu_config.clip.type = WeightClipType.FIXED_VALUE
    rpu_config.clip.fixed_value = 1.0
    rpu_config.modifier.pdrop = 0.03  # Drop connect.
Example #21
    def setUp(self) -> None:
        if self.use_cuda and not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        super().setUp()
Example #22
                            resistive_device=FloatingPointResistiveDevice())

    def get_digital_layer(self,
                          in_channels=2,
                          out_channels=3,
                          kernel_size=4,
                          padding=2):
        """Return a digital layer."""
        return Conv2d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=padding,
                      bias=True)


@skipIf(not cuda.is_compiled(), 'not compiled with CUDA support')
class CudaAnalogConv2dTestNoBias(AnalogConv2dTestNoBias):
    """Test for AnalogConv2d (no bias, CUDA)."""

    USE_CUDA = True

    def get_layer(self,
                  in_channels=2,
                  out_channels=3,
                  kernel_size=4,
                  padding=2):
        """Return a layer."""
        return AnalogConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
Example #23
def get_tile_for_plotting(rpu_config: Union[SingleRPUConfig,
                                            UnitCellRPUConfig],
                          n_traces: int,
                          use_cuda: bool = False,
                          noise_free: bool = False) -> BaseTile:
    """Return an analog tile for plotting the response curve.

    Args:
        rpu_config: RPU Configuration to use for plotting
        n_traces: Number of traces to plot
        use_cuda: Whether to use the CUDA implementation (if available)
        noise_free: Whether to turn off cycle-to-cycle noise (if possible)

    Returns:
        Instantiated tile.
    """
    def set_noise_free(dev: Any) -> Any:
        if hasattr(dev, 'dw_min_std'):
            dev.dw_min_std = 0.0  # Noise free.

        if hasattr(dev, 'refresh_forward'):
            setattr(dev, 'refresh_forward', IOParameters(is_perfect=True))

        if hasattr(dev, 'refresh_update'):
            setattr(dev, 'refresh_update',
                    UpdateParameters(pulse_type=PulseType.NONE))

        if hasattr(dev, 'transfer_forward'):
            setattr(dev, 'transfer_forward', IOParameters(is_perfect=True))

        if hasattr(dev, 'transfer_update'):
            setattr(dev, 'transfer_update',
                    UpdateParameters(pulse_type=PulseType.NONE))

        if (hasattr(dev, 'write_noise_std')
                and getattr(dev, 'write_noise_std') > 0.0):
            # Just make very small to avoid hidden parameter mismatch.
            setattr(dev, 'write_noise_std', 1e-6)

    config = deepcopy(rpu_config)

    # Make sure we use single pulses for the overview.
    config.update.update_bl_management = False
    config.update.update_management = False
    config.update.desired_bl = 1

    if noise_free:
        config.forward.is_perfect = True

        set_noise_free(config.device)
        if hasattr(config.device, 'unit_cell_devices'):
            for dev in getattr(config.device, 'unit_cell_devices'):
                set_noise_free(dev)
        if hasattr(config.device, 'device'):
            set_noise_free(getattr(config.device, 'device'))

    analog_tile = AnalogTile(n_traces, 1, config)  # type: BaseTile
    analog_tile.set_learning_rate(1)
    w_min = getattr(config.device.as_bindings(), 'w_min', -1.0)

    weights = w_min * ones((n_traces, 1))
    analog_tile.set_weights(weights)

    if use_cuda and cuda.is_compiled():
        return analog_tile.cuda()
    return analog_tile
Example #24
"""

# pylint: disable=redefined-outer-name, too-many-locals, invalid-name

import csv
import os
from typing import List, Tuple
import numpy as np
import matplotlib.pyplot as plt

# Imports from aihwkit.
from aihwkit.utils.visualization import plot_device_compact
from aihwkit.simulator.configs.devices import PiecewiseStepDevice
from aihwkit.simulator.rpu_base import cuda

USE_CUDA = cuda.is_compiled()

# Declare variables and .csv path
DEVICE_FIT = True
if DEVICE_FIT:
    FILE_NAME = os.path.join(os.path.dirname(__file__), 'csv',
                             'gong_et_al.csv')
else:
    FILE_NAME = os.path.join(os.path.dirname(__file__), 'csv',
                             'selfdefine.csv')


def read_from_file(
    filename: str,
    from_pulse_response: bool = True,
    n_segments: int = 10,