def main():
    """Create and execute an experiment."""
    model = AnalogSequential(
        Flatten(),
        AnalogLinear(INPUT_SIZE,
                     HIDDEN_SIZES[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[0],
                     HIDDEN_SIZES[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[1],
                     OUTPUT_SIZE,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        LogSoftmax(dim=1))

    # Create the training Experiment.
    experiment = BasicTrainingWithScheduler(dataset=FashionMNIST,
                                            model=model,
                                            epochs=EPOCHS,
                                            batch_size=BATCH_SIZE)

    # Create the runner and execute the experiment.
    runner = LocalRunner(device=DEVICE)
    results = runner.run(experiment, dataset_root=PATH_DATASET)
    print(results)
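The snippet above relies on module-level constants that are not shown; a minimal sketch of what they might look like for a Fashion-MNIST setup (the concrete values below are illustrative assumptions, not taken from the original):

from torch import device as torch_device
from torch.cuda import is_available

# Illustrative assumptions for the constants used in main().
INPUT_SIZE = 784            # 28x28 Fashion-MNIST images, flattened
HIDDEN_SIZES = [256, 128]   # two hidden layers
OUTPUT_SIZE = 10            # number of classes
EPOCHS = 30
BATCH_SIZE = 64
PATH_DATASET = 'data/DATASET'
DEVICE = torch_device('cuda' if is_available() else 'cpu')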
Example 2
def create_analog_network(input_size, hidden_sizes, output_size):
    """Create the neural network using analog and digital layers.

    Args:
        input_size (int): size of the Tensor at the input.
        hidden_sizes (list): list of sizes of the hidden layers (2 layers).
        output_size (int): size of the Tensor at the output.

    Returns:
        AnalogSequential: the created analog model.
    """
    model = AnalogSequential(
        AnalogLinear(input_size,
                     hidden_sizes[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[0],
                     hidden_sizes[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[1],
                     output_size,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.LogSoftmax(dim=1))

    if USE_CUDA:
        model.cuda()

    print(model)
    return model
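A minimal usage sketch, assuming MNIST-like dimensions (the sizes are illustrative, not from the original):

model = create_analog_network(input_size=784, hidden_sizes=[256, 128], output_size=10)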
Example 3
def plot_device(device: PulsedDevice,
                w_noise: float = 0.0,
                **kwargs: Any) -> None:
    """Plots the step response figure for a given device (preset).

    Note:
        It will use an amount of read weight noise ``w_noise`` for
        reading the weights.

    Args:
        device: PulsedDevice parameters
        w_noise: Weight noise standard deviation during read
        kwargs: for other parameters, see :func:`plot_response_overview`
    """
    plt.figure(figsize=[7, 7])
    # To simulate some weight read noise.
    io_pars = IOParameters(
        out_noise=0.0,  # no out noise
        w_noise=w_noise,  # quite low
        inp_res=-1.,  # turn off DAC
        out_bound=100.,  # not limiting
        out_res=-1.,  # turn off ADC
        bound_management=BoundManagementType.NONE,
        noise_management=NoiseManagementType.NONE,
        w_noise_type=WeightNoiseType.ADDITIVE_CONSTANT)

    rpu_config = SingleRPUConfig(device=device, forward=io_pars)

    plot_response_overview(rpu_config, **kwargs)
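A usage sketch, assuming a `ConstantStepDevice` (a `PulsedDevice` subclass) and an interactive matplotlib backend:

import matplotlib.pyplot as plt
from aihwkit.simulator.configs.devices import ConstantStepDevice

# Plot the step response of a constant step device with some read noise.
plot_device(ConstantStepDevice(), w_noise=0.02)
plt.show()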
Example 4
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 num_layers: int = 1,
                 dropout: float = 0.0,
                 bias: bool = True,
                 rpu_config: Optional[RPUConfigAlias] = None,
                 realistic_read_write: bool = False,
                 xavier: bool = False):
        super().__init__()

        # Default to SingleRPUConfig with ConstantStepDevice.
        if not rpu_config:
            rpu_config = SingleRPUConfig(device=ConstantStepDevice())

        self.lstm = ModularAnalogLSTMWithDropout(num_layers,
                                                 AnalogLSTMLayer,
                                                 dropout,
                                                 first_layer_args=[
                                                     AnalogLSTMCell,
                                                     input_size, hidden_size,
                                                     bias, rpu_config,
                                                     realistic_read_write
                                                 ],
                                                 other_layer_args=[
                                                     AnalogLSTMCell,
                                                     hidden_size, hidden_size,
                                                     bias, rpu_config,
                                                     realistic_read_write
                                                 ])
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.reset_parameters(xavier)
Example 5
    def test_save_load_meta_parameter(self):
        """Test saving and loading a device with custom parameters."""
        # Create the device and the array.
        rpu_config = SingleRPUConfig(
            forward=IOParameters(inp_noise=0.321),
            backward=IOParameters(inp_noise=0.456),
            update=UpdateParameters(desired_bl=78),
            device=ConstantStepDevice(w_max=0.987)
        )

        model = self.get_layer(rpu_config=rpu_config)

        # Save the model to a file.
        with TemporaryFile() as file:
            save(model, file)
            # Load the model.
            file.seek(0)
            new_model = load(file)

        # Assert over the new model tile parameters.
        new_analog_tile = self.get_analog_tile(new_model)
        analog_tile = self.get_analog_tile(model)

        parameters = new_analog_tile.tile.get_parameters()
        self.assertAlmostEqual(parameters.forward_io.inp_noise, 0.321)
        self.assertAlmostEqual(parameters.backward_io.inp_noise, 0.456)
        self.assertAlmostEqual(parameters.update.desired_bl, 78)
        self.assertTrue(new_analog_tile.is_cuda == analog_tile.is_cuda)
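A sketch of the same save/load round trip outside the test harness, assuming `save` and `load` are `torch.save` and `torch.load` as in the aihwkit examples (the layer size is an illustrative assumption):

from tempfile import TemporaryFile
from torch import save, load
from aihwkit.nn import AnalogLinear

# Analog layer with custom forward and device parameters.
layer = AnalogLinear(4, 2, rpu_config=SingleRPUConfig(
    forward=IOParameters(inp_noise=0.321),
    device=ConstantStepDevice(w_max=0.987)))

with TemporaryFile() as file:
    save(layer.state_dict(), file)
    file.seek(0)
    layer.load_state_dict(load(file))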
Example 6
    def _setup_tile(self,
                    in_features: int,
                    out_features: int,
                    bias: bool,
                    rpu_config: Optional[Union[FloatingPointRPUConfig,
                                               SingleRPUConfig,
                                               UnitCellRPUConfig,
                                               InferenceRPUConfig]] = None,
                    realistic_read_write: bool = False) -> BaseTile:
        """Create an analog tile and setup this layer for using it.

        Create an analog tile to be used as the basis of this layer's
        operations, and set up additional attributes of this instance that
        are needed for using the analog tile.

        Note:
            This method also sets the following attributes, which are assumed
            to be set by the rest of the methods:
            * ``self.use_bias``
            * ``self.realistic_read_write``
            * ``self.in_features``
            * ``self.out_features``

        Args:
            in_features: input vector size (number of columns).
            out_features: output vector size (number of rows).
            rpu_config: resistive processing unit configuration.
            bias: whether to use a bias row on the analog tile or not.
            realistic_read_write: whether to enable realistic read/write
               for setting initial weights and read out of weights.

        Returns:
            An analog tile with the requested parameters.
        """
        # pylint: disable=attribute-defined-outside-init
        # Default to constant step device if not provided.
        if not rpu_config:
            rpu_config = SingleRPUConfig()

        # Setup the analog-related attributes of this instance.
        self.use_bias = bias
        self.realistic_read_write = realistic_read_write
        self.in_features = in_features
        self.out_features = out_features

        # Create the tile.
        if isinstance(rpu_config, FloatingPointRPUConfig):
            tile_class = self.TILE_CLASS_FLOATING_POINT
        elif isinstance(rpu_config, InferenceRPUConfig):
            tile_class = self.TILE_CLASS_INFERENCE
        else:
            tile_class = self.TILE_CLASS_ANALOG  # type: ignore

        return tile_class(
            out_features,
            in_features,
            rpu_config,
            bias=bias  # type: ignore
        )
Example 7
    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias: bool = True,
            rpu_config: Optional[RPUConfigAlias] = None,
            realistic_read_write: bool = False,
            weight_scaling_omega: Optional[float] = None,
    ):

        # Call super() after tile creation, including ``reset_parameters``.
        Linear.__init__(self, in_features, out_features, bias=bias)

        # Create tiles
        if rpu_config is None:
            rpu_config = SingleRPUConfig()

        AnalogModuleBase.__init__(
            self,
            in_features,
            out_features,
            bias,
            realistic_read_write,
            rpu_config.mapping
        )
        if self.analog_bias:
            raise ModuleError("AnalogLinearMapped only supports digital bias.")

        # More than one tile may need to be created. If so, divide the
        # weight matrix into equal pieces along the input dimension,
        # using as many tiles as needed.
        max_input_size = rpu_config.mapping.max_input_size
        max_output_size = rpu_config.mapping.max_output_size

        self.in_sizes = self.get_split_sizes(in_features, max_input_size)
        self.out_sizes = self.get_split_sizes(out_features, max_output_size)

        self.analog_tile_array = []
        for i, in_tile_size in enumerate(self.in_sizes):
            in_tiles = []
            for j, out_tile_size in enumerate(self.out_sizes):
                tile = rpu_config.tile_class(out_tile_size,
                                             in_tile_size,
                                             rpu_config,
                                             bias=self.analog_bias)
                self.register_analog_tile(tile, name=f"{i}_{j}")
                in_tiles.append(tile)
            self.analog_tile_array.append(in_tiles)

        # Set weights from the reset_parameters
        self.set_weights(self.weight, self.bias, remap_weights=True,
                         weight_scaling_omega=weight_scaling_omega)

        # Unregister weight/bias as a parameter but keep for sync
        self.unregister_parameter('weight')

        if self.analog_bias:
            self.unregister_parameter('bias')
Example 8
    def _setup_tile(self,
                    in_features: int,
                    out_features: int,
                    bias: bool,
                    rpu_config: Optional[RPUConfigAlias] = None,
                    realistic_read_write: bool = False,
                    weight_scaling_omega: float = 0.0) -> 'BaseTile':
        """Create an analog tile and setup this layer for using it.

        Create an analog tile to be used as the basis of this layer's
        operations, and set up additional attributes of this instance that
        are needed for using the analog tile.

        If ``weight_scaling_omega`` is larger than 0, the weights are set in a
        scaled manner (assuming a digital output scale). See
        :meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`
        for details.

        Note:
            This method also sets the following attributes, which are assumed
            to be set by the rest of the methods:
            * ``self.use_bias``
            * ``self.realistic_read_write``
            * ``self.weight_scaling_omega``
            * ``self.in_features``
            * ``self.out_features``

        Args:
            in_features: input vector size (number of columns).
            out_features: output vector size (number of rows).
            rpu_config: resistive processing unit configuration.
            bias: whether to use a bias row on the analog tile or not.
            realistic_read_write: whether to enable realistic read/write
                for setting initial weights and read out of weights.
            weight_scaling_omega: the weight value to which the maximum
                weight will be scaled. If zero, no weight scaling will
                be performed.

        Returns:
            An analog tile with the requested parameters.
        """
        # pylint: disable=attribute-defined-outside-init
        # Default to constant step device if not provided.
        if not rpu_config:
            rpu_config = SingleRPUConfig()

        # Setup the analog-related attributes of this instance.
        self.use_bias = bias
        self.realistic_read_write = realistic_read_write
        self.weight_scaling_omega = weight_scaling_omega
        self.in_features = in_features
        self.out_features = out_features

        # Create the tile.
        return rpu_config.tile_class(out_features,
                                     in_features,
                                     rpu_config,
                                     bias=bias)
Example 9
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Tuple[int, ...],
        stride: Tuple[int, ...],
        padding: Tuple[int, ...],
        dilation: Tuple[int, ...],
        transposed: bool,
        output_padding: Tuple[int, ...],
        groups: int,
        bias: bool,
        padding_mode: str,
        rpu_config: Optional[RPUConfigAlias] = None,
        realistic_read_write: bool = False,
        weight_scaling_omega: Optional[float] = None,
    ):
        # pylint: disable=too-many-arguments, too-many-locals
        if groups != 1:
            raise ValueError('Only one group is supported')
        if padding_mode != 'zeros':
            raise ValueError('Only "zeros" padding mode is supported')

        # Call super() after tile creation, including ``reset_parameters``.
        _ConvNd.__init__(self, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, transposed, output_padding, groups,
                         bias, padding_mode)

        # Create the tile and set up the analog attributes.
        if rpu_config is None:
            rpu_config = SingleRPUConfig()

        AnalogModuleBase.__init__(
            self, self.get_tile_size(in_channels, groups, kernel_size),
            out_channels, bias, realistic_read_write, rpu_config.mapping)
        self.analog_tile = self._setup_tile(rpu_config)

        # Register analog tile
        self.register_analog_tile(self.analog_tile)

        # Set weights from the reset_parameters
        self.set_weights(self.weight,
                         self.bias,
                         remap_weights=True,
                         weight_scaling_omega=weight_scaling_omega)

        # Set the index matrices.
        self.fold_indices = Tensor().detach()
        self.input_size = 0
        self.tensor_view = (-1, )  # type: Tuple[int, ...]

        # Unregister weight/bias as a parameter but keep it for syncs
        self.unregister_parameter('weight')
        if self.analog_bias:
            self.unregister_parameter('bias')
Example 10
 def __init__(
         self,
         out_size: int,
         in_size: int,
         rpu_config: Optional[Union[SingleRPUConfig, UnitCellRPUConfig,
                                    InferenceRPUConfig]] = None,
         bias: bool = False,
         in_trans: bool = False,
         out_trans: bool = False,
 ):
     rpu_config = rpu_config or SingleRPUConfig(device=ConstantStepDevice())
     super().__init__(out_size, in_size, rpu_config, bias, in_trans, out_trans)
Example 11
 def get_rpu_config(self):
     rpu_config = SingleRPUConfig(
         device=ConstantStepDevice(w_max_dtod=0,
                                   w_min_dtod=0,
                                   dw_min_std=0.0,
                                   dw_min=0.0001,
                                   dw_min_dtod=0.0,
                                   up_down_dtod=0.0,
                                   w_max=1.0,
                                   w_min=-1.0))
     rpu_config.forward.is_perfect = True
     rpu_config.backward.is_perfect = True
     return rpu_config
Example 12
    def get_custom_tile(self, out_size, in_size, **parameters):
        """Return a tile with custom parameters for the resistive device."""
        if 'FloatingPoint' in self.parameter:
            rpu_config = FloatingPointRPUConfig(device=FloatingPointDevice(
                **parameters))
        else:
            rpu_config = SingleRPUConfig(device=ConstantStepDevice(
                **parameters))

        python_tile = self.get_tile(out_size, in_size, rpu_config)
        self.set_init_weights(python_tile)

        return python_tile
Example 13
def plot_device_symmetry(
    device: PulsedDevice,
    w_noise: float = 0.0,
    n_pulses: int = 10000,
    n_traces: int = 3,
    use_cuda: bool = False,
    w_init: float = 1.0,
) -> None:
    """Plot the response figure for a given device (preset).

    It shows the response to alternating up/down pulses.

    Note:
        It will use an amount of read weight noise ``w_noise`` for
        reading the weights.

    Args:
        device: PulsedDevice parameters
        w_noise: Weight noise standard deviation during read
        n_pulses: Total number of pulses
        n_traces: Number of device traces
        use_cuda: Whether to use CUDA.
        w_init: Initial value of the weights
    """
    plt.figure(figsize=[10, 5])

    io_pars = IOParameters(
        out_noise=0.0,  # no out noise
        w_noise=w_noise,  # quite low
        inp_res=-1.,  # turn off DAC
        out_bound=100.,  # not limiting
        out_res=-1.,  # turn off ADC
        bound_management=BoundManagementType.NONE,
        noise_management=NoiseManagementType.NONE,
        w_noise_type=WeightNoiseType.ADDITIVE_CONSTANT)

    rpu_config = SingleRPUConfig(device=device, forward=io_pars)

    direction = np.sign(np.cos(np.pi * np.arange(n_pulses)))
    plt.clf()

    analog_tile = get_tile_for_plotting(rpu_config,
                                        n_traces,
                                        use_cuda,
                                        noise_free=False)
    weights = w_init * ones((n_traces, 1))
    analog_tile.set_weights(weights)

    plot_pulse_response(analog_tile, direction, use_forward=False)
    plt.ylim([-1, 1])
    plt.grid(True)
Example 14
    def test_hidden_parameter_mismatch(self):
        """Test for error if tile structure mismatches."""
        model = self.get_layer()
        state_dict = model.state_dict()

        # Create the device and the array.
        rpu_config = SingleRPUConfig(
            device=LinearStepDevice()  # different hidden structure
        )

        new_model = self.get_layer(rpu_config=rpu_config)
        if new_model.analog_tile.__class__.__name__ != model.analog_tile.__class__.__name__:
            with self.assertRaises(TileError):
                new_model.load_state_dict(state_dict)
Example 15
    def get_noisefree_tile(self, out_size, in_size):
        """Return a tile of the specified dimensions with noisiness turned off."""
        rpu_config = None

        if 'FloatingPoint' not in self.parameter:
            rpu_config = SingleRPUConfig(
                forward=IOParameters(is_perfect=True),
                backward=IOParameters(is_perfect=True),
                device=IdealDevice())

        python_tile = self.get_tile(out_size, in_size, rpu_config)
        self.set_init_weights(python_tile)

        return python_tile
Example 16
    def __init__(
            self,
            out_size: int,
            in_size: int,
            rpu_config: Optional[Union['SingleRPUConfig', 'UnitCellRPUConfig',
                                       'InferenceRPUConfig']] = None,
            bias: bool = False,
            in_trans: bool = False,
            out_trans: bool = False,
    ):
        if not rpu_config:
            # Import `SingleRPUConfig` dynamically to avoid import cycles.
            # pylint: disable=import-outside-toplevel
            from aihwkit.simulator.configs import SingleRPUConfig
            rpu_config = SingleRPUConfig(device=ConstantStepDevice())

        super().__init__(out_size, in_size, rpu_config, bias, in_trans, out_trans)
Example 17
    def test_save_load_weight_scaling_omega(self):
        """Test saving and loading a device with weight scaling omega."""
        # Create the device and the array.
        rpu_config = SingleRPUConfig(mapping=MappingParameter(weight_scaling_omega=0.4))
        model = self.get_layer(rpu_config=rpu_config)
        analog_tile = self.get_analog_tile(model)
        alpha = analog_tile.get_out_scaling_alpha().detach().cpu()
        self.assertNotEqual(alpha, 1.0)

        # Save the model to a file.
        with TemporaryFile() as file:
            save(model, file)
            # Load the model.
            file.seek(0)
            new_model = load(file)

        # Assert over the new model tile parameters.
        new_analog_tile = self.get_analog_tile(new_model)
        alpha_new = new_analog_tile.get_out_scaling_alpha().detach().cpu()
        assert_array_almost_equal(array(alpha), array(alpha_new))
Example 18
    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias: bool = True,
            rpu_config: Optional[RPUConfigAlias] = None,
            realistic_read_write: bool = False,
            weight_scaling_omega: Optional[float] = None,
    ):
        # Call super() after tile creation, including ``reset_parameters``.
        Linear.__init__(self, in_features, out_features, bias=bias)

        # Create tile
        if rpu_config is None:
            rpu_config = SingleRPUConfig()

        AnalogModuleBase.__init__(
            self,
            in_features,
            out_features,
            bias,
            realistic_read_write,
            weight_scaling_omega,
            rpu_config.mapping
        )
        self.analog_tile = self._setup_tile(rpu_config)

        # Register tile
        self.register_analog_tile(self.analog_tile)

        # Set weights from the reset_parameters call
        self.set_weights(self.weight, self.bias)

        # Unregister weight/bias as a parameter but keep it as a
        # field (needed for syncing still)
        self.unregister_parameter('weight')
        if self.analog_bias:
            self.unregister_parameter('bias')
Example 19
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Tuple[int, ...],
        stride: Tuple[int, ...],
        padding: Tuple[int, ...],
        dilation: Tuple[int, ...],
        transposed: bool,
        output_padding: Tuple[int, ...],
        groups: int,
        bias: bool,
        padding_mode: str,
        rpu_config: Optional[RPUConfigAlias] = None,
        realistic_read_write: bool = False,
        weight_scaling_omega: Optional[float] = None,
    ):
        # pylint: disable=too-many-arguments, too-many-locals
        if groups != 1:
            raise ValueError('Only one group is supported')
        if padding_mode != 'zeros':
            raise ValueError('Only "zeros" padding mode is supported')

        # Call super() after tile creation, including ``reset_parameters``.
        _ConvNd.__init__(self, in_channels, out_channels, kernel_size, stride,
                         padding, dilation, transposed, output_padding, groups,
                         bias, padding_mode)

        # Create tiles
        if rpu_config is None:
            rpu_config = SingleRPUConfig()

        AnalogModuleBase.__init__(
            self, self.get_tile_size(in_channels, groups,
                                     kernel_size), out_channels, bias,
            realistic_read_write, weight_scaling_omega, rpu_config.mapping)

        if self.analog_bias:
            raise ModuleError("AnalogConvNdMapped only supports digital bias.")

        max_input_size = rpu_config.mapping.max_input_size
        max_output_size = rpu_config.mapping.max_output_size
        kernel_elem = self.in_features // self.in_channels
        self.in_sizes = self.get_split_sizes(self.in_features, max_input_size,
                                             kernel_elem)
        self.out_sizes = self.get_split_sizes(self.out_features,
                                              max_output_size)

        self.analog_tile_array = []
        for i, in_tile_size in enumerate(self.in_sizes):
            in_tiles = []
            for j, out_tile_size in enumerate(self.out_sizes):
                tile = rpu_config.tile_class(out_tile_size,
                                             in_tile_size * kernel_elem,
                                             rpu_config,
                                             bias=self.analog_bias)
                self.register_analog_tile(tile, name=f"{i}_{j}")
                in_tiles.append(tile)
            self.analog_tile_array.append(in_tiles)

        # Set weights from the reset_parameters (since now the
        # analog_tiles are registered)
        self.set_weights(self.weight, self.bias)

        # Set the index matrices.
        self.input_size = 0
        self.fold_indices_lst = []  # type: List[Tensor]

        # Unregister weight/bias as a parameter but keep it as a
        # field (needed for syncing still)
        self.unregister_parameter('weight')
        if self.analog_bias:
            self.unregister_parameter('bias')
Example 20
# Training parameters
SEED = 1
N_EPOCHS = 30
BATCH_SIZE = 8
LEARNING_RATE = 0.01
N_CLASSES = 10

# Select the device model to use in the training:
# * If `SingleRPUConfig(device=ConstantStepDevice())`, analog tiles with
#   constant step devices will be used.
# * If `FloatingPointRPUConfig(device=FloatingPointDevice())`, standard
#   floating point devices will be used.
USE_ANALOG_TRAINING = True
if USE_ANALOG_TRAINING:
    RPU_CONFIG = SingleRPUConfig(device=ConstantStepDevice())
else:
    RPU_CONFIG = FloatingPointRPUConfig(device=FloatingPointDevice())


def load_images():
    """Load images for train from torchvision datasets."""

    transform = transforms.Compose([transforms.ToTensor()])
    train_set = datasets.MNIST(PATH_DATASET, download=True, train=True, transform=transform)
    val_set = datasets.MNIST(PATH_DATASET, download=True, train=False, transform=transform)
    train_data = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    validation_data = torch.utils.data.DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)

    return train_data, validation_data
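A minimal sketch of how `RPU_CONFIG` might then be handed to an analog layer for training (the layer shape is an illustrative assumption, not part of the original):

from aihwkit.nn import AnalogLinear

# Single analog layer mapping flattened MNIST images to class scores.
model = AnalogLinear(28 * 28, N_CLASSES, bias=True, rpu_config=RPU_CONFIG)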
Example 21
INPUT_SIZE = 1
EMBED_SIZE = 20
HIDDEN_SIZE = 50
OUTPUT_SIZE = 1
DROPOUT_RATIO = 0.0
NOISE = 0.0

EPOCHS = 100
BATCH_SIZE = 5
SEQ_LEN = 501
WITH_EMBEDDING = False  # LSTM with embedding
USE_ANALOG_TRAINING = False  # or hardware-aware training

if USE_ANALOG_TRAINING:
    # Define an RPU configuration for analog training
    rpu_config = SingleRPUConfig(device=GokmenVlasovPreset())

else:
    # Define an RPU configuration using an inference (hardware-aware training) tile
    rpu_config = InferenceRPUConfig()
    rpu_config.forward.out_res = -1.  # Turn off (output) ADC discretization.
    rpu_config.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
    rpu_config.forward.w_noise = 0.02  # Short-term w-noise.

    rpu_config.clip.type = WeightClipType.FIXED_VALUE
    rpu_config.clip.fixed_value = 1.0
    rpu_config.modifier.pdrop = 0.03  # Drop connect.
    rpu_config.modifier.type = WeightModifierType.ADD_NORMAL  # Fwd/bwd weight noise.
    rpu_config.modifier.std_dev = 0.1
    rpu_config.modifier.rel_to_actual_wmax = True
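A sketch of how this `rpu_config` might be passed to an analog recurrent layer; `AnalogLSTM` is provided by `aihwkit.nn`, but the exact wiring below is illustrative rather than part of the original example:

from aihwkit.nn import AnalogLSTM

# Analog LSTM using the configuration selected above.
lstm = AnalogLSTM(INPUT_SIZE, HIDDEN_SIZE,
                  num_layers=1,
                  dropout=DROPOUT_RATIO,
                  rpu_config=rpu_config)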
Example 22
from torch import Tensor
from torch.nn.functional import mse_loss

# Imports from aihwkit.
from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Define a single-layer network, using a constant step device type.
rpu_config = SingleRPUConfig(device=ConstantStepDevice())
model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

# Move the model and tensors to cuda if it is available.
if cuda.is_compiled():
    x = x.cuda()
    y = y.cuda()
    model.cuda()

# Define an analog-aware optimizer, preparing it for using the layers.
opt = AnalogSGD(model.parameters(), lr=0.1)
opt.regroup_param_groups(model)

for epoch in range(100):
    # Add the training Tensor to the model (input).
    pred = model(x)
    # Compute the loss against the expected output.
    loss = mse_loss(pred, y)

    # Run the backward pass and update the analog weights.
    opt.zero_grad()
    loss.backward()
    opt.step()
Example 23
 def get_rpu_config(device: Union[PulsedDevice, UnitCell], io_pars: IOParameters) \
         -> Union[SingleRPUConfig, UnitCellRPUConfig]:
     if isinstance(device, PulsedDevice):
         return SingleRPUConfig(device=device, forward=io_pars)
     return UnitCellRPUConfig(device=device, forward=io_pars)
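A usage sketch of the dispatch, assuming default `IOParameters` and a `ConstantStepDevice` (which is a `PulsedDevice` subclass):

io_pars = IOParameters()
rpu_config = get_rpu_config(ConstantStepDevice(), io_pars)  # returns a SingleRPUConfig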
Example 24
 def get_rpu_config(self):
     return SingleRPUConfig(device=ConstantStepDevice(
         w_max_dtod=0, w_min_dtod=0, up_down_dtod=0.0))
Example 25
 def get_rpu_config(self):
     rpu_config = SingleRPUConfig(device=IdealDevice())
     rpu_config.forward.is_perfect = True
     rpu_config.backward.is_perfect = True
     return rpu_config
Example 26
 def get_rpu_config(self):
     return SingleRPUConfig(
         device=PowStepDevice(w_max_dtod=0, w_min_dtod=0))
Example 27
 def get_rpu_config(self):
     return SingleRPUConfig(
         device=SoftBoundsPmaxDevice(w_max_dtod=0, w_min_dtod=0))
Example 28
def plot_device_compact(device: PulsedDevice,
                        w_noise: float = 0.0,
                        n_steps: int = None,
                        n_traces: int = 3) -> Figure:
    """Plots a compact step response figure for a given device (preset).

    Note:
        It will use an amount of read weight noise ``w_noise`` for
        reading the weights.

    Args:
        device: PulsedDevice parameters
        w_noise: Weight noise standard deviation during read
        n_steps: Number of steps for up/down cycle
        n_traces: Number of traces to plot (for device-to-device variation)

    Returns:
        the compact step response figure.
    """
    # pylint: disable=too-many-locals,too-many-statements
    figure = plt.figure(figsize=[12, 4])

    # To simulate some weight read noise.
    io_pars = IOParameters(
        out_noise=0.0,  # no out noise
        w_noise=w_noise,  # quite low
        inp_res=-1.,  # turn off DAC
        out_bound=100.,  # not limiting
        out_res=-1.,  # turn off ADC
        bound_management=BoundManagementType.NONE,
        noise_management=NoiseManagementType.NONE,
        w_noise_type=WeightNoiseType.ADDITIVE_CONSTANT)

    rpu_config = SingleRPUConfig(device=device, forward=io_pars)

    if n_steps is None:
        n_steps = estimate_n_steps(rpu_config)

    use_cuda = False

    # Noisy tile response curves.
    n_loops = 2
    total_iters = n_loops * 2 * n_steps
    direction = np.sign(np.sin(np.pi * (np.arange(total_iters) + 1) / n_steps))

    analog_tile = get_tile_for_plotting(rpu_config,
                                        n_traces,
                                        use_cuda,
                                        noise_free=False)
    w_trace = compute_pulse_response(analog_tile, direction, use_forward=True)\
        .reshape(-1, n_traces)

    axis = figure.add_subplot(1, 1, 1)
    axis.plot(w_trace, linewidth=1)
    axis.set_title(analog_tile.rpu_config.device.__class__.__name__)
    axis.set_xlabel('Pulse number #')
    limit = np.abs(w_trace).max() * 1.2
    axis.set_ylim(-limit, limit)
    axis.set_xlim(0, total_iters - 1)
    axis.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))

    # Noise-free tile for statistics.
    n_loops = 1
    total_iters = min(max(n_loops * 2 * n_steps, 1000),
                      max(50000, 2 * n_steps))
    direction = np.sign(np.sin(np.pi * (np.arange(total_iters) + 1) / n_steps))

    analog_tile_noise_free = get_tile_for_plotting(rpu_config,
                                                   n_traces,
                                                   use_cuda,
                                                   noise_free=True)
    analog_tile_noise_free.set_hidden_parameters(
        analog_tile.get_hidden_parameters())

    w_trace = compute_pulse_response(analog_tile_noise_free, direction, False)

    # Compute statistics.
    num_nodes = min(n_steps, 100)
    w_nodes = np.linspace(w_trace.min(), w_trace.max(), num_nodes)

    dw_mean_up = compute_pulse_statistics(w_nodes, w_trace, direction, True)[0]\
        .reshape(-1, n_traces)
    dw_mean_down = compute_pulse_statistics(w_nodes, w_trace, direction, False)[0]\
        .reshape(-1, n_traces)

    # Plot mean up statistics.
    pos = axis.get_position().bounds
    space = 0.1
    gap = 0.01
    axis.set_position(
        [pos[0] + gap + space, pos[1], pos[2] - 2 * gap - 2 * space, pos[3]])
    axis.set_yticks([])

    axis_left = figure.add_axes([pos[0], pos[1], space, pos[3]])
    dw_mean_up = dw_mean_up.reshape(-1, n_traces)
    for i in range(n_traces):
        axis_left.plot(dw_mean_up[:, i], w_nodes)

    axis_left.set_position([pos[0], pos[1], space, pos[3]])
    axis_left.set_xlabel('Up pulse size')
    axis_left.set_ylabel('Weight \n [conductance]')
    axis_left.set_ylim(-limit, limit)

    # Plot mean down statistics.
    axis_right = figure.add_axes(
        [pos[0] + pos[2] - space, pos[1], space, pos[3]])
    dw_mean_down = dw_mean_down.reshape(-1, n_traces)
    for i in range(n_traces):
        axis_right.plot(np.abs(dw_mean_down[:, i]), w_nodes)

    axis_right.set_yticks([])
    axis_right.set_xlabel('Down pulse size')
    axis_right.set_ylim(-limit, limit)

    # Set xlim's.
    limit = np.maximum(np.nanmax(np.abs(dw_mean_down)),
                       np.nanmax(np.abs(dw_mean_up))) * 1.2
    axis_left.set_xlim(0.0, limit)
    axis_right.set_xlim(0.0, limit)

    return figure
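A short usage sketch for the compact plot, again assuming a `ConstantStepDevice` and an interactive matplotlib backend:

import matplotlib.pyplot as plt
from aihwkit.simulator.configs.devices import ConstantStepDevice

# Compact step response with three device traces and a little read noise.
figure = plot_device_compact(ConstantStepDevice(), w_noise=0.01, n_traces=3)
plt.show()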
Example 29
 def get_rpu_config(self):
     return SingleRPUConfig(device=IdealDevice())