Beispiel #1
0
def create_analog_network(input_size, hidden_sizes, output_size):
    """Build the neural network from analog and digital layers.

    Args:
        input_size (int): size of the Tensor at the input.
        hidden_sizes (list): list of sizes of the hidden layers (2 layers).
        output_size (int): size of the Tensor at the output.

    Returns:
        nn.Module: created analog model
    """
    # Every analog layer receives its own inference-oriented RPU config.
    layers = [
        AnalogLinear(input_size, hidden_sizes[0], True,
                     rpu_config=InferenceRPUConfig()),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[0], hidden_sizes[1], True,
                     rpu_config=InferenceRPUConfig()),
        nn.Sigmoid(),
        # Final projection uses the mapped layer (tiles split automatically).
        AnalogLinearMapped(hidden_sizes[1], output_size, True,
                           rpu_config=InferenceRPUConfig()),
        nn.LogSoftmax(dim=1),
    ]
    return AnalogSequential(*layers)
Beispiel #2
0
    def __init__(self,
                 out_size: int,
                 in_size: int,
                 rpu_config: Optional[InferenceRPUConfig] = None,
                 bias: bool = False,
                 in_trans: bool = False,
                 out_trans: bool = False):
        """Initialize inference-specific state, then delegate to the base tile.

        Args:
            out_size: output dimension of the tile.
            in_size: input dimension of the tile.
            rpu_config: tile configuration; a fresh ``InferenceRPUConfig``
                is used when none (or a falsy value) is given.
            bias: whether the tile carries a bias.
            in_trans: input transposition flag (forwarded to the base class).
            out_trans: output transposition flag (forwarded to the base class).
        """
        rpu_config = rpu_config if rpu_config else InferenceRPUConfig()

        # Deep-copy so this tile owns its noise model and drift compensation,
        # independent of the (possibly shared) config object.
        self.noise_model = deepcopy(rpu_config.noise_model)
        self.drift_compensation = (deepcopy(rpu_config.drift_compensation)
                                   if rpu_config.drift_compensation else None)

        # Drift bookkeeping, populated lazily later on.
        self.drift_baseline = None
        self.drift_readout_tensor = None  # type: Optional[Tensor]
        self.alpha = ones((1, ))

        # Cached helper tensors.
        self.reference_combined_weights = None  # type: Optional[Tensor]
        self.programmed_weights = None  # type: Optional[Tensor]
        self.nu_drift_list = None  # type: Optional[List[Tensor]]

        super().__init__(out_size, in_size, rpu_config, bias, in_trans,
                         out_trans)
Beispiel #3
0
    def __init__(self,
                 out_size: int,
                 in_size: int,
                 rpu_config: Optional['InferenceRPUConfig'] = None,
                 bias: bool = False,
                 in_trans: bool = False,
                 out_trans: bool = False):
        """Initialize inference-specific state, then delegate to the base tile.

        Args:
            out_size: output dimension of the tile.
            in_size: input dimension of the tile.
            rpu_config: tile configuration; a fresh ``InferenceRPUConfig``
                is created when none (or a falsy value) is given.
            bias: whether the tile carries a bias.
            in_trans: input transposition flag (forwarded to the base class).
            out_trans: output transposition flag (forwarded to the base class).
        """
        if not rpu_config:
            # Deferred import breaks the circular dependency with the
            # configs module at import time.
            # pylint: disable=import-outside-toplevel
            from aihwkit.simulator.configs import InferenceRPUConfig
            rpu_config = InferenceRPUConfig()

        # Deep-copy so this tile owns its noise model and drift compensation,
        # independent of the (possibly shared) config object.
        self.noise_model = deepcopy(rpu_config.noise_model)
        self.drift_compensation = (deepcopy(rpu_config.drift_compensation)
                                   if rpu_config.drift_compensation else None)

        # Drift bookkeeping, populated lazily later on.
        self.drift_baseline = None
        self.drift_readout_tensor = None  # type: Optional[Tensor]
        self.alpha = ones((1, ))

        # Cached helper tensors.
        self.reference_combined_weights = None  # type: Optional[Tensor]
        self.programmed_weights = None  # type: Optional[Tensor]
        self.nu_drift_list = None  # type: Optional[List[Tensor]]

        super().__init__(out_size, in_size, rpu_config, bias, in_trans,
                         out_trans)
Beispiel #4
0
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool,
        rpu_config: Optional[RPUConfigAlias] = None,
        realistic_read_write: bool = False,
    ):
        """Create the cell's two analog linear projections.

        Args:
            input_size: size of the input feature vector.
            hidden_size: size of the hidden state.
            bias: whether the analog layers use a bias.
            rpu_config: tile configuration shared by both layers; defaults
                to ``InferenceRPUConfig`` when none is given.
            realistic_read_write: forwarded to both ``AnalogLinear`` layers.
        """
        super().__init__()

        # Fall back to the hardware-aware inference configuration.
        rpu_config = rpu_config or InferenceRPUConfig()

        self.input_size = input_size
        self.hidden_size = hidden_size

        def _analog_linear(n_in: int, n_out: int) -> AnalogLinear:
            # Both projections share the same config and read/write mode.
            return AnalogLinear(n_in, n_out, bias=bias, rpu_config=rpu_config,
                                realistic_read_write=realistic_read_write)

        # Input-to-hidden and hidden-to-hidden projections.
        self.weight_ih = _analog_linear(input_size, hidden_size)
        self.weight_hh = _analog_linear(hidden_size, hidden_size)
Beispiel #5
0
 def get_rpu_config(self):
     """Return a fresh default inference RPU configuration."""
     config = InferenceRPUConfig()
     return config
Beispiel #6
0
 def get_rpu_config(self):
     """Return an inference RPU configuration with an ideal forward pass."""
     config = InferenceRPUConfig()
     # Disable the forward-pass nonidealities entirely.
     config.forward.is_perfect = True
     return config
# On-disk locations for the MNIST train/test datasets.
TRAIN_DATASET = os.path.join('data', 'TRAIN_DATASET')
TEST_DATASET = os.path.join('data', 'TEST_DATASET')

# Path to store results
RESULTS = os.path.join('results', 'LENET5')

# Training parameters
SEED = 1
N_EPOCHS = 30
BATCH_SIZE = 8
LEARNING_RATE = 0.01
N_CLASSES = 10

# Define the properties of the neural network in terms of noise simulated during
# the inference/training pass
RPU_CONFIG = InferenceRPUConfig()
RPU_CONFIG.forward.out_res = -1.  # Turn off (output) ADC discretization.
RPU_CONFIG.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
RPU_CONFIG.forward.w_noise = 0.02  # Short-term weight noise strength.
RPU_CONFIG.noise_model = PCMLikeNoiseModel(g_max=25.0)  # PCM-like inference noise.


def load_images():
    """Load the MNIST train/validation sets and wrap them in DataLoaders."""
    to_tensor = transforms.Compose([transforms.ToTensor()])

    # Download (if needed) and load both dataset splits.
    train_images = datasets.MNIST(TRAIN_DATASET, download=True, train=True, transform=to_tensor)
    test_images = datasets.MNIST(TEST_DATASET, download=True, train=False, transform=to_tensor)

    # Only the training loader shuffles between epochs.
    train_loader = torch.utils.data.DataLoader(train_images, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_images, batch_size=BATCH_SIZE, shuffle=False)

    return train_loader, test_loader
Beispiel #8
0
# Training hyperparameters.
EPOCHS = 50
BATCH_SIZE = 5
SEQ_LEN = 501  # sequence length
RNN_CELL = AnalogGRUCell  # type of RNN cell
WITH_EMBEDDING = True  # RNN with embedding
WITH_BIDIR = False  # presumably toggles a bidirectional RNN -- confirm at use site
USE_ANALOG_TRAINING = False  # or hardware-aware training
# Use the GPU only when the aihwkit CUDA extension was compiled in.
DEVICE = torch.device('cuda') if cuda.is_compiled() else torch.device('cpu')

if USE_ANALOG_TRAINING:
    # Define a RPU configuration for analog training
    rpu_config = GokmenVlasovPreset()

else:
    # Define an RPU configuration using inference/hardware-aware training tile
    rpu_config = InferenceRPUConfig()
    rpu_config.forward.out_res = -1.  # Turn off (output) ADC discretization.
    rpu_config.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
    rpu_config.forward.w_noise = 0.02  # Short-term w-noise.

    # Clip weights to a fixed range during training.
    rpu_config.clip.type = WeightClipType.FIXED_VALUE
    rpu_config.clip.fixed_value = 1.0
    rpu_config.modifier.pdrop = 0.03  # Drop connect.
    rpu_config.modifier.type = WeightModifierType.ADD_NORMAL  # Fwd/bwd weight noise.
    rpu_config.modifier.std_dev = 0.1
    rpu_config.modifier.rel_to_actual_wmax = True

    # Inference noise model.
    rpu_config.noise_model = PCMLikeNoiseModel(g_max=25.0)

    # drift compensation
Beispiel #9
0
# Imports from aihwkit.
from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import InferenceRPUConfig
from aihwkit.simulator.configs.utils import (WeightNoiseType, WeightClipType,
                                             WeightModifierType)
from aihwkit.inference import PCMLikeNoiseModel, GlobalDriftCompensation
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Define a single-layer network, using inference/hardware-aware training tile
rpu_config = InferenceRPUConfig()
rpu_config.forward.out_res = -1.  # Turn off (output) ADC discretization.
rpu_config.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
rpu_config.forward.w_noise = 0.02  # Short-term w-noise.

# Clip weights to a fixed range during training.
rpu_config.clip.type = WeightClipType.FIXED_VALUE
rpu_config.clip.fixed_value = 1.0
rpu_config.modifier.pdrop = 0.03  # Drop connect.
rpu_config.modifier.type = WeightModifierType.ADD_NORMAL  # Fwd/bwd weight noise.
rpu_config.modifier.std_dev = 0.1
rpu_config.modifier.rel_to_actual_wmax = True

# Inference noise model.
rpu_config.noise_model = PCMLikeNoiseModel(g_max=25.0)

# drift compensation