Code example #1
0
# Imports from aihwkit.
from aihwkit.nn.conversion import convert_to_analog_mapped
from aihwkit.simulator.presets import TikiTakaReRamSBPreset
from aihwkit.simulator.configs.utils import MappingParameter

# Example: Load a predefined model from pytorch library and convert to
#          its analog version.

# ``resnet34`` is referenced below but was never imported in this snippet;
# it lives in torchvision, so import it here to make the example runnable.
from torchvision.models import resnet34

# Load a pytorch model.
model = resnet34()
print(model)

# Define device and chip configuration used in the RPU tile.
mapping = MappingParameter(
    max_input_size=512,  # analog tile size
    max_output_size=512,
    digital_bias=True,  # whether to use analog or digital bias
    weight_scaling_omega=0.6)  # weight scaling factor (should not exceed max weight)
# Choose any preset or RPU configuration here.
rpu_config = TikiTakaReRamSBPreset(mapping=mapping)

# Convert the model to its analog version.
# This will replace ``Linear`` layers with ``AnalogLinearMapped``.
model = convert_to_analog_mapped(model, rpu_config)

# Note: One can also use ``convert_to_analog`` instead to convert
# ``Linear`` to ``AnalogLinear`` (without mapping to multiple tiles)

print(model)
Code example #2
0
from matplotlib import animation

from aihwkit.nn import AnalogLinear, AnalogSequential
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.presets import MixedPrecisionEcRamMOPreset
from aihwkit.simulator.rpu_base import cuda

# Select the device model to use in the training.

# There are a number of presets available in
# aihwkit.simulator.presets. This will also determine the analog
# optimizer used (e.g. mixed precision or full analog update).

# As an example we use a mixed precision preset using an ECRAM device model.
from aihwkit.simulator.configs.utils import MappingParameter

# ``torch`` is used below (``torch.device``) but was never imported in this
# snippet; import it here so the example runs as-is.
import torch

mapping = MappingParameter(weight_scaling_omega=0.8)
RPU_CONFIG = MixedPrecisionEcRamMOPreset(mapping=mapping)

# Set your parameters.
SEED = 1
N_EPOCHS = 200
Z_DIM = 64
DISPLAY_STEP = 500
BATCH_SIZE = 256
LR = 2e-2

# Check device: use CUDA only when aihwkit was compiled with CUDA support.
USE_CUDA = 0
if cuda.is_compiled():
    USE_CUDA = 1
DEVICE = torch.device('cuda' if USE_CUDA else 'cpu')
Code example #3
0
File: 11_vgg8_training.py — Project: kaoutar55/aihwkit
# Path to store results
# NOTE(review): relies on ``os`` being imported elsewhere in the file.
RESULTS = os.path.join(os.getcwd(), 'results', 'VGG8')


# Training parameters
SEED = 1  # random seed for reproducibility
N_EPOCHS = 20  # number of training epochs
BATCH_SIZE = 128  # samples per mini-batch
LEARNING_RATE = 0.1  # optimizer learning rate
N_CLASSES = 10  # number of output classes (SVHN digits — see load_images below)
WEIGHT_SCALING_OMEGA = 0.6  # Should not be larger than max weight.

# Select the device model to use in the training. In this case we are using one of the
# presets, but it can be changed to other presets to explore different analog devices.
# NOTE(review): ``MappingParameter`` and ``GokmenVlasovPreset`` must be imported
# elsewhere in the file for this to run.
mapping = MappingParameter(weight_scaling_omega=WEIGHT_SCALING_OMEGA)
RPU_CONFIG = GokmenVlasovPreset(mapping=mapping)


def load_images():
    """Load images for train from torchvision datasets.

    Downloads the SVHN ``train`` and ``test`` splits, normalizes them with
    the per-channel mean/std constants below, and wraps the training split
    in a ``DataLoader``.

    NOTE(review): this snippet appears truncated — ``val_set`` is created
    but the corresponding validation loader and the return statement are
    not visible here.
    """
    # Per-channel normalization statistics applied to every image.
    mean = Tensor([0.4377, 0.4438, 0.4728])
    std = Tensor([0.1980, 0.2010, 0.1970])

    print(f'Normalization data: ({mean},{std})')

    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean, std)])
    # ``split`` selects the SVHN subset; data is downloaded into PATH_DATASET.
    train_set = datasets.SVHN(PATH_DATASET, download=True, split='train', transform=transform)
    val_set = datasets.SVHN(PATH_DATASET, download=True, split='test', transform=transform)
    train_data = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
Code example #4
0
 def get_rpu_config(self, **kwargs):
     """Return the RPU config, defaulting ``mapping`` to a small 4x3 tile.

     Any ``mapping`` explicitly supplied by the caller takes precedence;
     all keyword arguments are forwarded to the parent implementation.
     """
     if 'mapping' not in kwargs:
         kwargs['mapping'] = MappingParameter(max_input_size=4, max_output_size=3)
     return super().get_rpu_config(**kwargs)