Example #1
 from aihwkit.simulator.configs import UnitCellRPUConfig
 from aihwkit.simulator.configs.devices import (
     TransferCompound, SoftBoundsDevice)
 from aihwkit.simulator.configs.utils import IOParameters

 def get_rpu_config(self):
     return UnitCellRPUConfig(
         device=TransferCompound(
             unit_cell_devices=[
                 SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0),
                 SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0)
             ],
             transfer_forward=IOParameters(is_perfect=True)))
Example #2
 from aihwkit.simulator.configs.devices import SoftBoundsDevice

 def custom_device(**kwargs):
     """Return a SoftBounds device with fixed bounds and no device-to-device variation."""
     return SoftBoundsDevice(w_max_dtod=0.0,
                             w_min_dtod=0.0,
                             w_max=1.0,
                             w_min=-1.0,
                             **kwargs)
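The helper above can then be plugged into any RPU configuration. A minimal usage sketch (assuming aihwkit's SingleRPUConfig and the dw_min parameter of SoftBoundsDevice; the value 0.01 is purely illustrative):

 from aihwkit.simulator.configs import SingleRPUConfig

 # Override the minimal update step size while keeping the fixed bounds.
 rpu_config = SingleRPUConfig(device=custom_device(dw_min=0.01))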
Example #3
 from aihwkit.simulator.configs import DigitalRankUpdateRPUConfig
 from aihwkit.simulator.configs.devices import (
     MixedPrecisionCompound, SoftBoundsDevice)

 def get_rpu_config(self):
     return DigitalRankUpdateRPUConfig(device=MixedPrecisionCompound(
         device=SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0),
         transfer_every=1))
Example #4
 from aihwkit.simulator.configs import UnitCellRPUConfig
 from aihwkit.simulator.configs.devices import (
     ReferenceUnitCell, SoftBoundsDevice)

 def get_rpu_config(self):
     return UnitCellRPUConfig(device=ReferenceUnitCell(unit_cell_devices=[
         SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0),
         SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0)
     ]))
Example #5
 from aihwkit.simulator.configs import SingleRPUConfig
 from aihwkit.simulator.configs.devices import SoftBoundsDevice

 def get_rpu_config(self):
     return SingleRPUConfig(
         device=SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0))
Example #6
from torch import Tensor

from aihwkit.simulator.configs import UnitCellRPUConfig
from aihwkit.simulator.configs.devices import (
    TransferCompound,
    SoftBoundsDevice)
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# The Tiki-taka learning rule can be implemented using the transfer device.
rpu_config = UnitCellRPUConfig(
    device=TransferCompound(

        # Devices that compose the Tiki-taka compound.
        unit_cell_devices=[
            SoftBoundsDevice(w_min=-0.3, w_max=0.3),
            SoftBoundsDevice(w_min=-0.6, w_max=0.6)
        ],

        # Make some adjustments to the way Tiki-taka is performed.
        units_in_mbatch=True,    # batch_size=1 anyway
        transfer_every=2,        # every 2 batches do a transfer-read
        n_cols_per_transfer=1,   # one forward read for each transfer
        gamma=0.0,               # all SGD weight in second device
        scale_transfer_lr=True,  # in relative terms to SGD LR
        transfer_lr=1.0,         # same transfer LR as for SGD
    )
)

# Make more adjustments (can be made here or above).
rpu_config.forward.inp_res = 1/64.   # 6 bit DAC
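
# To actually exercise the Tiki-taka rule, attach the config to an
# analog layer and train with the analog optimizer. A minimal sketch
# (assuming aihwkit's AnalogLinear/AnalogSGD and torch's mse_loss):
from torch.nn.functional import mse_loss

from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD

model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

opt = AnalogSGD(model.parameters(), lr=0.1)
opt.regroup_param_groups(model)

for epoch in range(10):
    # Forward/backward on the analog tile; the transfer-read between
    # the two SoftBounds devices happens internally every 2 updates.
    opt.zero_grad()
    loss = mse_loss(model(x), y)
    loss.backward()
    opt.step()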
Example #7
from torch import Tensor

from aihwkit.nn import AnalogLinear
from aihwkit.simulator.configs import DigitalRankUpdateRPUConfig
from aihwkit.simulator.configs.devices import (MixedPrecisionCompound,
                                               SoftBoundsDevice)
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Select the device model to use in the training. While one could use
# a preset as well, here we build up the RPU config from more basic
# devices. We use the RPU config for a digital rank update with
# transfer to an analog device (as in mixed precision) and set it to a
# mixed-precision compound which in turn uses a SoftBounds analog
# device:
rpu_config = DigitalRankUpdateRPUConfig(
    device=MixedPrecisionCompound(device=SoftBoundsDevice()))
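
# Note on the mechanics (assumption: default MixedPrecisionCompound
# settings): the rank-one update is first accumulated in a digital
# floating-point matrix (the "chi" matrix of the mixed-precision
# scheme); every transfer_every updates, its content is transferred
# onto the analog SoftBounds device via pulsed updates.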

# print the config (default values are omitted)
print('\nPretty-print of non-default settings:\n')
print(rpu_config)

print('\nInfo about all settings:\n')
print(repr(rpu_config))

model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

# a more detailed printout of the instantiated C++ tile
print('\nInfo about the instantiated C++ tile:\n')
print(model.analog_tile.tile)

# Move the model and tensors to cuda if it is available.
if cuda.is_compiled():
    x = x.cuda()
    y = y.cuda()
    model = model.cuda()
Example #8
from torch import Tensor

from aihwkit.nn import AnalogLinear
from aihwkit.simulator.configs import DigitalRankUpdateRPUConfig
from aihwkit.simulator.configs.devices import (MixedPrecisionCompound,
                                               SoftBoundsDevice)
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Select the device model to use in the training. While one could use
# a preset as well, here we build up the RPU config from more basic
# devices. We use the RPU config for a digital rank update with
# transfer to an analog device (as in mixed precision) and set it to a
# mixed-precision compound which in turn uses a SoftBounds analog
# device:
rpu_config = DigitalRankUpdateRPUConfig(device=MixedPrecisionCompound(
    device=SoftBoundsDevice(),
    # adjust quantization level (0 means FP)
    n_x_bins=5,  # quantization bins of the digital rank update (activation)
    n_d_bins=3  # quantization bins of the digital rank update (error)
))

# print the config (default values are omitted)
print('\nPretty-print of non-default settings:\n')
print(rpu_config)

print('\nInfo about all settings:\n')
print(repr(rpu_config))

model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

# a more detailed printout of the instantiated C++ tile
print('\nInfo about the instantiated C++ tile:\n')
print(model.analog_tile.tile)
Example #9
from torch import Tensor

from aihwkit.nn import AnalogLinear
from aihwkit.simulator.configs import UnitCellRPUConfig
from aihwkit.simulator.configs.devices import (
    VectorUnitCell,
    ConstantStepDevice,
    LinearStepDevice,
    SoftBoundsDevice,
    ReferenceUnitCell)
from aihwkit.simulator.configs.utils import VectorUnitCellUpdatePolicy
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Define a single-layer network using a vector unit cell that has
# multiple devices per crosspoint. Each device can be defined arbitrarily.

rpu_config = UnitCellRPUConfig()
# 4 arbitrary devices per cross-point.
rpu_config.device = VectorUnitCell(
    unit_cell_devices=[
        ReferenceUnitCell(unit_cell_devices=[SoftBoundsDevice(w_max=1.0)]),
        ConstantStepDevice(),
        LinearStepDevice(w_max_dtod=0.4),
        SoftBoundsDevice()
    ])

# Only one of the devices should receive the update, selected randomly;
# the effective weight is the sum of all device weights.
rpu_config.device.update_policy = VectorUnitCellUpdatePolicy.SINGLE_RANDOM
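
# Other policies (assumption: members of aihwkit's
# VectorUnitCellUpdatePolicy enum): ALL updates every device in
# parallel, SINGLE_FIXED always updates the same device, and
# SINGLE_SEQUENTIAL cycles through the devices in order.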


model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

print(rpu_config)
Example #10
from torch import Tensor

from aihwkit.nn import AnalogLinear
from aihwkit.simulator.configs import UnitCellRPUConfig
from aihwkit.simulator.configs.devices import (VectorUnitCell,
                                               ConstantStepDevice,
                                               LinearStepDevice,
                                               SoftBoundsDevice)
from aihwkit.simulator.configs.utils import VectorUnitCellUpdatePolicy
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Define a single-layer network using a vector unit cell that has
# multiple devices per crosspoint. Each device can be defined arbitrarily.

rpu_config = UnitCellRPUConfig()
# 3 arbitrary devices per cross-point.
rpu_config.device = VectorUnitCell(unit_cell_devices=[
    ConstantStepDevice(),
    LinearStepDevice(w_max_dtod=0.4),
    SoftBoundsDevice()
])

# Only one of the devices should receive the update, selected randomly;
# the effective weight is the sum of all device weights.
rpu_config.device.update_policy = VectorUnitCellUpdatePolicy.SINGLE_RANDOM

model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

print(model.analog_tile.tile)

# Move the model and tensors to cuda if it is available.
if cuda.is_compiled():
    x = x.cuda()
    y = y.cuda()
    model = model.cuda()