class TikiTakaIdealizedPreset(UnitCellRPUConfig):
    """Configuration using Tiki-taka with
    :class:`~aihwkit.simulator.presets.devices.IdealizedPresetDevice`.

    See :class:`~aihwkit.simulator.configs.devices.TransferCompound` for
    details on Tiki-taka-like optimizers.

    The default peripheral hardware
    (:class:`~aihwkit.simulator.presets.utils.PresetIOParameters`) and
    analog update
    (:class:`~aihwkit.simulator.presets.utils.PresetUpdateParameters`)
    configuration is used otherwise.
    """

    # Transfer compound built from a pair of idealized preset devices.
    # Preset IO/update parameters govern the transfer reads and updates;
    # transfer happens once per mini-batch (units_in_mbatch=True).
    device: UnitCell = field(
        default_factory=lambda: TransferCompound(
            unit_cell_devices=[IdealizedPresetDevice(), IdealizedPresetDevice()],
            transfer_forward=PresetIOParameters(),
            transfer_update=PresetUpdateParameters(),
            transfer_every=1.0,
            units_in_mbatch=True,
        ))

    # Preset peripheral hardware for the forward/backward passes and the
    # analog update, matching the other presets in this module.
    forward: IOParameters = field(default_factory=PresetIOParameters)
    backward: IOParameters = field(default_factory=PresetIOParameters)
    update: UpdateParameters = field(default_factory=PresetUpdateParameters)
def get_transfer_compound(gamma, **kwargs):
    """Return a Tiki-taka transfer-compound config with reference cells.

    Args:
        gamma: mixing coefficient forwarded to
            :class:`TransferCompound` (see its documentation).
        **kwargs: extra keyword arguments applied to every underlying
            :class:`SoftBoundsDevice`.

    Returns:
        A :class:`UnitCellRPUConfig` whose device is a
        :class:`TransferCompound` of two :class:`ReferenceUnitCell`\\ s.
    """

    def make_device(**dev_kwargs):
        """Soft-bounds device with fixed bounds and no dtod variation."""
        return SoftBoundsDevice(
            w_max_dtod=0.0,
            w_min_dtod=0.0,
            w_max=1.0,
            w_min=-1.0,
            **dev_kwargs,
        )

    def make_reference_cell():
        """Unit cell holding a device paired with its reference device."""
        return ReferenceUnitCell([make_device(**kwargs), make_device(**kwargs)])

    # First cell is the fast "A" matrix, second the slow "C" matrix.
    return UnitCellRPUConfig(
        device=TransferCompound(
            unit_cell_devices=[make_reference_cell(), make_reference_cell()],
            gamma=gamma,
        ))
def get_rpu_config(self):
    """Build a Tiki-taka transfer-compound RPU configuration.

    The compound pairs two symmetric soft-bounds devices (no
    device-to-device bound variation); the transfer read uses a
    perfect (noise-free) forward pass and runs every step.
    """
    # NOTE(review): `self` is unused here — presumably kept for a shared
    # interface with sibling classes; confirm against callers.
    ideal_read = IOParameters(is_perfect=True)
    devices = [
        SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0),
        SoftBoundsDevice(w_max_dtod=0, w_min_dtod=0),
    ]
    return UnitCellRPUConfig(
        device=TransferCompound(
            unit_cell_devices=devices,
            transfer_forward=ideal_read,
            transfer_every=1,
            gamma=0.1,
        ))
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# The Tiki-taka learning rule can be implemented using the transfer device.
rpu_config = UnitCellRPUConfig(
    device=TransferCompound(

        # Devices that compose the Tiki-taka compound.
        unit_cell_devices=[
            SoftBoundsDevice(w_min=-0.3, w_max=0.3),
            SoftBoundsDevice(w_min=-0.6, w_max=0.6)
        ],

        # Make some adjustments of the way Tiki-Taka is performed.
        units_in_mbatch=True,    # batch_size=1 anyway
        transfer_every=2,        # every 2 batches do a transfer-read
        # Renamed from the deprecated ``n_cols_per_transfer`` to match the
        # current TransferCompound API (see the companion example below).
        n_reads_per_transfer=1,  # one forward read for each transfer
        gamma=0.0,               # all SGD weight in second device
        scale_transfer_lr=True,  # in relative terms to SGD LR
        transfer_lr=1.0,         # same transfer LR as for SGD
    )
)

# Make more adjustments (can be made here or above).
rpu_config.forward.inp_res = 1/64.  # 6 bit DAC

# same backward pass settings as forward
rpu_config.backward = rpu_config.forward
from aihwkit.simulator.rpu_base import cuda

# Training data: two input vectors and their target outputs.
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Tiki-taka is realized through a transfer compound: SGD updates land on
# the first (fast) device and are periodically read out and transferred
# onto the second (slow) device.
rpu_config = UnitCellRPUConfig(device=TransferCompound(
    # The two devices making up the compound.
    unit_cell_devices=[
        SoftBoundsDevice(w_min=-0.3, w_max=0.3),
        SoftBoundsDevice(w_min=-0.6, w_max=0.6)
    ],
    # Knobs controlling how the Tiki-taka transfer is carried out.
    units_in_mbatch=True,     # batch_size=1 anyway
    transfer_every=2,         # every 2 batches do a transfer-read
    n_reads_per_transfer=1,   # one forward read for each transfer
    gamma=0.0,                # all SGD weight in second device
    scale_transfer_lr=True,   # in relative terms to SGD LR
    transfer_lr=1.0,          # same transfer LR as for SGD
    fast_lr=0.1,              # SGD update onto first matrix constant
    transfer_columns=True     # transfer use columns (not rows)
))

# Peripheral tweaks: 6-bit DAC resolution on the forward pass...
rpu_config.forward.inp_res = 1 / 64.
# ...and the backward pass shares the exact same settings.
rpu_config.backward = rpu_config.forward