def create_analog_network():
    """Returns a Vgg8 inspired analog model."""
    channel_base = 48
    channel = [channel_base, 2 * channel_base, 3 * channel_base]
    fc_size = 8 * channel_base
    model = AnalogSequential(
        nn.Conv2d(in_channels=3,
                  out_channels=channel[0],
                  kernel_size=3,
                  stride=1,
                  padding=1), nn.ReLU(),
        AnalogConv2d(in_channels=channel[0],
                     out_channels=channel[0],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[0]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=channel[0],
                     out_channels=channel[1],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=channel[1],
                     out_channels=channel[1],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[1]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=channel[1],
                     out_channels=channel[2],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=channel[2],
                     out_channels=channel[2],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[2]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        nn.Flatten(),
        AnalogLinear(in_features=16 * channel[2],
                     out_features=fc_size,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        nn.Linear(in_features=fc_size, out_features=N_CLASSES),
        nn.LogSoftmax(dim=1))
    return model
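
The snippet assumes RPU_CONFIG, WEIGHT_SCALING_OMEGA and N_CLASSES are defined elsewhere. A minimal sketch of plausible definitions (the concrete values are assumptions, mirroring Examples #8 and #9 below):

from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice

RPU_CONFIG = SingleRPUConfig(device=ConstantStepDevice())  # any RPU configuration works here
WEIGHT_SCALING_OMEGA = 0.6  # assumed value; 0.6-0.8 appears in the preset examples below
N_CLASSES = 10              # e.g. CIFAR-10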
Example #2
    def __init__(self):
        super().__init__()

        self.feature_extractor = nn.Sequential(
            AnalogConv2d(in_channels=1,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         rpu_config=RPU_CONFIG), nn.Tanh(),
            nn.MaxPool2d(kernel_size=2),
            AnalogConv2d(in_channels=16,
                         out_channels=32,
                         kernel_size=5,
                         stride=1,
                         rpu_config=RPU_CONFIG), nn.Tanh(),
            nn.MaxPool2d(kernel_size=2), nn.Tanh())

        self.classifier = nn.Sequential(
            AnalogLinear(in_features=512,
                         out_features=128,
                         rpu_config=RPU_CONFIG),
            nn.Tanh(),
            AnalogLinear(in_features=128,
                         out_features=N_CLASSES,
                         rpu_config=RPU_CONFIG),
        )
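
The excerpt shows only __init__; a forward method in the same class would wire the two blocks together. A minimal sketch (the flatten step is an assumption, sized for 28x28 single-channel inputs):

    def forward(self, x):
        # 28x28 -> conv5 -> 24x24 -> pool -> 12x12 -> conv5 -> 8x8 -> pool -> 4x4,
        # so the flattened feature vector has 32 * 4 * 4 = 512 entries,
        # matching in_features=512 in the classifier.
        x = self.feature_extractor(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)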
Example #3
    def get_model(self, rpu_config: Any = TikiTakaEcRamPreset) -> Module:
        return AnalogSequential(
            Conv2d(in_channels=3,
                   out_channels=48,
                   kernel_size=3,
                   stride=1,
                   padding=1), ReLU(),
            AnalogConv2d(in_channels=48,
                         out_channels=48,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(48), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            AnalogConv2d(in_channels=48,
                         out_channels=96,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            AnalogConv2d(in_channels=96,
                         out_channels=96,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(96), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            AnalogConv2d(in_channels=96,
                         out_channels=144,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            AnalogConv2d(in_channels=144,
                         out_channels=144,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(144), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            Flatten(),
            AnalogLinear(in_features=16 * 144,
                         out_features=384,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            Linear(in_features=384, out_features=10), LogSoftmax(dim=1))
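
Note that TikiTakaEcRamPreset is passed as a class and only instantiated inside the method via rpu_config(), so each analog layer gets its own configuration object. The presets live in aihwkit.simulator.presets, and swapping one in is a one-liner (hypothetical call from within the enclosing class):

from aihwkit.simulator.presets import EcRamPreset

model = self.get_model(rpu_config=EcRamPreset)  # or ReRamSBPreset, etc.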
Example #4
    def get_layer(self, in_channels=2, out_channels=3, **kwargs):
        kwargs.setdefault('rpu_config', self.get_rpu_config())
        kwargs.setdefault('bias', self.bias)
        kwargs.setdefault('kernel_size', [3, 3])
        kwargs.setdefault('padding', 2)
        kwargs['rpu_config'].mapping.digital_bias = self.digital_bias
        return AnalogConv2d(in_channels, out_channels, **kwargs)
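
With the defaults above (3x3 kernel, padding=2, stride 1), the spatial size grows by 2 in each dimension. A quick shape check (hypothetical usage; assumes the enclosing test class provides get_rpu_config(), bias and digital_bias):

import torch

layer = self.get_layer()              # AnalogConv2d(2, 3, kernel_size=[3, 3], padding=2)
out = layer(torch.randn(1, 2, 8, 8))  # out.shape == torch.Size([1, 3, 10, 10])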
Example #5
    def get_layer(self, in_channels=2, out_channels=3, kernel_size=4, padding=2, **kwargs):
        kwargs.setdefault('rpu_config', self.get_rpu_config())
        kwargs.setdefault('bias', self.bias)

        return AnalogConv2d(in_channels, out_channels, kernel_size,
                            padding=padding,
                            **kwargs).cuda()
Example #6
    def get_model(self, rpu_config: Any = ReRamSBPreset) -> Module:
        return AnalogSequential(
            AnalogConv2d(in_channels=3,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config()), Tanh(),
            MaxPool2d(kernel_size=2),
            AnalogConv2d(in_channels=16,
                         out_channels=32,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config()), Tanh(),
            MaxPool2d(kernel_size=2), Tanh(), Flatten(),
            AnalogLinear(in_features=800,
                         out_features=128,
                         rpu_config=rpu_config()), Tanh(),
            AnalogLinear(in_features=128,
                         out_features=10,
                         rpu_config=rpu_config()), LogSoftmax(dim=1))
Example #7
def create_analog_network():
    """Return a LeNet5 inspired analog model."""
    channel = [16, 32, 512, 128]
    model = AnalogSequential(
        AnalogConv2d(in_channels=1, out_channels=channel[0], kernel_size=5, stride=1,
                     rpu_config=RPU_CONFIG),
        nn.Tanh(),
        nn.MaxPool2d(kernel_size=2),
        AnalogConv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=5, stride=1,
                     rpu_config=RPU_CONFIG),
        nn.Tanh(),
        nn.MaxPool2d(kernel_size=2),
        nn.Tanh(),
        nn.Flatten(),
        AnalogLinear(in_features=channel[2], out_features=channel[3], rpu_config=RPU_CONFIG),
        nn.Tanh(),
        AnalogLinear(in_features=channel[3], out_features=N_CLASSES, rpu_config=RPU_CONFIG),
        nn.LogSoftmax(dim=1)
    )

    return model
Example #8
    def get_model(self, rpu_config: Any = EcRamPreset) -> Module:
        return AnalogSequential(
            AnalogConv2d(in_channels=1,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            MaxPool2d(kernel_size=2),
            AnalogConv2d(in_channels=16,
                         out_channels=32,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            MaxPool2d(kernel_size=2), Tanh(), Flatten(),
            AnalogLinear(in_features=512,
                         out_features=128,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            AnalogLinear(in_features=128,
                         out_features=10,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), LogSoftmax(dim=1))
Example #9
from torch import nn

# Imports from aihwkit.
from aihwkit.nn import AnalogLinear, AnalogConv2d, AnalogSequential
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
from aihwkit.utils.analog_info import analog_summary

# Define the RPU configuration, using a constant step device type.
rpu_config = SingleRPUConfig(device=ConstantStepDevice())

channel = [16, 32, 512, 128]
model = AnalogSequential(
    AnalogConv2d(in_channels=1,
                 out_channels=channel[0],
                 kernel_size=5,
                 stride=1,
                 rpu_config=rpu_config), nn.Tanh(),
    nn.MaxPool2d(kernel_size=2),
    AnalogConv2d(in_channels=channel[0],
                 out_channels=channel[1],
                 kernel_size=5,
                 stride=1,
                 rpu_config=rpu_config), nn.Tanh(),
    nn.MaxPool2d(kernel_size=2), nn.Tanh(), nn.Flatten(),
    AnalogLinear(in_features=channel[2],
                 out_features=channel[3],
                 rpu_config=rpu_config), nn.Tanh(),
    AnalogLinear(in_features=channel[3],
                 out_features=10,
                 rpu_config=rpu_config), nn.LogSoftmax(dim=1))
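
The import of analog_summary above is otherwise unused; the snippet presumably ends by printing the analog tile mapping and parameter summary (the input size assumes MNIST-shaped 1x28x28 batches):

analog_summary(model, (1, 1, 28, 28))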
Example #10
def VGG8():
    """VGG8 inspired analog model."""
    model = AnalogSequential(
        AnalogConv2d(in_channels=3,
                     out_channels=128,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=128,
                     out_channels=128,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(128), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=128,
                     out_channels=256,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=256,
                     out_channels=256,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(256), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=256,
                     out_channels=512,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=512,
                     out_channels=512,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(512), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        nn.Flatten(),
        AnalogLinear(in_features=8192,
                     out_features=1024,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogLinear(in_features=1024,
                     out_features=N_CLASSES,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.LogSoftmax(dim=1))

    return model
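
None of the examples show the training side; a minimal sketch of how such a model is typically optimized with aihwkit's AnalogSGD (learning rate and data are placeholders, and RPU_CONFIG, WEIGHT_SCALING_OMEGA and N_CLASSES must be defined as in the sketch at the end of Example #1):

import torch
from aihwkit.optim import AnalogSGD

model = VGG8()
optimizer = AnalogSGD(model.parameters(), lr=0.05)
optimizer.regroup_param_groups(model)  # attach the analog tiles to the optimizer

criterion = torch.nn.NLLLoss()         # pairs with the LogSoftmax output
inputs = torch.randn(4, 3, 32, 32)     # placeholder CIFAR-10-shaped batch
targets = torch.randint(0, N_CLASSES, (4,))

optimizer.zero_grad()
loss = criterion(model(inputs), targets)
loss.backward()
optimizer.step()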