Example #1
def create_analog_network(input_size, hidden_sizes, output_size):
    """Create the neural network using analog and digital layers.

    Args:
        input_size (int): size of the Tensor at the input.
        hidden_sizes (list): list of sizes of the hidden layers (2 layers).
        output_size (int): size of the Tensor at the output.
    """
    model = AnalogSequential(
        AnalogLinear(input_size,
                     hidden_sizes[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[0],
                     hidden_sizes[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[1],
                     output_size,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.LogSoftmax(dim=1))

    if USE_CUDA:
        model.cuda()

    print(model)
    return model
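The function above only constructs the model. A minimal training sketch (an illustration, not part of the original example), assuming the aihwkit ``AnalogSGD`` optimizer, MNIST-like sizes, and a hypothetical ``train_loader``:

# Training sketch (assumption): sizes, learning rate and data loader are illustrative.
from torch import nn
from aihwkit.optim import AnalogSGD

model = create_analog_network(784, [256, 128], 10)
optimizer = AnalogSGD(model.parameters(), lr=0.05)
optimizer.regroup_param_groups(model)   # let the optimizer find the analog layers
criterion = nn.NLLLoss()                # matches the LogSoftmax output

for images, labels in train_loader:     # `train_loader` is assumed to exist
    images = images.view(images.size(0), -1)
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()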
Example #2
    def test_sequential_move_to_cuda_via_to(self):
        """Test moving AnalogSequential to cuda (from CPU), using ``.to()``."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', current_device())

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.to(device('cuda'))

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
Example #3
    def test_sequential_move_to_cpu_via_to(self):
        """Test moving AnalogSequential to CPU (from CPU), using ``.to()``."""
        layer = self.get_layer()

        # Create a container and move to CPU.
        model = AnalogSequential(layer)
        model.to(device('cpu'))

        # Assert the tile is still on CPU.
        self.assertIsInstance(layer.analog_tile, AnalogTile)
Example #4
def create_analog_network(input_size, hidden_sizes, output_size):
    """Create the neural network using analog and digital layers.

    Args:
        input_size (int): size of the Tensor at the input.
        hidden_sizes (list): list of sizes of the hidden layers (2 layers).
        output_size (int): size of the Tensor at the output.

    Returns:
        nn.Module: created analog model
    """
    model = AnalogSequential(
        AnalogLinear(input_size,
                     hidden_sizes[0],
                     True,
                     rpu_config=InferenceRPUConfig()), nn.Sigmoid(),
        AnalogLinear(hidden_sizes[0],
                     hidden_sizes[1],
                     True,
                     rpu_config=InferenceRPUConfig()), nn.Sigmoid(),
        AnalogLinearMapped(hidden_sizes[1],
                           output_size,
                           True,
                           rpu_config=InferenceRPUConfig()),
        nn.LogSoftmax(dim=1))

    return model
Example #5
def main():
    """Create and execute an experiment."""
    model = AnalogSequential(
        Flatten(),
        AnalogLinear(INPUT_SIZE,
                     HIDDEN_SIZES[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[0],
                     HIDDEN_SIZES[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[1],
                     OUTPUT_SIZE,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        LogSoftmax(dim=1))

    # Create the training Experiment.
    experiment = BasicTrainingWithScheduler(dataset=FashionMNIST,
                                            model=model,
                                            epochs=EPOCHS,
                                            batch_size=BATCH_SIZE)

    # Create the runner and execute the experiment.
    runner = LocalRunner(device=DEVICE)
    results = runner.run(experiment, dataset_root=PATH_DATASET)
    print(results)
Example #6
def create_analog_network():
    """Returns a Vgg8 inspired analog model."""
    channel_base = 48
    channel = [channel_base, 2 * channel_base, 3 * channel_base]
    fc_size = 8 * channel_base
    model = AnalogSequential(
        nn.Conv2d(in_channels=3,
                  out_channels=channel[0],
                  kernel_size=3,
                  stride=1,
                  padding=1), nn.ReLU(),
        AnalogConv2d(in_channels=channel[0],
                     out_channels=channel[0],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[0]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=channel[0],
                     out_channels=channel[1],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=channel[1],
                     out_channels=channel[1],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[1]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=channel[1],
                     out_channels=channel[2],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=channel[2],
                     out_channels=channel[2],
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(channel[2]), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        nn.Flatten(),
        AnalogLinear(in_features=16 * channel[2],
                     out_features=fc_size,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        nn.Linear(in_features=fc_size, out_features=N_CLASSES),
        nn.LogSoftmax(dim=1))
    return model
Example #7
    def test_analog_torch_optimizer(self):
        """Check analog layers with torch SGD for inference."""
        loss_func = mse_loss

        x_b = Tensor([[0.1, 0.2, 0.3, 0.4], [0.2, 0.4, 0.3, 0.1]])
        y_b = Tensor([[0.3], [0.6]])

        manual_seed(4321)
        model = Sequential(
            self.get_layer(4, 3),
            self.get_layer(3, 1),
        )
        if not isinstance(model[0].analog_tile.rpu_config, InferenceRPUConfig):
            return

        manual_seed(4321)
        model2 = AnalogSequential(
            AnalogLinear(4,
                         3,
                         rpu_config=FloatingPointRPUConfig(),
                         bias=self.bias),
            AnalogLinear(3,
                         1,
                         rpu_config=FloatingPointRPUConfig(),
                         bias=self.bias))

        if self.use_cuda:
            x_b = x_b.cuda()
            y_b = y_b.cuda()
            model = model.cuda()
            model2 = model2.cuda()

        initial_loss = loss_func(model(x_b), y_b)

        # train with SGD
        self.train_model_torch(model, loss_func, x_b, y_b)
        self.assertLess(loss_func(model(x_b), y_b), initial_loss)

        # train with AnalogSGD
        self.train_model(model2, loss_func, x_b, y_b)
        self.assertLess(loss_func(model2(x_b), y_b), initial_loss)
        final_loss = loss_func(model(x_b), y_b).detach().cpu().numpy()
        final_loss2 = loss_func(model2(x_b), y_b).detach().cpu().numpy()

        assert_array_almost_equal(final_loss, final_loss2)
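The helpers ``self.train_model`` and ``self.train_model_torch`` are methods defined elsewhere in the test suite. A rough standalone sketch of what such loops typically look like (hypothetical helpers; the epoch count and learning rate are placeholders):

from torch.optim import SGD
from aihwkit.optim import AnalogSGD

def train_model_torch(model, loss_func, x_b, y_b, epochs=100, lr=0.5):
    """Sketch: train with the plain torch SGD optimizer."""
    opt = SGD(model.parameters(), lr=lr)
    for _ in range(epochs):
        opt.zero_grad()
        loss = loss_func(model(x_b), y_b)
        loss.backward()
        opt.step()

def train_model(model, loss_func, x_b, y_b, epochs=100, lr=0.5):
    """Sketch: train with AnalogSGD so the analog tiles are updated."""
    opt = AnalogSGD(model.parameters(), lr=lr)
    opt.regroup_param_groups(model)
    for _ in range(epochs):
        opt.zero_grad()
        loss = loss_func(model(x_b), y_b)
        loss.backward()
        opt.step()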
Example #8
    def test_sequential_move_to_cuda_multiple_gpus(self):
        """Test moving AnalogSequential to cuda (from CPU), using ``.to()``."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')
        if device_count() < 2:
            raise SkipTest('Need at least two devices for this test')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        # Test whether it can move to GPU with index 1
        expected_device_num = 1

        layer = self.get_layer()
        if isinstance(layer.analog_tile.tile,
                      (tiles.CudaAnalogTile, tiles.CudaFloatingPointTile)):
            raise SkipTest('Layer is already on CUDA')

        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', expected_device_num)

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda(device('cuda', expected_device_num))

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
Example #9
    def test_sequential_move_to_cuda(self):
        """Test sequential cuda."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            AnalogTile: CudaAnalogTile,
            CudaAnalogTile: CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.__class__]

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda()

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile, expected_class)
Example #10
    def get_model(self, rpu_config: Any = TikiTakaReRamSBPreset) -> Module:
        return AnalogSequential(
            Flatten(),
            AnalogLinear(784, 256, bias=True, rpu_config=rpu_config()),
            Sigmoid(),
            AnalogLinear(256, 128, bias=True, rpu_config=rpu_config()),
            Sigmoid(), AnalogLinear(128,
                                    10,
                                    bias=True,
                                    rpu_config=rpu_config()),
            LogSoftmax(dim=1))
Example #11
    def get_model(self, rpu_config: Any = TikiTakaEcRamPreset) -> Module:
        return AnalogSequential(
            Conv2d(in_channels=3,
                   out_channels=48,
                   kernel_size=3,
                   stride=1,
                   padding=1), ReLU(),
            AnalogConv2d(in_channels=48,
                         out_channels=48,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(48), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            AnalogConv2d(in_channels=48,
                         out_channels=96,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            AnalogConv2d(in_channels=96,
                         out_channels=96,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(96), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            AnalogConv2d(in_channels=96,
                         out_channels=144,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            AnalogConv2d(in_channels=144,
                         out_channels=144,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), BatchNorm2d(144), ReLU(),
            MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
            Flatten(),
            AnalogLinear(in_features=16 * 144,
                         out_features=384,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.8), ReLU(),
            Linear(in_features=384, out_features=10), LogSoftmax(dim=1))
Example #12
    def test_sequential_move_to_cpu(self):
        """Test moving AnalogSequential to CPU (from CPU)."""
        layer = self.get_layer()

        # Create a container and move to CPU.
        model = AnalogSequential(layer)
        model.cpu()

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, device('cpu'))
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         device('cpu'))

        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             device('cpu'))
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_d_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_x_size())

        # Assert the tile is still on CPU.
        self.assertIsInstance(layer.analog_tile.tile, tiles.AnalogTile)
Example #13
    def __init__(self, z_dim=10, im_dim=784, hidden_dim=128):
        super().__init__()
        # Build the neural network.
        self.gen = AnalogSequential(
            get_generator_block(z_dim, hidden_dim),
            get_generator_block(hidden_dim, hidden_dim * 2),
            get_generator_block(hidden_dim * 2, hidden_dim * 4),
            get_generator_block(hidden_dim * 4, hidden_dim * 8),
            AnalogLinear(hidden_dim * 8,
                         im_dim,
                         bias=True,
                         rpu_config=RPU_CONFIG),
            nn.Sigmoid(),
        )
Example #14
    def _model_from_proto(model_proto: Any) -> Module:
        layers = []
        for layer_proto in model_proto.layers:
            if layer_proto.WhichOneof('item') == 'layer':
                layer_cls = InverseMappings.layers[layer_proto.layer.id]
                layer = Mappings.layers[layer_cls].from_proto(
                    layer_proto.layer, layer_cls)
            else:
                layer_cls = InverseMappings.activation_functions[
                    layer_proto.activation_function.id]
                layer = Mappings.activation_functions[layer_cls].from_proto(
                    layer_proto.activation_function, layer_cls)

            layers.append(layer)

        return AnalogSequential(*layers)
Example #15
def get_generator_block(input_dim, output_dim):
    """Return a block of the generator's neural network given input and output
    dimensions.

    Args:
        input_dim (int): the dimension of the input vector, a scalar
        output_dim (int): the dimension of the output vector, a scalar

    Returns:
        nn.Module: a generator neural network layer, with a linear transformation
            followed by a batch normalization and then a relu activation
    """
    return AnalogSequential(
        AnalogLinear(input_dim, output_dim, bias=True, rpu_config=RPU_CONFIG),
        nn.BatchNorm1d(output_dim),
        nn.ReLU(inplace=True),
    )
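A quick shape check for the block above (illustrative only; it assumes ``RPU_CONFIG`` is already defined, as in the surrounding examples):

import torch

block = get_generator_block(10, 128)       # assumes RPU_CONFIG is defined
noise = torch.randn(4, 10)
print(block(noise).shape)                  # expected: torch.Size([4, 128])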
Example #16
    def get_model(self, rpu_config: Any = ReRamSBPreset) -> Module:
        return AnalogSequential(
            AnalogConv2d(in_channels=3,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config()), Tanh(),
            MaxPool2d(kernel_size=2),
            AnalogConv2d(in_channels=16,
                         out_channels=32,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config()), Tanh(),
            MaxPool2d(kernel_size=2), Tanh(), Flatten(),
            AnalogLinear(in_features=800,
                         out_features=128,
                         rpu_config=rpu_config()), Tanh(),
            AnalogLinear(in_features=128,
                         out_features=10,
                         rpu_config=rpu_config()), LogSoftmax(dim=1))
Example #17
def create_analog_network():
    """Return a LeNet5 inspired analog model."""
    channel = [16, 32, 512, 128]
    model = AnalogSequential(
        AnalogConv2d(in_channels=1, out_channels=channel[0], kernel_size=5, stride=1,
                     rpu_config=RPU_CONFIG),
        nn.Tanh(),
        nn.MaxPool2d(kernel_size=2),
        AnalogConv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=5, stride=1,
                     rpu_config=RPU_CONFIG),
        nn.Tanh(),
        nn.MaxPool2d(kernel_size=2),
        nn.Tanh(),
        nn.Flatten(),
        AnalogLinear(in_features=channel[2], out_features=channel[3], rpu_config=RPU_CONFIG),
        nn.Tanh(),
        AnalogLinear(in_features=channel[3], out_features=N_CLASSES, rpu_config=RPU_CONFIG),
        nn.LogSoftmax(dim=1)
    )

    return model
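``RPU_CONFIG`` and ``N_CLASSES`` are module-level constants that this example does not show. A plausible definition, assuming the single constant-step device configuration used elsewhere in this listing:

# Assumed definitions for the constants referenced above (not part of the original example).
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice

RPU_CONFIG = SingleRPUConfig(device=ConstantStepDevice())
N_CLASSES = 10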
Example #18
    def get_model(self, rpu_config: Any = EcRamPreset) -> Module:
        return AnalogSequential(
            AnalogConv2d(in_channels=1,
                         out_channels=16,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            MaxPool2d(kernel_size=2),
            AnalogConv2d(in_channels=16,
                         out_channels=32,
                         kernel_size=5,
                         stride=1,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            MaxPool2d(kernel_size=2), Tanh(), Flatten(),
            AnalogLinear(in_features=512,
                         out_features=128,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), Tanh(),
            AnalogLinear(in_features=128,
                         out_features=10,
                         rpu_config=rpu_config(),
                         weight_scaling_omega=0.6), LogSoftmax(dim=1))
Example #19
    def test_save_with_cuda(self):
        """Whether model is correctly reconstructed after saving"""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        model = AnalogSequential(layer)
        model.cuda()
        with TemporaryFile() as file:
            save(model.state_dict(), file)
            # Create a new model and load its state dict.
            file.seek(0)
            checkpoint = load(file)
        model.load_state_dict(checkpoint)

        expected_device = device('cuda', current_device())
        expected_class = tile_classes[layer.analog_tile.tile.__class__]

        analog_tile = model[0].analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
Example #20
from torch import nn

from aihwkit.nn import AnalogConv2d, AnalogLinear, AnalogSequential
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
from aihwkit.utils.analog_info import analog_summary

# Define the RPU configuration, using a constant step device type.
rpu_config = SingleRPUConfig(device=ConstantStepDevice())

channel = [16, 32, 512, 128]
model = AnalogSequential(
    AnalogConv2d(in_channels=1,
                 out_channels=channel[0],
                 kernel_size=5,
                 stride=1,
                 rpu_config=rpu_config), nn.Tanh(),
    nn.MaxPool2d(kernel_size=2),
    AnalogConv2d(in_channels=channel[0],
                 out_channels=channel[1],
                 kernel_size=5,
                 stride=1,
                 rpu_config=rpu_config), nn.Tanh(),
    nn.MaxPool2d(kernel_size=2), nn.Tanh(), nn.Flatten(),
    AnalogLinear(in_features=channel[2],
                 out_features=channel[3],
                 rpu_config=rpu_config), nn.Tanh(),
    AnalogLinear(in_features=channel[3],
                 out_features=10,
                 rpu_config=rpu_config), nn.LogSoftmax(dim=1))
# TODO: add mapped examples

# print(model.__class__.__name__)
analog_summary(model, (1, 1, 28, 28))
Example #21
    def test_load_state_load_rpu_config_sequential(self):
        """Test creating a new model using a state dict, while using a different RPU config."""

        # Create the original RPU configuration.
        rpu_config_org = self.get_rpu_config()

        # Skipped for FP
        if isinstance(rpu_config_org, FloatingPointRPUConfig):
            raise SkipTest('Not available for FP')

        rpu_config_org.forward.is_perfect = False
        old_value = 0.11
        rpu_config_org.forward.inp_noise = old_value

        model = AnalogSequential(self.get_layer(rpu_config=rpu_config_org))
        state_dict = model.state_dict()

        rpu_config = deepcopy(rpu_config_org)
        new_value = 0.51
        rpu_config.forward.inp_noise = new_value

        # Test load_rpu_config=False
        new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
        new_model.load_state_dict(state_dict, load_rpu_config=False)
        new_analog_tile = self.get_analog_tile(new_model[0])

        parameters = new_analog_tile.tile.get_parameters()
        self.assertAlmostEqual(parameters.forward_io.inp_noise, new_value)

        # Test load_rpu_config=True
        new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
        new_model.load_state_dict(state_dict, load_rpu_config=True)
        new_analog_tile = self.get_analog_tile(new_model[0])

        parameters = new_analog_tile.tile.get_parameters()
        self.assertAlmostEqual(parameters.forward_io.inp_noise, old_value)
Example #22
def VGG8():
    """VGG8 inspired analog model."""
    model = AnalogSequential(
        AnalogConv2d(in_channels=3,
                     out_channels=128,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=128,
                     out_channels=128,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(128), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=128,
                     out_channels=256,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=256,
                     out_channels=256,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(256), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        AnalogConv2d(in_channels=256,
                     out_channels=512,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogConv2d(in_channels=512,
                     out_channels=512,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.BatchNorm2d(512), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
        nn.Flatten(),
        AnalogLinear(in_features=8192,
                     out_features=1024,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA), nn.ReLU(),
        AnalogLinear(in_features=1024,
                     out_features=N_CLASSES,
                     rpu_config=RPU_CONFIG,
                     weight_scaling_omega=WEIGHT_SCALING_OMEGA),
        nn.LogSoftmax(dim=1))

    return model
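As an alternative to declaring every analog layer by hand, an existing digital model can be converted in one call with aihwkit's ``convert_to_analog``. A minimal sketch (the digital network shown here is only an illustration, and ``RPU_CONFIG``/``N_CLASSES`` are the constants assumed by the example above):

from torch import nn
from aihwkit.nn.conversion import convert_to_analog

digital = nn.Sequential(
    nn.Conv2d(3, 128, kernel_size=3, padding=1), nn.ReLU(),
    nn.Flatten(),
    nn.Linear(128 * 32 * 32, N_CLASSES),
    nn.LogSoftmax(dim=1))

# Every Conv2d/Linear is replaced by its analog counterpart using RPU_CONFIG.
analog_model = convert_to_analog(digital, RPU_CONFIG)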