Example #1
# Imports used by this example (aihwkit + PyTorch). USE_CUDA is a module-level
# flag defined earlier in the original example script.
from torch import nn
from aihwkit.nn import AnalogLinear, AnalogSequential
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice


def create_analog_network(input_size, hidden_sizes, output_size):
    """Create the neural network using analog and digital layers.

    Args:
        input_size (int): size of the Tensor at the input.
        hidden_sizes (list): list of sizes of the hidden layers (2 layers).
        output_size (int): size of the Tensor at the output.
    """
    model = AnalogSequential(
        AnalogLinear(input_size,
                     hidden_sizes[0],
                     bias=True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[0],
                     hidden_sizes[1],
                     bias=True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.Sigmoid(),
        AnalogLinear(hidden_sizes[1],
                     output_size,
                     bias=True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        nn.LogSoftmax(dim=1))

    if USE_CUDA:
        model.cuda()

    print(model)
    return model
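
A minimal usage sketch for the factory above, assuming aihwkit's AnalogSGD optimizer; the layer sizes, batch data, and learning rate are illustrative placeholders, not part of the original example.

# Usage sketch (illustrative; sizes and hyperparameters are assumptions).
from torch import randn, randint
from torch.nn import NLLLoss
from aihwkit.optim import AnalogSGD

model = create_analog_network(input_size=784, hidden_sizes=[256, 128], output_size=10)

# AnalogSGD applies the weight updates directly on the analog tiles.
optimizer = AnalogSGD(model.parameters(), lr=0.05)
optimizer.regroup_param_groups(model)

x = randn(8, 784)                # dummy input batch
y = randint(0, 10, (8,))         # dummy class labels
loss = NLLLoss()(model(x), y)    # the model ends in LogSoftmax, so NLLLoss fits
loss.backward()
optimizer.step()
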
Example #2
    def test_sequential_move_to_cuda(self):
        """Test moving AnalogSequential to cuda (from CPU)."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', current_device())

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda()

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
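
The test relies on a get_layer() helper from its test-case base class; a plausible stand-in, purely as an illustrative assumption, is a plain CPU AnalogLinear layer whose tile is expected to become a CudaAnalogTile after the move.

# Hypothetical stand-in for the get_layer() helper used above (assumption, not the suite's code).
from aihwkit.nn import AnalogLinear
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice

def get_layer(in_features=4, out_features=2):
    """Return a CPU AnalogLinear layer backed by an AnalogTile."""
    return AnalogLinear(in_features, out_features, bias=True,
                        rpu_config=SingleRPUConfig(device=ConstantStepDevice()))
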
Example #3
    def test_sequential_move_to_cuda_multiple_gpus(self):
        """Test moving AnalogSequential to cuda (from CPU), using ``.to()``."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')
        if device_count() < 2:
            raise SkipTest('Need at least two devices for this test')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        # Test whether it can move to GPU with index 1
        expected_device_num = 1

        layer = self.get_layer()
        if isinstance(layer.analog_tile.tile,
                      (tiles.CudaAnalogTile, tiles.CudaFloatingPointTile)):
            raise SkipTest('Layer is already on CUDA')

        expected_class = tile_classes[layer.analog_tile.tile.__class__]
        expected_device = device('cuda', expected_device_num)

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda(device('cuda', expected_device_num))

        analog_tile = layer.analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
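
The same move to a specific GPU can also be written with PyTorch's .to(); a short sketch, assuming an aihwkit version whose AnalogSequential forwards .to() to the analog tiles.

# Alternative device move via .to() (assumes .to() support for analog layers).
from torch import device
from aihwkit.nn import AnalogLinear, AnalogSequential

model = AnalogSequential(AnalogLinear(4, 2))
model.to(device('cuda', 1))  # container and analog tiles end up on GPU index 1
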
Example #4
    def test_sequential_move_to_cuda(self):
        """Test sequential cuda."""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            AnalogTile: CudaAnalogTile,
            CudaAnalogTile: CudaAnalogTile
        }

        layer = self.get_layer()
        expected_class = tile_classes[layer.analog_tile.__class__]

        # Create a container and move to cuda.
        model = AnalogSequential(layer)
        model.cuda()

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile, expected_class)
Example #5
    def test_save_with_cuda(self):
        """Whether model is correctly reconstructed after saving"""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        model = AnalogSequential(layer)
        model.cuda()
        with TemporaryFile() as file:
            save(model.state_dict(), file)
            # Create a new model and load its state dict.
            file.seek(0)
            checkpoint = load(file)
        model.load_state_dict(checkpoint)

        expected_device = device('cuda', current_device())
        expected_class = tile_classes[layer.analog_tile.tile.__class__]

        analog_tile = model[0].analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been moved to cuda.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
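
Outside the test class, the same save/load round trip looks as follows; a compact sketch with the tempfile and torch imports the snippet assumes.

# Standalone save/load round trip for an analog model (assumed imports shown).
from tempfile import TemporaryFile
from torch import save, load
from aihwkit.nn import AnalogLinear, AnalogSequential

model = AnalogSequential(AnalogLinear(4, 2))
with TemporaryFile() as file:
    save(model.state_dict(), file)
    file.seek(0)
    checkpoint = load(file)
model.load_state_dict(checkpoint)  # restores the layers, including the analog tile state
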
Example #6
    def test_analog_torch_optimizer(self):
        """Check analog layers with torch SGD for inference."""
        loss_func = mse_loss

        x_b = Tensor([[0.1, 0.2, 0.3, 0.4], [0.2, 0.4, 0.3, 0.1]])
        y_b = Tensor([[0.3], [0.6]])

        manual_seed(4321)
        model = Sequential(
            self.get_layer(4, 3),
            self.get_layer(3, 1),
        )
        if not isinstance(model[0].analog_tile.rpu_config, InferenceRPUConfig):
            return

        manual_seed(4321)
        model2 = AnalogSequential(
            AnalogLinear(4,
                         3,
                         rpu_config=FloatingPointRPUConfig(),
                         bias=self.bias),
            AnalogLinear(3,
                         1,
                         rpu_config=FloatingPointRPUConfig(),
                         bias=self.bias))

        if self.use_cuda:
            x_b = x_b.cuda()
            y_b = y_b.cuda()
            model = model.cuda()
            model2 = model2.cuda()

        initial_loss = loss_func(model(x_b), y_b)

        # train with SGD
        self.train_model_torch(model, loss_func, x_b, y_b)
        self.assertLess(loss_func(model(x_b), y_b), initial_loss)

        # train with AnalogSGD
        self.train_model(model2, loss_func, x_b, y_b)
        self.assertLess(loss_func(model2(x_b), y_b), initial_loss)
        final_loss = loss_func(model(x_b), y_b).detach().cpu().numpy()
        final_loss2 = loss_func(model2(x_b), y_b).detach().cpu().numpy()

        assert_array_almost_equal(final_loss, final_loss2)
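
The test leans on two training helpers from its base class; plausible stand-ins, shown purely as illustrative assumptions, would train the first model with the stock torch SGD and the second with aihwkit's AnalogSGD.

# Hypothetical training helpers matching the calls above (assumptions, not the suite's code).
from torch.optim import SGD
from aihwkit.optim import AnalogSGD


def train_model_torch(model, loss_func, x_b, y_b, epochs=100, lr=0.5):
    """Train using the stock torch SGD optimizer."""
    optimizer = SGD(model.parameters(), lr=lr)
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = loss_func(model(x_b), y_b)
        loss.backward()
        optimizer.step()


def train_model(model, loss_func, x_b, y_b, epochs=100, lr=0.5):
    """Train using AnalogSGD, which pushes the updates onto the analog tiles."""
    optimizer = AnalogSGD(model.parameters(), lr=lr)
    optimizer.regroup_param_groups(model)
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = loss_func(model(x_b), y_b)
        loss.backward()
        optimizer.step()
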