Example #1
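    # Likely imports for this test method, inferred from the names it uses
    # (the test-case class providing `self.get_layer()` is not shown):
    #   from tempfile import TemporaryFile
    #   from unittest import SkipTest
    #   from torch import device, load, save
    #   from torch.cuda import current_device
    #   from aihwkit.nn import AnalogSequential
    #   from aihwkit.simulator import tiles
    #   from aihwkit.simulator.rpu_base import cuda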
    def test_save_with_cuda(self):
        """Whether model is correctly reconstructed after saving"""
        if not cuda.is_compiled():
            raise SkipTest('not compiled with CUDA support')

        # Map the original tile classes to the expected ones after `cuda()`.
        tile_classes = {
            tiles.AnalogTile: tiles.CudaAnalogTile,
            tiles.CudaAnalogTile: tiles.CudaAnalogTile
        }

        layer = self.get_layer()
        model = AnalogSequential(layer)
        model.cuda()
        with TemporaryFile() as file:
            save(model.state_dict(), file)
            # Create a new model and load its state dict.
            file.seek(0)
            checkpoint = load(file)
        model.load_state_dict(checkpoint)

        expected_device = device('cuda', current_device())
        expected_class = tile_classes[layer.analog_tile.tile.__class__]

        analog_tile = model[0].analog_tile
        self.assertEqual(analog_tile.device, expected_device)
        self.assertEqual(analog_tile.get_analog_ctx().data.device,
                         expected_device)
        if analog_tile.shared_weights is not None:
            self.assertEqual(analog_tile.shared_weights.data.device,
                             expected_device)
            self.assertEqual(analog_tile.shared_weights.data.size()[0],
                             analog_tile.tile.get_x_size())
            self.assertEqual(analog_tile.shared_weights.data.size()[1],
                             analog_tile.tile.get_d_size())

        # Assert the tile has been converted to its CUDA counterpart class.
        self.assertIsInstance(layer.analog_tile.tile, expected_class)
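
For reference, a minimal standalone sketch of the same save/load round-trip outside the test harness could look like the following. It assumes aihwkit's AnalogLinear layer (not used in the test above) and a CUDA-enabled build; the layer sizes are arbitrary.

# Minimal sketch, not part of the test above: save and reload an analog
# model on CUDA, assuming aihwkit's AnalogLinear and a CUDA-enabled build.
from tempfile import TemporaryFile

from torch import load, save

from aihwkit.nn import AnalogLinear, AnalogSequential

model = AnalogSequential(AnalogLinear(4, 2))
model.cuda()  # converts the analog tiles to their CUDA counterparts

with TemporaryFile() as file:
    save(model.state_dict(), file)
    file.seek(0)  # rewind before reading the checkpoint back
    checkpoint = load(file)

model.load_state_dict(checkpoint)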
Example #2
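    # Likely imports for this test method, inferred from the names it uses:
    #   from copy import deepcopy
    #   from unittest import SkipTest
    #   from aihwkit.nn import AnalogSequential
    #   from aihwkit.simulator.configs import FloatingPointRPUConfig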
    def test_load_state_load_rpu_config_sequential(self):
        """Test creating a new model using a state dict, while using a different RPU config."""

        # Create the original RPU config.
        rpu_config_org = self.get_rpu_config()

        # Skipped for floating point configurations (no forward IO parameters to modify).
        if isinstance(rpu_config_org, FloatingPointRPUConfig):
            raise SkipTest('Not available for FP')

        rpu_config_org.forward.is_perfect = False
        old_value = 0.11
        rpu_config_org.forward.inp_noise = old_value

        model = AnalogSequential(self.get_layer(rpu_config=rpu_config_org))
        state_dict = model.state_dict()

        rpu_config = deepcopy(rpu_config_org)
        new_value = 0.51
        rpu_config.forward.inp_noise = new_value

        # Test load_rpu_config=False: the new model keeps its own RPU config.
        new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
        new_model.load_state_dict(state_dict, load_rpu_config=False)
        new_analog_tile = self.get_analog_tile(new_model[0])

        parameters = new_analog_tile.tile.get_parameters()
        self.assertAlmostEqual(parameters.forward_io.inp_noise, new_value)

        # Test load_rpu_config=True: the RPU config is restored from the state dict.
        new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
        new_model.load_state_dict(state_dict, load_rpu_config=True)
        new_analog_tile = self.get_analog_tile(new_model[0])

        parameters = new_analog_tile.tile.get_parameters()
        self.assertAlmostEqual(parameters.forward_io.inp_noise, old_value)
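
A standalone sketch of the load_rpu_config switch, assuming aihwkit's AnalogLinear and SingleRPUConfig (neither name appears in the test above):

# Sketch of the load_rpu_config semantics; AnalogLinear and SingleRPUConfig
# are assumptions, not taken from the test above.
from copy import deepcopy

from aihwkit.nn import AnalogLinear, AnalogSequential
from aihwkit.simulator.configs import SingleRPUConfig

rpu_config = SingleRPUConfig()
rpu_config.forward.inp_noise = 0.11  # value stored in the checkpoint

model = AnalogSequential(AnalogLinear(4, 2, rpu_config=rpu_config))
state_dict = model.state_dict()

new_config = deepcopy(rpu_config)
new_config.forward.inp_noise = 0.51  # value of the destination model

new_model = AnalogSequential(AnalogLinear(4, 2, rpu_config=new_config))

# load_rpu_config=False keeps the destination config (inp_noise stays 0.51);
# load_rpu_config=True would restore the checkpointed value (0.11).
new_model.load_state_dict(state_dict, load_rpu_config=False)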