    def test_out_scaling_alpha_learning_columnwise(self):
        """Check that the out scaling alphas are learning."""
        rpu_config = InferenceRPUConfig(
            mapping=MappingParameter(weight_scaling_omega=0.6,
                                     learn_out_scaling_alpha=True,
                                     weight_scaling_omega_columnwise=True))

        analog_model = Sequential(
            self.get_layer(in_channels=2,
                           out_channels=2,
                           kernel_size=4,
                           padding=2,
                           rpu_config=rpu_config),
            self.get_layer(in_channels=2,
                           out_channels=3,
                           kernel_size=4,
                           padding=2,
                           rpu_config=rpu_config))

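        # Random input/target batches (N, C, D, H, W) for the two 3-D conv
        # layers above: kernel_size=4 with padding=2 grows each spatial dim
        # by one per layer, so 4x4x4 inputs yield 6x6x6 outputs.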
        loss_func = mse_loss
        y_b = randn(3, 3, 6, 6, 6)
        x_b = randn(3, 2, 4, 4, 4)

        if self.use_cuda:
            y_b = y_b.cuda()
            x_b = x_b.cuda()
            # Keep the model on the same device as the data.
            analog_model = analog_model.cuda()

        initial_out_scaling_alpha_0 = (
            analog_model[0].analog_tile.get_out_scaling_alpha().clone())
        initial_out_scaling_alpha_1 = (
            analog_model[1].analog_tile.get_out_scaling_alpha().clone())

        self.train_model(analog_model, loss_func, x_b, y_b)

        learned_out_scaling_alpha_0 = (
            analog_model[0].analog_tile.get_out_scaling_alpha().clone())
        learned_out_scaling_alpha_1 = (
            analog_model[1].analog_tile.get_out_scaling_alpha().clone())

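        # After training, each tile's out scaling alpha should have received
        # a gradient and moved away from its initial (cloned) value.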
        self.assertGreaterEqual(initial_out_scaling_alpha_0.numel(), 1)
        self.assertIsNotNone(
            analog_model[0].analog_tile.get_out_scaling_alpha().grad)
        self.assertNotAlmostEqualTensor(initial_out_scaling_alpha_0,
                                        learned_out_scaling_alpha_0)

        self.assertGreaterEqual(initial_out_scaling_alpha_1.numel(), 1)
        self.assertIsNotNone(
            analog_model[1].analog_tile.get_out_scaling_alpha().grad)
        self.assertNotAlmostEqualTensor(initial_out_scaling_alpha_1,
                                        learned_out_scaling_alpha_1)
Example #2
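The same check on a small fully connected stack: two analog layers, a fixed seed, and hand-picked input/target tensors.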
    def test_out_scaling_alpha_learning_columnwise(self):
        """Check that the out scaling alphas learn when columnwise scaling is enabled."""
        loss_func = mse_loss

        x_b = Tensor([[0.1, 0.2, 0.3, 0.4], [0.2, 0.4, 0.3, 0.1]])
        y_b = Tensor([[0.3], [0.6]])

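        # Fix the seed so the layer initialization is reproducible.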
        manual_seed(4321)

        rpu_config = InferenceRPUConfig(
            mapping=MappingParameter(weight_scaling_omega=0.6,
                                     learn_out_scaling_alpha=True,
                                     weight_scaling_omega_columnwise=True))

        model = Sequential(self.get_layer(4, 2, rpu_config=rpu_config),
                           self.get_layer(2, 1, rpu_config=rpu_config))
        if self.use_cuda:
            x_b = x_b.cuda()
            y_b = y_b.cuda()
            model = model.cuda()

        initial_out_scaling_alpha_0 = (
            model[0].analog_tile.get_out_scaling_alpha().clone())
        initial_out_scaling_alpha_1 = (
            model[1].analog_tile.get_out_scaling_alpha().clone())

        self.train_model(model, loss_func, x_b, y_b)

        learned_out_scaling_alpha_0 = (
            model[0].analog_tile.get_out_scaling_alpha().clone())
        learned_out_scaling_alpha_1 = (
            model[1].analog_tile.get_out_scaling_alpha().clone())

        self.assertGreaterEqual(initial_out_scaling_alpha_0.numel(), 1)
        self.assertIsNotNone(model[0].analog_tile.get_out_scaling_alpha().grad)
        self.assertNotAlmostEqualTensor(initial_out_scaling_alpha_0,
                                        learned_out_scaling_alpha_0)

        self.assertGreaterEqual(initial_out_scaling_alpha_1.numel(), 1)
        self.assertIsNotNone(model[1].analog_tile.get_out_scaling_alpha().grad)
        self.assertNotAlmostEqualTensor(initial_out_scaling_alpha_1,
                                        learned_out_scaling_alpha_1)
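
Both snippets come from a parametrized test case: get_layer, train_model,
use_cuda and assertNotAlmostEqualTensor are helpers provided by the test
base class and are not shown here. A minimal sketch of the imports the
snippets appear to rely on follows; the aihwkit module paths are an
assumption (they match aihwkit ~0.5/0.6 and may differ in other releases):

# Imports assumed by the snippets above; the aihwkit paths are a best
# guess and may need adjusting for your installed version.
from torch import Tensor, randn, manual_seed
from torch.nn import Sequential
from torch.nn.functional import mse_loss

from aihwkit.simulator.configs import InferenceRPUConfig
from aihwkit.simulator.configs.utils import MappingParameter

With weight_scaling_omega_columnwise=True, get_out_scaling_alpha()
presumably returns one scale per output column rather than a single
scalar, which is why both tests snapshot the whole tensor before and
after training and then assert that a gradient exists and the values
have moved.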