Example 1
    def get_model_and_x(self):
        """Trains a simple model."""
        # Prepare the datasets (input and expected output).
        x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
        y = Tensor([[1.0, 0.5], [0.7, 0.3]])

        # Define a single-layer network, adding forward-pass noise to the RPU configuration.
        rpu_config = self.get_rpu_config()
        rpu_config.forward.out_res = -1.  # Turn off (output) ADC discretization.
        rpu_config.forward.w_noise_type = WeightNoiseType.ADDITIVE_CONSTANT
        rpu_config.forward.w_noise = 0.02
        rpu_config.noise_model = PCMLikeNoiseModel(g_max=25.0)

        model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

        # Move the model and tensors to cuda if it is available.
        if self.use_cuda:
            x = x.cuda()
            y = y.cuda()
            model.cuda()

        # Define an analog-aware optimizer and prepare it for the analog layers.
        opt = AnalogSGD(model.parameters(), lr=0.1)
        opt.regroup_param_groups(model)

        for _ in range(100):
            opt.zero_grad()

            # Compute the model prediction (forward pass).
            pred = model(x)
            # Compute the loss against the expected output.
            loss = mse_loss(pred, y)
            # Run the backward pass.
            loss.backward()

            opt.step()

        return model, x
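
Example 1 is a method of a test class: get_rpu_config() and use_cuda come from the surrounding test harness and are not shown. A minimal sketch of the missing pieces, assuming an inference-oriented configuration (InferenceRPUConfig is an assumption, chosen because the snippet sets forward.w_noise and noise_model, which that configuration exposes):

from torch import Tensor
from torch.nn.functional import mse_loss

from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import InferenceRPUConfig
from aihwkit.simulator.configs.utils import WeightNoiseType
from aihwkit.inference import PCMLikeNoiseModel

def get_rpu_config():
    # Hypothetical stand-in for self.get_rpu_config(); the real test
    # may construct a different configuration.
    return InferenceRPUConfig()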
Example 2
from torch import Tensor
from torch.nn.functional import mse_loss

from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
from aihwkit.simulator.rpu_base import cuda

# Prepare the datasets (input and expected output).
x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
y = Tensor([[1.0, 0.5], [0.7, 0.3]])

# Define a single-layer network, using a constant step device type.
rpu_config = SingleRPUConfig(device=ConstantStepDevice())
model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)

# Move the model and tensors to cuda if it is available.
if cuda.is_compiled():
    x = x.cuda()
    y = y.cuda()
    model.cuda()

# Define an analog-aware optimizer and prepare it for the analog layers.
opt = AnalogSGD(model.parameters(), lr=0.1)
opt.regroup_param_groups(model)

for epoch in range(100):
    # Delete the gradients from the previous iteration.
    opt.zero_grad()

    # Compute the model prediction (forward pass).
    pred = model(x)
    # Compute the loss against the expected output.
    loss = mse_loss(pred, y)
    # Run the backward pass.
    loss.backward()

    opt.step()
    print('Loss error: {:.16f}'.format(loss))
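
After the loop, the trained model can be queried directly. A minimal evaluation sketch (this step is an addition, not part of the original example):

from torch import no_grad

model.eval()
with no_grad():
    print('Trained prediction:', model(x))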
Example 3
    def test_against_fp(self):
        """Test whether FP is same as is_perfect inference tile."""
        # pylint: disable-msg=too-many-locals
        # Prepare the datasets (input and expected output).
        x = Tensor([[0.1, 0.2, 0.4, 0.3], [0.2, 0.1, 0.1, 0.3]])
        y = Tensor([[1.0, 0.5], [0.7, 0.3]])

        # Define matching single-layer networks: analog with a perfect forward pass, native torch, and floating point.
        rpu_config = self.get_rpu_config()
        rpu_config.forward.is_perfect = True
        model_torch = Linear(4, 2, bias=True)
        model = AnalogLinear(4, 2, bias=True, rpu_config=rpu_config)
        model.set_weights(model_torch.weight, model_torch.bias)
        model_fp = AnalogLinear(4,
                                2,
                                bias=True,
                                rpu_config=FloatingPointRPUConfig())
        model_fp.set_weights(model_torch.weight, model_torch.bias)

        self.assertTensorAlmostEqual(model.get_weights()[0],
                                     model_torch.weight)
        self.assertTensorAlmostEqual(model.get_weights()[0],
                                     model_fp.get_weights()[0])

        # Move the model and tensors to cuda if it is available.
        if self.use_cuda:
            x = x.cuda()
            y = y.cuda()
            model.cuda()
            model_fp.cuda()
            model_torch.cuda()

        # Define optimizers: analog-aware for the analog models, plain SGD for torch.
        opt = AnalogSGD(model.parameters(), lr=0.1)
        opt_fp = AnalogSGD(model_fp.parameters(), lr=0.1)
        opt_torch = SGD(model_torch.parameters(), lr=0.1)

        for _ in range(100):

            # Training step for the analog model.
            opt.zero_grad()
            pred = model(x)
            loss = mse_loss(pred, y)
            loss.backward()
            opt.step()

            # same for fp
            opt_fp.zero_grad()
            pred_fp = model_fp(x)
            loss_fp = mse_loss(pred_fp, y)
            loss_fp.backward()
            opt_fp.step()

            # same for torch
            opt_torch.zero_grad()
            pred_torch = model_torch(x)
            loss_torch = mse_loss(pred_torch, y)
            loss_torch.backward()
            opt_torch.step()

            self.assertTensorAlmostEqual(pred_torch, pred)
            self.assertTensorAlmostEqual(loss_torch, loss)
            self.assertTensorAlmostEqual(model.get_weights()[0],
                                         model_torch.weight)

            self.assertTensorAlmostEqual(pred_fp, pred)
            self.assertTensorAlmostEqual(loss_fp, loss)
            self.assertTensorAlmostEqual(model.get_weights()[0],
                                         model_fp.get_weights()[0])
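
Example 3 depends on an assertTensorAlmostEqual helper from its test base class (and on imports such as torch.nn.Linear, torch.optim.SGD, and aihwkit's FloatingPointRPUConfig, which the excerpt omits). A minimal sketch of what such a helper could look like; the numpy-based comparison is an assumption:

from numpy.testing import assert_array_almost_equal

def assertTensorAlmostEqual(self, tensor_a, tensor_b):
    # Compare two tensors element-wise up to numpy's default decimal precision.
    array_a = tensor_a.detach().cpu().numpy()
    array_b = tensor_b.detach().cpu().numpy()
    assert_array_almost_equal(array_a, array_b)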