def test_predict(self):
        """Check that predict fails without provider/backend and otherwise returns a valid result."""
        from torch.utils.data import DataLoader

        loader = DataLoader(TorchDataset([1], [1]), batch_size=1, shuffle=False)
        connector = TorchConnector(self._qnn, [1])
        opt = Adam(connector.parameters(), lr=0.1)
        loss = MSELoss(reduction="sum")
        # Without a provider or backend, prediction must fail fast.
        offline_client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
        )
        with self.assertRaises(ValueError):
            offline_client.predict(loader)
        # Fully configured client: predict should produce a valid inference result.
        client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
            provider=self._infer_provider,
            backend=self._backend,
        )
        self.validate_infer_result(client.predict(loader))
    def test_score(self):
        """Check that score fails without provider/backend and works with named and callable score functions."""
        from torch.utils.data import DataLoader

        loader = DataLoader(TorchDataset([1], [1]), batch_size=1, shuffle=False)
        connector = TorchConnector(self._qnn, [1])
        opt = Adam(connector.parameters(), lr=0.1)
        loss = MSELoss(reduction="sum")
        # Without a provider or backend, scoring must fail fast.
        offline_client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
        )
        with self.assertRaises(ValueError):
            offline_client.score(loader, score_func="regression")
        # Fully configured client for real scoring.
        client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
            provider=self._infer_provider,
            backend=self._backend,
        )
        # Built-in score functions selected by name, in the same order as before.
        for score_name in ("regression", "classification"):
            self.validate_infer_result(client.score(loader, score_func=score_name), score=True)

        def count_correct(output: Tensor, target: Tensor) -> float:
            # Custom callable score: number of correct argmax predictions.
            predictions = output.argmax(dim=1, keepdim=True)
            return predictions.eq(target.view_as(predictions)).sum().item()

        self.validate_infer_result(client.score(loader, score_func=count_correct), score=True)
    def test_fit(self):
        """Check that fit fails without provider/backend and runs with default and explicit options."""
        from torch.utils.data import DataLoader

        loader = DataLoader(TorchDataset([1], [1]), batch_size=1, shuffle=False)
        connector = TorchConnector(self._qnn, [1])
        opt = Adam(connector.parameters(), lr=0.1)
        loss = MSELoss(reduction="sum")
        # Without a provider or backend, training must fail fast.
        offline_client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
        )
        with self.assertRaises(ValueError):
            offline_client.fit(loader)

        # Fit with the client's default training settings.
        client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
            provider=self._trainer_provider,
            backend=self._backend,
        )
        self.validate_train_result(client.fit(loader))

        # Fit with explicit epochs/shots on the client and explicit fit arguments.
        configured_client = TorchRuntimeClient(
            model=connector,
            optimizer=opt,
            loss_func=loss,
            provider=self._trainer_provider,
            epochs=1,
            backend=self._backend,
            shots=1024,
        )
        self.validate_train_result(configured_client.fit(loader, start_epoch=0, seed=42))
    def test_fit_with_validation_set(self):
        """Check that fit accepts a validation loader and produces a result that reflects it."""
        from torch.utils.data import DataLoader

        train_loader = DataLoader(TorchDataset([1], [1]), batch_size=1, shuffle=False)
        connector = TorchConnector(self._qnn, [1])
        client = TorchRuntimeClient(
            model=connector,
            optimizer=Adam(connector.parameters(), lr=0.1),
            loss_func=MSELoss(reduction="sum"),
            provider=self._trainer_provider,
            backend=self._backend,
        )
        # A distinct, single-sample validation set.
        val_loader = DataLoader(TorchDataset([0], [0]), batch_size=1, shuffle=False)
        fit_result = client.fit(train_loader, val_loader=val_loader)
        self.validate_train_result(fit_result, val_loader=True)
    def test_fit_with_hooks(self):
        """Check that fit accepts both a single hook and a list of hooks."""
        from torch.utils.data import DataLoader

        loader = DataLoader(TorchDataset([1], [1]), batch_size=1, shuffle=False)
        connector = TorchConnector(self._qnn, [1])
        client = TorchRuntimeClient(
            model=connector,
            optimizer=Adam(connector.parameters(), lr=0.1),
            loss_func=MSELoss(reduction="sum"),
            provider=self._trainer_provider,
            backend=self._backend,
        )
        hook = HookBase()
        # A single hook instance.
        self.validate_train_result(client.fit(loader, hooks=hook))
        # The same hook supplied twice as a list.
        self.validate_train_result(client.fit(loader, hooks=[hook, hook]))
    def test_circuit_qnn_batch_gradients(self, config):
        """Check that a CircuitQNN batch gradient equals the sum of per-sample gradients."""
        import torch
        from torch.nn import MSELoss
        from torch.optim import SGD

        output_shape, interpret = config
        num_inputs = 2

        # Circuit: ZZ feature map followed by a variational ansatz on the same qubits.
        feature_map = ZZFeatureMap(num_inputs)
        ansatz = RealAmplitudes(num_inputs, entanglement="linear", reps=1)
        circuit = QuantumCircuit(num_inputs)
        circuit.append(feature_map, range(num_inputs))
        circuit.append(ansatz, range(num_inputs))

        qnn = CircuitQNN(
            circuit,
            input_params=feature_map.parameters,
            weight_params=ansatz.parameters,
            interpret=interpret,
            output_shape=output_shape,
            quantum_instance=self._sv_quantum_instance,
        )

        # Wrap the QNN as a PyTorch module with fixed initial weights.
        model = TorchConnector(qnn, np.array([0.1] * qnn.num_weights))
        model.to(self._device)

        # Random data set of five samples.
        features = torch.rand(5, 2)
        targets = torch.rand(5, output_shape)

        optimizer = SGD(model.parameters(), lr=0.1)
        loss_fn = MSELoss(reduction="sum")

        # Accumulate the loss one sample at a time, then backpropagate once.
        summed_loss = 0.0
        for feature, target in zip(features, targets):
            feature = feature.to(self._device)
            target = target.to(self._device)
            summed_loss += loss_fn(model(feature), target)
        optimizer.zero_grad()
        summed_loss.backward()
        per_sample_gradients = model.weight.grad.detach().cpu()

        # The same loss computed over the whole batch in a single forward pass.
        features = features.to(self._device)
        targets = targets.to(self._device)
        batch_loss = loss_fn(model(features), targets)
        optimizer.zero_grad()
        batch_loss.backward()
        batch_gradients = model.weight.grad.detach().cpu()

        # Gradients must agree up to numerical precision.
        self.assertAlmostEqual(
            np.linalg.norm(per_sample_gradients - batch_gradients),
            0.0,
            places=4,
        )
        # And so must the two loss values.
        self.assertAlmostEqual(
            summed_loss.detach().cpu().numpy(),
            batch_loss.detach().cpu().numpy(),
            places=4,
        )