Example #1
from flwr.server.history import History


def test_add_metrics_centralized() -> None:
    """Test add_metrics_centralized."""
    # Prepare
    history = History()

    # Execute
    history.add_metrics_centralized(rnd=0, metrics={"acc": 0.9})

    # Assert
    assert len(history.losses_distributed) == 0
    assert len(history.losses_centralized) == 0
    assert len(history.metrics_distributed) == 0
    assert len(history.metrics_centralized) == 1
    assert (0, 0.9) == history.metrics_centralized["acc"][0]
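
For comparison, a minimal sketch of the distributed counterpart (not part of the original listing), assuming metrics_distributed mirrors the (round, value) structure shown above for metrics_centralized:

def test_add_metrics_distributed() -> None:
    """Sketch: test add_metrics_distributed."""
    # Prepare
    history = History()

    # Execute
    history.add_metrics_distributed(rnd=0, metrics={"acc": 0.9})

    # Assert: one metric key recorded, with a (round, value) entry
    assert len(history.metrics_distributed) == 1
    assert (0, 0.9) == history.metrics_distributed["acc"][0]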
Example #2
from flwr.server.history import History


def test_add_loss_centralized() -> None:
    """Test add_loss_centralized."""
    # Prepare
    history = History()

    # Execute
    history.add_loss_centralized(rnd=0, loss=2.0)

    # Assert
    assert len(history.losses_distributed) == 0
    assert len(history.losses_centralized) == 1
    assert len(history.metrics_distributed) == 0
    assert len(history.metrics_centralized) == 0
    assert (0, 2.0) == history.losses_centralized[0]
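
A minimal usage sketch (an assumption, not taken from the original tests) showing how History accumulates one (round, loss) entry per call and how the recorded values can be read back:

history = History()
for rnd, loss in enumerate([2.0, 1.5, 1.1], start=1):
    history.add_loss_centralized(rnd=rnd, loss=loss)

# losses_centralized is a list of (round, loss) tuples, in insertion order
assert history.losses_centralized == [(1, 2.0), (2, 1.5), (3, 1.1)]
final_round, final_loss = history.losses_centralized[-1]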
Example #3
    def fit(self, num_rounds: int) -> History:
        """Run federated averaging for a number of rounds."""
        history = History()

        # Initialize parameters
        log(INFO, "Initializing global parameters")
        self.parameters = self._get_initial_parameters()
        log(INFO, "Evaluating initial parameters")
        res = self.strategy.evaluate(parameters=self.parameters)
        if res is not None:
            log(
                INFO,
                "initial parameters (loss, other metrics): %s, %s",
                res[0],
                res[1],
            )
            history.add_loss_centralized(rnd=0, loss=res[0])
            history.add_metrics_centralized(rnd=0, metrics=res[1])

        # Run federated learning for num_rounds
        log(INFO, "FL starting")
        start_time = timeit.default_timer()

        for current_round in range(1, num_rounds + 1):
            # Train model and replace previous global model
            res_fit = self.fit_round(rnd=current_round)
            if res_fit:
                parameters_prime, _, _ = res_fit  # fit_metrics_aggregated
                if parameters_prime:
                    self.parameters = parameters_prime

            # Evaluate model using strategy implementation
            res_cen = self.strategy.evaluate(parameters=self.parameters)
            if res_cen is not None:
                loss_cen, metrics_cen = res_cen
                log(
                    INFO,
                    "fit progress: (%s, %s, %s, %s)",
                    current_round,
                    loss_cen,
                    metrics_cen,
                    timeit.default_timer() - start_time,
                )
                history.add_loss_centralized(rnd=current_round, loss=loss_cen)
                history.add_metrics_centralized(rnd=current_round,
                                                metrics=metrics_cen)

            # Evaluate model on a sample of available clients
            res_fed = self.evaluate_round(rnd=current_round)
            if res_fed:
                loss_fed, evaluate_metrics_fed, _ = res_fed
                if loss_fed:
                    history.add_loss_distributed(rnd=current_round,
                                                 loss=loss_fed)
                    history.add_metrics_distributed(
                        rnd=current_round, metrics=evaluate_metrics_fed)

        # Bookkeeping
        end_time = timeit.default_timer()
        elapsed = end_time - start_time
        log(INFO, "FL finished in %s", elapsed)
        return history
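
A minimal sketch of consuming the History returned by this method; server is a hypothetical instance of the class that defines fit above, and "acc" stands in for whatever keys the strategy's evaluate returns:

history = server.fit(num_rounds=3)

# Centralized results: one (round, value) tuple per evaluated round
for rnd, loss in history.losses_centralized:
    print(f"round {rnd}: centralized loss = {loss:.4f}")

for rnd, acc in history.metrics_centralized.get("acc", []):
    print(f"round {rnd}: centralized 'acc' = {acc:.4f}")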
Example #4
    def fit(self, num_rounds: int) -> History:
        """Run federated averaging for a number of rounds."""
        history = History()
        # Initialize weights by asking one client to return theirs
        self.weights = self._get_initial_weights()
        res = self.strategy.evaluate(weights=self.weights)
        if res is not None:
            log(
                INFO, "initial weights (loss/accuracy): %s, %s", res[0], res[1],
            )
            history.add_loss_centralized(rnd=0, loss=res[0])
            history.add_accuracy_centralized(rnd=0, acc=res[1])

        # Run federated learning for num_rounds
        log(INFO, "[TIME] FL starting")
        start_time = timeit.default_timer()

        for current_round in range(1, num_rounds + 1):
            # Train model and replace previous global model
            weights_prime = self.fit_round(rnd=current_round)
            if weights_prime is not None:
                self.weights = weights_prime

            # Evaluate model using strategy implementation
            res_cen = self.strategy.evaluate(weights=self.weights)
            if res_cen is not None:
                loss_cen, acc_cen = res_cen
                log(
                    INFO,
                    "fit progress: (%s, %s, %s, %s)",
                    current_round,
                    loss_cen,
                    acc_cen,
                    timeit.default_timer() - start_time,
                )
                history.add_loss_centralized(rnd=current_round, loss=loss_cen)
                history.add_accuracy_centralized(rnd=current_round, acc=acc_cen)

            # Evaluate model on a sample of available clients
            res_fed = self.evaluate(rnd=current_round)
            if res_fed is not None and res_fed[0] is not None:
                loss_fed, _ = res_fed
                history.add_loss_distributed(
                    rnd=current_round, loss=cast(float, loss_fed)
                )

            # Conclude round
            loss = res_cen[0] if res_cen is not None else None
            acc = res_cen[1] if res_cen is not None else None
            should_continue = self.strategy.on_conclude_round(current_round, loss, acc)
            if not should_continue:
                break

        end_time = timeit.default_timer()
        elapsed = end_time - start_time
        log(INFO, "[TIME] FL finished in %s", elapsed)
        return history
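
Note: Example #4 is an earlier revision of the same loop as Example #3. It exchanges raw weights rather than a parameters object, records a single accuracy value through add_accuracy_centralized instead of a metrics dictionary, and lets the strategy stop training early via on_conclude_round.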