Example #1
def test_aggregate_fit() -> None:
    """Tests if adagrad fucntion is aggregating correclty."""
    # Prepare
    previous_weights: Weights = [array([0.1, 0.1, 0.1, 0.1], dtype=float32)]
    strategy = FedAdagrad(
        eta=0.1, eta_l=0.316, tau=0.5, initial_parameters=previous_weights
    )
    param_0: Parameters = weights_to_parameters(
        [array([0.2, 0.2, 0.2, 0.2], dtype=float32)]
    )
    param_1: Parameters = weights_to_parameters(
        [array([1.0, 1.0, 1.0, 1.0], dtype=float32)]
    )
    bridge = MagicMock()
    client_0 = GrpcClientProxy(cid="0", bridge=bridge)
    client_1 = GrpcClientProxy(cid="1", bridge=bridge)
    results: List[Tuple[ClientProxy, FitRes]] = [
        (
            client_0,
            FitRes(param_0, num_examples=5, num_examples_ceil=5, fit_duration=0.1),
        ),
        (
            client_1,
            FitRes(param_1, num_examples=5, num_examples_ceil=5, fit_duration=0.1),
        ),
    ]
    expected: Weights = [array([0.15, 0.15, 0.15, 0.15], dtype=float32)]

    # Execute
    actual_list = strategy.aggregate_fit(rnd=1, results=results, failures=[])
    assert actual_list is not None
    actual = actual_list[0]
    assert (actual == expected[0]).all()
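
Note: every example in this listing round-trips between Weights (a list of NumPy ndarrays) and Parameters (their serialized form). A minimal sketch of what the two helpers could look like, assuming Parameters merely wraps NumPy-serialized byte strings (hypothetical reimplementation, not the library's actual code):

from dataclasses import dataclass, field
from io import BytesIO
from typing import List

import numpy as np


@dataclass
class Parameters:
    # Hypothetical container: one serialized byte string per layer
    tensors: List[bytes] = field(default_factory=list)
    tensor_type: str = "numpy.ndarray"


def weights_to_parameters(weights: List[np.ndarray]) -> Parameters:
    """Serialize each ndarray to bytes via np.save."""
    tensors = []
    for weight in weights:
        buffer = BytesIO()
        np.save(buffer, weight, allow_pickle=False)
        tensors.append(buffer.getvalue())
    return Parameters(tensors=tensors)


def parameters_to_weights(parameters: Parameters) -> List[np.ndarray]:
    """Deserialize each byte string back into an ndarray."""
    return [np.load(BytesIO(t), allow_pickle=False) for t in parameters.tensors]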
Example #2
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # Convert results

        def norm_grad(grad_list: List[Weights]) -> float:
            # input: nested gradients
            # output: square of the L-2 norm
            client_grads = grad_list[0]
            for i in range(1, len(grad_list)):
                client_grads = np.append(
                    client_grads, grad_list[i]
                )  # output a flattened array
            return float(np.sum(np.square(client_grads)))

        deltas = []
        hs_ffl = []

        if self.pre_weights is None:
            raise Exception("QffedAvg pre_weights are None in aggregate_fit")

        weights_before = self.pre_weights
        eval_result = self.evaluate(weights_to_parameters(weights_before))
        if eval_result is None:
            # Fail loudly instead of reading an unbound `loss` below
            raise Exception("QffedAvg requires an evaluation function to obtain a loss")
        loss, _ = eval_result

        for _, fit_res in results:
            new_weights = parameters_to_weights(fit_res.parameters)
            # plug in the weight updates into the gradient
            grads = [
                (u - v) * 1.0 / self.learning_rate
                for u, v in zip(weights_before, new_weights)
            ]
            deltas.append(
                [np.float_power(loss + 1e-10, self.q_param) * grad for grad in grads]
            )
            # estimation of the local Lipschitz constant
            hs_ffl.append(
                self.q_param
                * np.float_power(loss + 1e-10, (self.q_param - 1))
                * norm_grad(grads)
                + (1.0 / self.learning_rate)
                * np.float_power(loss + 1e-10, self.q_param)
            )

        weights_aggregated: Weights = aggregate_qffl(weights_before, deltas, hs_ffl)
        return weights_to_parameters(weights_aggregated), {}
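
The aggregate_qffl helper is defined elsewhere; given the deltas and hs_ffl built above, the q-FFL step it is expected to apply looks roughly like this (a sketch under that assumption, not the library's code):

from typing import List

import numpy as np

Weights = List[np.ndarray]


def aggregate_qffl(
    weights_before: Weights, deltas: List[Weights], hs_ffl: List[float]
) -> Weights:
    """One q-FFL step: subtract the h-normalized sum of client deltas."""
    # Sum the per-client Lipschitz estimates to form a common denominator
    denominator = float(np.sum(np.asarray(hs_ffl)))
    # Sum each layer's deltas across clients
    summed_deltas = [np.sum(layer, axis=0) for layer in zip(*deltas)]
    # Step from the previous global weights
    return [w - d / denominator for w, d in zip(weights_before, summed_deltas)]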
Example #3
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        results = self.numpy_client.fit(parameters, ins.config)
        if len(results) == 2:
            print(DEPRECATION_WARNING_FIT)
            results = cast(Tuple[List[np.ndarray], int], results)
            parameters_prime, num_examples = results
            metrics: Optional[Metrics] = None
        elif len(results) == 3:
            results = cast(Tuple[List[np.ndarray], int, Metrics], results)
            parameters_prime, num_examples, metrics = results
        else:
            # Guard against unbound locals below
            raise Exception("NumPyClient.fit must return a tuple of length 2 or 3")

        # Return FitRes
        fit_duration = timeit.default_timer() - fit_begin
        parameters_prime_proto = weights_to_parameters(parameters_prime)
        return FitRes(
            parameters=parameters_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples,  # Deprecated
            fit_duration=fit_duration,  # Deprecated
            metrics=metrics,
        )
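
The numpy_client behind this wrapper may return either the deprecated 2-tuple or the newer 3-tuple; a toy client returning the 3-tuple (illustrative stand-in, not a real training client):

from typing import Dict, List, Tuple

import numpy as np


class ToyNumPyClient:
    """Illustrative stand-in whose fit() returns the 3-tuple handled above."""

    def fit(
        self, parameters: List[np.ndarray], config: Dict[str, str]
    ) -> Tuple[List[np.ndarray], int, Dict[str, float]]:
        parameters_prime = [p + 0.01 for p in parameters]  # stand-in for local training
        num_examples = 64  # size of the (imaginary) local dataset
        metrics = {"loss": 0.42}
        return parameters_prime, num_examples, metrics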
Example #4
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        weights: Weights = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        results = self.keras_client.fit(weights, ins.config)
        if len(results) == 3:
            results = cast(Tuple[List[np.ndarray], int, int], results)
            weights_prime, num_examples, num_examples_ceil = results
            metrics: Optional[Metrics] = None
        elif len(results) == 4:
            results = cast(Tuple[List[np.ndarray], int, int, Metrics], results)
            weights_prime, num_examples, num_examples_ceil, metrics = results
        else:
            # Guard against unbound locals below
            raise Exception("KerasClient.fit must return a tuple of length 3 or 4")

        # Return FitRes
        fit_duration = timeit.default_timer() - fit_begin
        weights_prime_proto = weights_to_parameters(weights_prime)
        return FitRes(
            parameters=weights_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples_ceil,
            fit_duration=fit_duration,
            metrics=metrics,
        )
Example #5
    def on_configure_evaluate(
        self, rnd: int, weights: Weights, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, EvaluateIns]]:
        """Configure the next round of evaluation."""
        # Do not configure federated evaluation if a centralized evaluation
        # function is provided
        if self.eval_fn is not None:
            return []

        # Parameters and config
        parameters = weights_to_parameters(weights)
        config = {}
        if self.on_evaluate_config_fn is not None:
            # Custom evaluation config function provided
            config = self.on_evaluate_config_fn(rnd)
        evaluate_ins = EvaluateIns(parameters, config)

        # Sample clients
        sample_size, min_num_clients = self.num_evaluation_clients(
            client_manager.num_available())
        clients = client_manager.sample(num_clients=sample_size,
                                        min_num_clients=min_num_clients)

        # Return client/config pairs
        return [(client, evaluate_ins) for client in clients]
Example #6
    def initialize_parameters(
        self, client_manager: ClientManager
    ) -> Optional[Parameters]:
        """Initialize global model parameters."""
        initial_parameters = self.initial_parameters
        self.initial_parameters = None  # Don't keep initial parameters in memory
        if isinstance(initial_parameters, list):
            initial_parameters = weights_to_parameters(weights=initial_parameters)
        return initial_parameters
Example #7
    def initialize_parameters(
        self, client_manager: ClientManager
    ) -> Optional[Parameters]:
        """Initialize global model parameters."""
        initial_parameters = self.initial_parameters
        self.initial_parameters = None  # Don't keep initial parameters in memory
        if isinstance(initial_parameters, list):
            log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)
            initial_parameters = weights_to_parameters(weights=initial_parameters)
        return initial_parameters
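
Both versions clear self.initial_parameters after the first call, so the initial weights are served exactly once. A hypothetical usage sketch (the FedAvg and SimpleClientManager import paths are assumed to match the Flower release these examples come from):

import numpy as np
from flwr.server.client_manager import SimpleClientManager
from flwr.server.strategy import FedAvg

# Passing raw ndarrays takes the deprecation branch shown above
strategy = FedAvg(initial_parameters=[np.zeros((4,), dtype=np.float32)])
client_manager = SimpleClientManager()

# First call converts the list into Parameters (Example #7 also logs a warning)
parameters = strategy.initialize_parameters(client_manager=client_manager)
assert parameters is not None

# Subsequent calls return None because initial_parameters was cleared
assert strategy.initialize_parameters(client_manager=client_manager) is None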
Example #8
    def fit_round(
        self, rnd: int
    ) -> Optional[Tuple[Optional[Parameters], Dict[str, Scalar],
                        FitResultsAndFailures]]:
        """Perform a single round of federated averaging."""

        # Get clients and their respective instructions from strategy
        client_instructions = self.strategy.configure_fit(
            rnd=rnd,
            parameters=self.parameters,
            client_manager=self._client_manager)

        if not client_instructions:
            log(INFO, "fit_round: no clients selected, cancel")
            return None
        log(
            DEBUG,
            "fit_round: strategy sampled %s clients (out of %s)",
            len(client_instructions),
            self._client_manager.num_available(),
        )

        # Collect `fit` results from all clients participating in this round
        results, failures = fit_clients(client_instructions)
        log(
            DEBUG,
            "fit_round received %s results and %s failures",
            len(results),
            len(failures),
        )

        # Aggregate training results
        aggregated_result: Union[
            Tuple[Optional[Parameters], Dict[str, Scalar]],
            Optional[Weights],  # Deprecated
        ] = self.strategy.aggregate_fit(rnd, results, failures)

        metrics_aggregated: Dict[str, Scalar] = {}
        if aggregated_result is None:
            # Backward-compatibility, this will be removed in a future update
            log(WARNING, DEPRECATION_WARNING_FIT_ROUND)
            parameters_aggregated = None
        elif isinstance(aggregated_result, list):
            # Backward-compatibility, this will be removed in a future update
            log(WARNING, DEPRECATION_WARNING_FIT_ROUND)
            parameters_aggregated = weights_to_parameters(aggregated_result)
        else:
            parameters_aggregated, metrics_aggregated = aggregated_result

        return parameters_aggregated, metrics_aggregated, (results, failures)
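
fit_clients is not shown here; a plausible sketch that fans the instructions out concurrently and separates results from failures (assumed implementation):

import concurrent.futures


def fit_clients(client_instructions):
    """Send FitIns to each client; collect (client, FitRes) pairs and failures."""
    results, failures = [], []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_client = {
            executor.submit(client.fit, ins): client
            for client, ins in client_instructions
        }
        for future in concurrent.futures.as_completed(future_to_client):
            try:
                results.append((future_to_client[future], future.result()))
            except BaseException as ex:  # record the failure, keep the round alive
                failures.append(ex)
    return results, failures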
Example #9
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}

        # Check if enough results are available
        completion_rate = len(results) / (len(results) + len(failures))
        if completion_rate < self.min_completion_rate_fit:
            # Not enough results for aggregation
            return None, {}

        # Convert results
        weights_results = [(parameters_to_weights(fit_res.parameters),
                            fit_res.num_examples)
                           for client, fit_res in results]
        weights_prime = aggregate(weights_results)

        if self.importance_sampling:
            # Track contributions to the global model
            for client, fit_res in results:
                cid = client.cid
                assert fit_res.num_examples_ceil is not None
                contribution: Tuple[int, int, int] = (
                    rnd,
                    fit_res.num_examples,
                    fit_res.num_examples_ceil,
                )
                if cid not in self.contributions.keys():
                    self.contributions[cid] = []
                self.contributions[cid].append(contribution)

        if self.dynamic_timeout:
            self.durations = []
            for client, fit_res in results:
                assert fit_res.fit_duration is not None
                assert fit_res.num_examples_ceil is not None
                cid_duration = (
                    client.cid,
                    fit_res.fit_duration,
                    fit_res.num_examples,
                    fit_res.num_examples_ceil,
                )
                self.durations.append(cid_duration)

        return weights_to_parameters(weights_prime), {}
Example #10
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ):
        # Get the round's config to decide which aggregation to perform
        config = self.on_fit_config_fn(rnd)
        if config['model'] == 'k-FED':
            # Initial checks
            if not results:
                return None, {}
            # Do not aggregate if there are failures and failures are not accepted
            if not self.accept_failures and failures:
                return None, {}
            # Gather all centroids --> (n_clients, n_centroids, n_dimensions)
            all_centroids = np.array([
                parameters_to_weights(fit_res.parameters)
                for _, fit_res in results
            ])
            print('All centroids\' shape: {}'.format(all_centroids.shape))
            # Pick one client's centroids at random as the initial basis
            idx = self.rng.integers(0, all_centroids.shape[0], 1)
            base_centroids = all_centroids[idx][0]
            # All other clients' centroids, flattened into one array
            other_centroids = all_centroids[
                np.arange(len(all_centroids)) != idx]
            other_centroids = np.concatenate(other_centroids, axis=0)
            # Greedily grow the basis until it holds n_clusters centroids
            while base_centroids.shape[0] < config['n_clusters']:
                # Distance of every remaining centroid from the current basis
                distances = [
                    distance_from_centroids(base_centroids, c)
                    for c in other_centroids
                ]
                # Add the centroid farthest from the basis
                # --> (n_centroids, n_dimensions)
                idx = np.argmax(distances)
                base_centroids = np.concatenate(
                    (base_centroids, [other_centroids[idx]]), axis=0)
                print(base_centroids.shape)
            # Save base_centroids
            print("Saving base centroids...")
            np.savez("base_centroids.npz", *base_centroids)
            return weights_to_parameters(base_centroids), {}
        else:
            aggregated_weights = super().aggregate_fit(rnd, results, failures)
            # Save aggregated_weights (agg_weights_filename is assumed to be
            # defined elsewhere in the original module)
            print("Saving aggregated weights...")
            np.savez(agg_weights_filename, *aggregated_weights)
            return aggregated_weights
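
distance_from_centroids is defined outside this snippet; for the greedy basis construction above it plausibly measures how far a candidate centroid lies from the current basis, e.g. the minimum Euclidean distance (assumption):

import numpy as np


def distance_from_centroids(base_centroids: np.ndarray, point: np.ndarray) -> float:
    """Minimum Euclidean distance from point to any row of base_centroids."""
    return float(np.min(np.linalg.norm(base_centroids - point, axis=1)))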
Example #11
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        weights: Weights = parameters_to_weights(ins[0])
        config = ins[1]

        # Train
        fit_begin = timeit.default_timer()
        weights_prime, num_examples, num_examples_ceil = self.keras_client.fit(
            weights, config)
        fit_duration = timeit.default_timer() - fit_begin

        # Return FitRes (legacy tuple-based API: FitRes is a plain tuple here,
        # matching the ins[0]/ins[1] deconstruction above)
        parameters = weights_to_parameters(weights_prime)
        return parameters, num_examples, num_examples_ceil, fit_duration
Example #12
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # Convert results
        weights_results = [
            (parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for client, fit_res in results
        ]
        return weights_to_parameters(aggregate(weights_results)), {}
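
aggregate is the example-count-weighted mean at the core of FedAvg; a compact sketch consistent with how weights_results is built above (assumed implementation):

from functools import reduce
from typing import List, Tuple

import numpy as np


def aggregate(results: List[Tuple[List[np.ndarray], int]]) -> List[np.ndarray]:
    """Example-count-weighted average of client weight lists."""
    num_examples_total = sum(num_examples for _, num_examples in results)
    # Scale each client's layers by its number of local examples
    weighted = [
        [layer * num_examples for layer in weights]
        for weights, num_examples in results
    ]
    # Sum corresponding layers across clients, then normalize
    return [reduce(np.add, layers) / num_examples_total for layers in zip(*weighted)]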
Example #13
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        fedavg_parameters_aggregated, metrics_aggregated = super(
        ).aggregate_fit(rnd=rnd, results=results, failures=failures)
        if fedavg_parameters_aggregated is None:
            return None, {}

        fedavg_weights_aggregate = parameters_to_weights(
            fedavg_parameters_aggregated)

        # Yogi
        delta_t = [
            x - y
            for x, y in zip(fedavg_weights_aggregate, self.current_weights)
        ]

        # m_t
        if not self.m_t:
            self.m_t = [np.zeros_like(x) for x in delta_t]
        self.m_t = [
            self.beta_1 * x + (1 - self.beta_1) * y
            for x, y in zip(self.m_t, delta_t)
        ]

        # v_t
        if not self.v_t:
            self.v_t = [np.zeros_like(x) for x in delta_t]
        self.v_t = [
            x - (1.0 - self.beta_2) * np.multiply(y, y) *
            np.sign(x - np.multiply(y, y)) for x, y in zip(self.v_t, delta_t)
        ]

        new_weights = [
            x + self.eta * y / (np.sqrt(z) + self.tau)
            for x, y, z in zip(self.current_weights, self.m_t, self.v_t)
        ]

        self.current_weights = new_weights

        return weights_to_parameters(self.current_weights), metrics_aggregated
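
For a single scalar the update above reads: m_t = beta_1 * m + (1 - beta_1) * delta, v_t = v - (1 - beta_2) * delta^2 * sign(v - delta^2), x_t = x + eta * m_t / (sqrt(v_t) + tau). A one-step numeric check with illustrative values:

import numpy as np

beta_1, beta_2, eta, tau = 0.9, 0.99, 0.1, 1e-3
x, m, v, delta = 0.5, 0.0, 0.0, 0.2

m = beta_1 * m + (1 - beta_1) * delta                      # 0.02
v = v - (1.0 - beta_2) * delta**2 * np.sign(v - delta**2)  # 0.0004
x = x + eta * m / (np.sqrt(v) + tau)                       # ~0.595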
Example #14
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        parameters_prime, num_examples = self.numpy_client.fit(parameters, ins.config)
        fit_duration = timeit.default_timer() - fit_begin

        # Return FitRes
        parameters_prime_proto = weights_to_parameters(parameters_prime)
        return FitRes(
            parameters=parameters_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples,  # num_examples == num_examples_ceil
            fit_duration=fit_duration,
        )
Example #15
    def on_configure_fit(
            self, rnd: int, weights: Weights,
            client_manager: ClientManager) -> List[Tuple[ClientProxy, FitIns]]:
        """Configure the next round of training."""
        parameters = weights_to_parameters(weights)
        config = {}
        if self.on_fit_config_fn is not None:
            # Custom fit config function provided
            config = self.on_fit_config_fn(rnd)
        fit_ins = FitIns(parameters, config)

        # Sample clients
        sample_size, min_num_clients = self.num_fit_clients(
            client_manager.num_available())
        clients = client_manager.sample(num_clients=sample_size,
                                        min_num_clients=min_num_clients)

        # Return client/config pairs
        return [(client, fit_ins) for client in clients]
Example #16
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Check if enough results are available
        completion_rate = len(results) / (len(results) + len(failures))
        if completion_rate < self.completion_rate_fit:
            # Not enough results for aggregation
            return None, {}
        # Convert results
        weights_results = [
            (parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for client, fit_res in results
        ]
        return weights_to_parameters(aggregate(weights_results)), {}
Example #17
    def configure_fit(
        self, rnd: int, weights: Weights, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, FitIns]]:
        """Configure the next round of training."""

        # Block until `min_num_clients` are available
        sample_size, min_num_clients = self.num_fit_clients(
            client_manager.num_available()
        )
        success = client_manager.wait_for(
            num_clients=min_num_clients, timeout=WAIT_TIMEOUT
        )
        if not success:
            # Do not continue if not enough clients are available
            log(
                INFO,
                "FedFS: not enough clients available after timeout %s",
                WAIT_TIMEOUT,
            )
            return []

        # Sample clients
        clients = self._contribution_based_sampling(
            sample_size=sample_size, client_manager=client_manager
        )

        # Prepare parameters and config
        parameters = weights_to_parameters(weights)
        config = {}
        if self.on_fit_config_fn is not None:
            # Use custom fit config function if provided
            config = self.on_fit_config_fn(rnd)

        # Set timeout for this round
        use_fast_timeout = is_fast_round(rnd - 1, self.r_fast, self.r_slow)
        config["timeout"] = str(self.t_fast if use_fast_timeout else self.t_slow)

        # Fit instructions
        fit_ins = FitIns(parameters, config)

        # Return client/config pairs
        return [(client, fit_ins) for client in clients]
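
is_fast_round decides whether round rnd - 1 falls in the fast or the slow phase of an alternating cycle of r_fast fast rounds followed by r_slow slow ones; a plausible sketch (assumed implementation):

def is_fast_round(rnd: int, r_fast: int, r_slow: int) -> bool:
    """True while rnd falls in the first r_fast rounds of each cycle."""
    return (rnd % (r_fast + r_slow)) < r_fast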
Example #18
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        fedavg_parameters_aggregated, metrics_aggregated = super(
        ).aggregate_fit(rnd=rnd, results=results, failures=failures)
        if fedavg_parameters_aggregated is None:
            return None, {}

        fedavg_aggregate = parameters_to_weights(fedavg_parameters_aggregated)
        aggregated_updates = [
            subset_weights - self.current_weights[idx]
            for idx, subset_weights in enumerate(fedavg_aggregate)
        ]

        # Adagrad
        delta_t = aggregated_updates
        if not self.v_t:
            self.v_t = [
                np.zeros_like(subset_weights) for subset_weights in delta_t
            ]

        self.v_t = [
            self.v_t[idx] + np.multiply(subset_weights, subset_weights)
            for idx, subset_weights in enumerate(delta_t)
        ]

        new_weights = [
            self.current_weights[idx] + self.eta * delta_t[idx] /
            (np.sqrt(self.v_t[idx]) + self.tau) for idx in range(len(delta_t))
        ]
        self.current_weights = new_weights

        return weights_to_parameters(self.current_weights), metrics_aggregated
Example #19
    def get_parameters(self) -> ParametersRes:
        """Return the current local model parameters."""
        weights = self.keras_client.get_weights()
        parameters = weights_to_parameters(weights)
        return ParametersRes(parameters=parameters)
Example #20
    def get_parameters(self) -> ParametersRes:
        """Return the current local model parameters."""
        parameters = self.numpy_client.get_parameters()
        parameters_proto = weights_to_parameters(parameters)
        return ParametersRes(parameters=parameters_proto)
Example #21
    def configure_fit(
            self, rnd: int, weights: Weights,
            client_manager: ClientManager) -> List[Tuple[ClientProxy, FitIns]]:
        """Configure the next round of training."""

        # Block until `min_num_clients` are available
        sample_size, min_num_clients = self.num_fit_clients(
            client_manager.num_available())
        success = client_manager.wait_for(num_clients=min_num_clients,
                                          timeout=WAIT_TIMEOUT)
        if not success:
            # Do not continue if not enough clients are available
            log(
                INFO,
                "FedFS: not enough clients available after timeout %s",
                WAIT_TIMEOUT,
            )
            return []

        # Sample clients
        msg = "FedFS round %s, sample %s clients (based on all previous contributions)"
        if self.alternating_timeout:
            log(
                DEBUG,
                msg,
                str(rnd),
                str(sample_size),
            )
            clients = self._contribution_based_sampling(
                sample_size=sample_size, client_manager=client_manager)
        elif self.importance_sampling:
            if rnd == 1:
                # Sample with 1/k in the first round
                log(
                    DEBUG,
                    "FedFS round %s, sample %s clients with 1/k",
                    str(rnd),
                    str(sample_size),
                )
                clients = self._one_over_k_sampling(
                    sample_size=sample_size, client_manager=client_manager)
            else:
                fast_round = is_fast_round(rnd - 1,
                                           r_fast=self.r_fast,
                                           r_slow=self.r_slow)
                log(
                    DEBUG,
                    "FedFS round %s, sample %s clients, fast_round %s",
                    str(rnd),
                    str(sample_size),
                    str(fast_round),
                )
                clients = self._fs_based_sampling(
                    sample_size=sample_size,
                    client_manager=client_manager,
                    fast_round=fast_round,
                )
        else:
            clients = self._one_over_k_sampling(sample_size=sample_size,
                                                client_manager=client_manager)

        # Prepare parameters and config
        parameters = weights_to_parameters(weights)
        config = {}
        if self.on_fit_config_fn is not None:
            # Use custom fit config function if provided
            config = self.on_fit_config_fn(rnd)

        # Set timeout for this round
        if self.dynamic_timeout:
            if self.durations:
                candidates = timeout_candidates(
                    durations=self.durations,
                    max_timeout=self.t_slow,
                )
                timeout = next_timeout(
                    candidates=candidates,
                    percentile=self.dynamic_timeout_percentile,
                )
                config["timeout"] = str(timeout)
            else:
                # The initial round has no past durations; use max_timeout
                config["timeout"] = str(self.t_slow)
        elif self.alternating_timeout:
            use_fast_timeout = is_fast_round(rnd - 1, self.r_fast, self.r_slow)
            config["timeout"] = str(
                self.t_fast if use_fast_timeout else self.t_slow)
        else:
            config["timeout"] = str(self.t_slow)

        # Fit instructions
        fit_ins = FitIns(parameters, config)

        # Return client/config pairs
        return [(client, fit_ins) for client in clients]
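
The dynamic-timeout helpers are not shown; a sketch of what they plausibly compute from the recorded (cid, fit_duration, num_examples, num_examples_ceil) tuples (assumed implementation):

import math
from typing import List, Tuple


def timeout_candidates(
    durations: List[Tuple[str, float, int, int]], max_timeout: float
) -> List[float]:
    """Extrapolate each client's duration to its full dataset, capped at max_timeout."""
    scaled = [
        fit_duration * (num_examples_ceil / num_examples)
        for _, fit_duration, num_examples, num_examples_ceil in durations
    ]
    return [min(candidate, max_timeout) for candidate in scaled]


def next_timeout(candidates: List[float], percentile: float) -> int:
    """Pick the smallest timeout covering the requested fraction of candidates."""
    ordered = sorted(candidates)
    num_included = math.ceil(len(ordered) * percentile)
    return math.ceil(ordered[num_included - 1])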