def on_configure_evaluate(
    self, rnd: int, weights: Weights, client_manager: ClientManager
) -> List[Tuple[ClientProxy, EvaluateIns]]:
    """Configure the next round of evaluation."""
    # Do not configure federated evaluation if a centralized evaluation
    # function is provided
    if self.eval_fn is not None:
        return []

    # Parameters and config
    parameters = weights_to_parameters(weights)
    config = {}
    if self.on_evaluate_config_fn is not None:
        # Custom evaluation config function provided
        config = self.on_evaluate_config_fn(rnd)
    evaluate_ins = (parameters, config)

    # Sample clients
    sample_size, min_num_clients = self.num_evaluation_clients(
        client_manager.num_available()
    )
    clients = client_manager.sample(
        num_clients=sample_size, min_num_clients=min_num_clients
    )

    # Return client/config pairs
    return [(client, evaluate_ins) for client in clients]
def _one_over_k_sampling(
    self, sample_size: int, client_manager: ClientManager
) -> List[ClientProxy]:
    """Sample clients with probability 1/k."""
    # Note: the passed-in sample_size is superseded by the value derived
    # from the number of currently available clients
    sample_size, min_num_clients = self.num_fit_clients(
        client_manager.num_available()
    )
    clients = client_manager.sample(
        num_clients=sample_size, min_num_clients=min_num_clients
    )
    return clients
def on_configure_fit(
    self, rnd: int, weights: Weights, client_manager: ClientManager
) -> List[Tuple[ClientProxy, FitIns]]:
    """Configure the next round of training."""
    self.pre_weights = weights
    parameters = weights_to_parameters(weights)
    config = {}
    if self.on_fit_config_fn is not None:
        # Custom fit config function provided
        config = self.on_fit_config_fn(rnd)
    fit_ins = (parameters, config)

    # Sample clients
    sample_size, min_num_clients = self.num_fit_clients(
        client_manager.num_available()
    )
    clients = client_manager.sample(
        num_clients=sample_size, min_num_clients=min_num_clients
    )

    # Return client/config pairs
    return [(client, fit_ins) for client in clients]
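
# --- Usage sketch (assumption, not part of the strategy above) --------------
# on_configure_fit and on_configure_evaluate simply forward whatever dict the
# user-supplied on_fit_config_fn / on_evaluate_config_fn returns for the
# current round. A minimal sketch of such a callback, assuming string-valued
# config entries; the keys and values below are illustrative, not prescribed
# by the strategy.
from typing import Dict


def fit_config(rnd: int) -> Dict[str, str]:
    """Return a per-round training configuration for sampled clients."""
    return {
        "epoch_global": str(rnd),  # current server round (illustrative key)
        "epochs": "1",             # local epochs per round (assumed)
        "batch_size": "32",        # local batch size (assumed)
    }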