Example #1
def test_aggregate_fit() -> None:
    """Tests if adagrad fucntion is aggregating correclty."""
    # Prepare
    previous_weights: Weights = [array([0.1, 0.1, 0.1, 0.1], dtype=float32)]
    strategy = FedAdagrad(
        eta=0.1, eta_l=0.316, tau=0.5, initial_parameters=previous_weights
    )
    param_0: Parameters = weights_to_parameters(
        [array([0.2, 0.2, 0.2, 0.2], dtype=float32)]
    )
    param_1: Parameters = weights_to_parameters(
        [array([1.0, 1.0, 1.0, 1.0], dtype=float32)]
    )
    bridge = MagicMock()
    client_0 = GrpcClientProxy(cid="0", bridge=bridge)
    client_1 = GrpcClientProxy(cid="1", bridge=bridge)
    results: List[Tuple[ClientProxy, FitRes]] = [
        (
            client_0,
            FitRes(param_0, num_examples=5, num_examples_ceil=5, fit_duration=0.1),
        ),
        (
            client_1,
            FitRes(param_1, num_examples=5, num_examples_ceil=5, fit_duration=0.1),
        ),
    ]
    expected: Weights = [array([0.15, 0.15, 0.15, 0.15], dtype=float32)]

    # Execute
    actual_list = strategy.aggregate_fit(rnd=1, results=results, failures=[])

    # Assert
    assert actual_list is not None
    actual = actual_list[0]
    assert (actual == expected[0]).all()
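For reference, the expected value of 0.15 can be reproduced by hand, assuming the adaptive server update x <- x + eta * delta / (sqrt(v) + tau) from Reddi et al. with no momentum and v initialized to zero; the sketch below only re-derives the arithmetic and is not the strategy's own code.

import numpy as np

# Hypothetical re-derivation of the expected aggregate (illustrative only).
current = np.full(4, 0.1, dtype=np.float32)                 # previous server weights
fedavg = (5 * np.full(4, 0.2) + 5 * np.full(4, 1.0)) / 10   # example-weighted mean = 0.6
delta = fedavg - current                                     # pseudo-gradient = 0.5
v = delta ** 2                                               # assuming v_0 = 0 -> v_1 = 0.25
updated = current + 0.1 * delta / (np.sqrt(v) + 0.5)         # eta=0.1, tau=0.5 -> 0.15
print(updated)                                               # [0.15 0.15 0.15 0.15]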
Example #2
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        weights: Weights = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        results = self.keras_client.fit(weights, ins.config)
        if len(results) == 3:
            results = cast(Tuple[List[np.ndarray], int, int], results)
            weights_prime, num_examples, num_examples_ceil = results
            metrics: Optional[Metrics] = None
        elif len(results) == 4:
            results = cast(Tuple[List[np.ndarray], int, int, Metrics], results)
            weights_prime, num_examples, num_examples_ceil, metrics = results

        # Return FitRes
        fit_duration = timeit.default_timer() - fit_begin
        weights_prime_proto = weights_to_parameters(weights_prime)
        return FitRes(
            parameters=weights_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples_ceil,
            fit_duration=fit_duration,
            metrics=metrics,
        )
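The wrapper above accepts either a three- or four-element result from self.keras_client.fit. As a minimal sketch of the four-element form, assuming a client that wraps a compiled Keras model (the class name and attributes below are illustrative, not part of the wrapper):

class HypotheticalKerasClient:
    """Illustrative only: fit returns (weights, num_examples, num_examples_ceil, metrics)."""

    def __init__(self, model, x_train, y_train):
        self.model = model
        self.x_train = x_train
        self.y_train = y_train

    def fit(self, weights, config):
        self.model.set_weights(weights)
        history = self.model.fit(
            self.x_train,
            self.y_train,
            epochs=int(config["epochs"]),
            batch_size=int(config["batch_size"]),
        )
        metrics = {"loss": float(history.history["loss"][-1])}
        num_examples = len(self.x_train)
        return self.model.get_weights(), num_examples, num_examples, metrics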
Example #3
    def fit(self, ins: FitIns) -> FitRes:
        print(f"Client {self.cid}: fit")

        weights: Weights = fl.common.parameters_to_weights(ins.parameters)
        config = ins.config
        fit_begin = timeit.default_timer()

        # Get training config
        epochs = int(config["epochs"])
        batch_size = int(config["batch_size"])

        # Set model parameters
        self.model.set_weights(weights)

        # Train model
        trainloader = torch.utils.data.DataLoader(self.trainset,
                                                  batch_size=batch_size,
                                                  shuffle=True)
        cifar.train(self.model, trainloader, epochs=epochs, device=DEVICE)

        # Return the refined weights and the number of examples used for training
        weights_prime: Weights = self.model.get_weights()
        params_prime = fl.common.weights_to_parameters(weights_prime)
        num_examples_train = len(self.trainset)
        fit_duration = timeit.default_timer() - fit_begin
        return FitRes(
            parameters=params_prime,
            num_examples=num_examples_train,
            num_examples_ceil=num_examples_train,
            fit_duration=fit_duration,
        )
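The "epochs" and "batch_size" keys read above must be supplied by the server. A minimal sketch of the server side, assuming the standard on_fit_config_fn hook of FedAvg (the values are placeholders):

from typing import Dict

import flwr as fl


def fit_config(rnd: int) -> Dict[str, str]:
    """Return illustrative training instructions; keys must match what the client reads."""
    return {"epochs": "1", "batch_size": "32"}


strategy = fl.server.strategy.FedAvg(on_fit_config_fn=fit_config)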
Example #4
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        results = self.numpy_client.fit(parameters, ins.config)
        if len(results) == 2:
            print(DEPRECATION_WARNING_FIT)
            results = cast(Tuple[List[np.ndarray], int], results)
            parameters_prime, num_examples = results
            metrics: Optional[Metrics] = None
        elif len(results) == 3:
            results = cast(Tuple[List[np.ndarray], int, Metrics], results)
            parameters_prime, num_examples, metrics = results

        # Return FitRes
        fit_duration = timeit.default_timer() - fit_begin
        parameters_prime_proto = weights_to_parameters(parameters_prime)
        return FitRes(
            parameters=parameters_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples,  # Deprecated
            fit_duration=fit_duration,  # Deprecated
            metrics=metrics,
        )
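A minimal sketch of the non-deprecated return shape handled above, i.e. a NumPyClient whose fit returns the three-tuple (parameters, num_examples, metrics); the trivial "model" is a stand-in, not a real training loop:

from typing import Dict, List, Tuple

import numpy as np
import flwr as fl


class SketchNumPyClient(fl.client.NumPyClient):
    """Illustrative only: fit returns (parameters, num_examples, metrics)."""

    def __init__(self) -> None:
        # Stand-in "model": a single weight vector held as a NumPy array.
        self.weights: List[np.ndarray] = [np.zeros(4, dtype=np.float32)]

    def fit(
        self, parameters: List[np.ndarray], config: Dict[str, str]
    ) -> Tuple[List[np.ndarray], int, Dict[str, float]]:
        self.weights = [w + 0.1 for w in parameters]  # pretend one round of training
        return self.weights, 10, {"loss": 0.5}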
Example #5
def test_aggregate_fit_no_failures() -> None:
    """Test aggregate_fit when there are no failures."""
    # Prepare
    strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.99)
    results: List[Tuple[ClientProxy, FitRes]] = [
        (MagicMock(), FitRes(Parameters(tensors=[], tensor_type=""), 1, 1, 0.1))
    ]
    failures: List[BaseException] = []
    expected: Optional[Weights] = []

    # Execute
    actual = strategy.aggregate_fit(1, results, failures)

    # Assert
    assert actual == expected
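For contrast, a sketch (not taken from the test suite) of the complementary case: with min_completion_rate_fit=0.99, a single failure alongside one successful result drops the completion rate to 0.5, so aggregation should return None. The same imports as in the test above are assumed.

def test_aggregate_fit_one_failure_sketch() -> None:
    """Illustrative sketch: one failure out of two pushes completion below 0.99."""
    strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.99)
    results: List[Tuple[ClientProxy, FitRes]] = [
        (MagicMock(), FitRes(Parameters(tensors=[], tensor_type=""), 1, 1, 0.1))
    ]
    failures: List[BaseException] = [Exception("client dropped out")]

    actual = strategy.aggregate_fit(1, results, failures)

    assert actual is None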
Example #6
    def fit(self, ins: FitIns) -> FitRes:
        print(f"Client {self.cid}: fit")
        
        weights: Weights = fl.common.parameters_to_weights(ins.parameters)
        config = ins.config
        fit_begin = timeit.default_timer()

        # Get training config
        epochs = int(config["epochs"])
        batch_size = int(config["batch_size"])
        learning_rate = float(config["learning_rate"])
        
        # Set model parameters
        self.model.set_weights(weights)

        # Train model
        trainloader = torch.utils.data.DataLoader(
            self.trainset, batch_size=batch_size, shuffle=True
        )
        modules.pt_train(
            net=self.model,
            trainloader=trainloader,
            epochs=epochs,
            learning_rate=learning_rate,
            device=DEVICE,
        )

        # Get weights from the model
        weights_prime: Weights = self.model.get_weights()

        # Check if quantization is requested
        if glb.QUANTIZE:
            weights_prime = modules.quantize(
                weights=weights_prime,
                bits=glb.Q_BITS,
            )
        
        # Return the refined weights and the number of examples used for training
        params_prime = fl.common.weights_to_parameters(weights_prime)
        num_examples_train = len(self.trainset)
        fit_duration = timeit.default_timer() - fit_begin
        return FitRes(
            parameters=params_prime,
            num_examples=num_examples_train,
            num_examples_ceil=num_examples_train,
            fit_duration=fit_duration,
        )
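The project-specific modules.quantize is not shown here. Purely to illustrate the kind of transformation the glb.QUANTIZE flag gates, a hypothetical uniform min-max quantizer over a list of NumPy arrays (the real implementation may differ):

from typing import List

import numpy as np


def uniform_quantize_sketch(weights: List[np.ndarray], bits: int = 8) -> List[np.ndarray]:
    """Hypothetical: snap each array to 2**bits evenly spaced levels between its min and max."""
    levels = 2 ** bits - 1
    quantized = []
    for w in weights:
        w_min, w_max = float(w.min()), float(w.max())
        scale = (w_max - w_min) / levels if w_max > w_min else 1.0
        q = np.round((w - w_min) / scale) * scale + w_min
        quantized.append(q.astype(w.dtype))
    return quantized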
Example #7
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        parameters_prime, num_examples = self.numpy_client.fit(parameters, ins.config)
        fit_duration = timeit.default_timer() - fit_begin

        # Return FitRes
        parameters_prime_proto = weights_to_parameters(parameters_prime)
        return FitRes(
            parameters=parameters_prime_proto,
            num_examples=num_examples,
            num_examples_ceil=num_examples,  # num_examples == num_examples_ceil
            fit_duration=fit_duration,
        )
Example #8
    def fit(self, ins: FitIns) -> FitRes:
        print(f"Client {self.cid}: fit")

        weights: Weights = fl.common.parameters_to_weights(ins.parameters)
        config = ins.config
        fit_begin = timeit.default_timer()

        # Get training config
        epochs = int(config["epochs"])
        batch_size = int(config["batch_size"])
        pin_memory = bool(config["pin_memory"])
        num_workers = int(config["num_workers"])

        # Set model parameters
        set_weights(self.model, weights)

        if torch.cuda.is_available():
            kwargs = {
                "num_workers": num_workers,
                "pin_memory": pin_memory,
                "drop_last": True,
            }
        else:
            kwargs = {"drop_last": True}

        # Train model
        trainloader = torch.utils.data.DataLoader(self.trainset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  **kwargs)
        utils.train(self.model, trainloader, epochs=epochs, device=DEVICE)

        # Return the refined weights and the number of examples used for training
        weights_prime: Weights = get_weights(self.model)
        params_prime = fl.common.weights_to_parameters(weights_prime)
        num_examples_train = len(self.trainset)
        fit_duration = timeit.default_timer() - fit_begin
        return FitRes(
            parameters=params_prime,
            num_examples=num_examples_train,
            num_examples_ceil=num_examples_train,
            fit_duration=fit_duration,
        )
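DEVICE is assumed to be defined at module level in this file; a common definition consistent with the torch.cuda.is_available() check above would be (an assumption, not shown in the original):

import torch

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")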
Example #9
    def fit(self, ins: FitIns) -> FitRes:
        """Refine the provided weights using the locally held dataset."""
        # Deconstruct FitIns
        weights: Weights = parameters_to_weights(ins.parameters)

        # Train
        fit_begin = timeit.default_timer()
        weights_prime, num_examples, num_examples_ceil = self.keras_client.fit(
            weights, ins.config
        )
        fit_duration = timeit.default_timer() - fit_begin

        # Return FitRes
        parameters = weights_to_parameters(weights_prime)
        return FitRes(
            parameters=parameters,
            num_examples=num_examples,
            num_examples_ceil=num_examples_ceil,
            fit_duration=fit_duration,
        )
Example #10
    def fit(self, ins: FitIns) -> FitRes:
        print(f"Client {self.cid}: fit")

        weights: Weights = fl.common.parameters_to_weights(ins.parameters)
        config = ins.config
        fit_begin = timeit.default_timer()

        # Get training config
        epochs = int(config["epochs"])
        batch_size = int(config["batch_size"])
        num_workers = 0

        # Set model parameters
        set_weights(self.model, weights)

        # Train model (oversample minority classes via a weighted sampler)
        sample_weights = self.trainset.make_weights_for_balanced_classes()
        sampler = WeightedRandomSampler(sample_weights, len(sample_weights))

        trainloader = torch.utils.data.DataLoader(self.trainset,
                                                  batch_size=batch_size,
                                                  sampler=sampler,
                                                  pin_memory=use_gpu,
                                                  num_workers=num_workers)
        train(self.model, trainloader, epochs=epochs, device=DEVICE)

        # Return the refined weights and the number of examples used for training
        weights_prime: Weights = get_weights(self.model)
        params_prime = fl.common.weights_to_parameters(weights_prime)
        num_examples_train = len(self.trainset)
        fit_duration = timeit.default_timer() - fit_begin
        return FitRes(
            parameters=params_prime,
            num_examples=num_examples_train,
            num_examples_ceil=num_examples_train,
            fit_duration=fit_duration,
        )
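make_weights_for_balanced_classes is dataset-specific and not shown. As a hypothetical version of the usual pattern (each sample weighted by the inverse frequency of its class label), for illustration only:

from collections import Counter
from typing import List, Sequence


def make_weights_for_balanced_classes_sketch(targets: Sequence[int]) -> List[float]:
    """Hypothetical: weight every sample by 1 / (number of samples in its class)."""
    counts = Counter(targets)
    return [1.0 / counts[label] for label in targets]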
Example #11
    def fit(self, ins: FitIns) -> FitRes:
        arr = np.array([[1, 2], [3, 4], [5, 6]])
        arr_serialized = ndarray_to_bytes(arr)
        return FitRes(Parameters(tensors=[arr_serialized], tensor_type=""), 1, 1, 12.3)
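On the receiving side the serialized tensor can be restored; a minimal round-trip sketch, assuming bytes_to_ndarray is the inverse helper shipped alongside ndarray_to_bytes (the exact import path may vary by Flower version):

import numpy as np
from flwr.common import bytes_to_ndarray, ndarray_to_bytes  # assumed import path

arr = np.array([[1, 2], [3, 4], [5, 6]])
restored = bytes_to_ndarray(ndarray_to_bytes(arr))
np.testing.assert_array_equal(arr, restored)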