Example #1
    def test_StochasticSingleCompressionWithMemory_iscorrect(self):
        n_dimensions, nb_devices, quantization_param = 10, 10, 1
        params = Diana().define(
            cost_models=RMSEModel(X, Y_reg), n_dimensions=n_dimensions, nb_devices=nb_devices,
            quantization_param=quantization_param, step_formula=step_formula_for_test)
        self.assertEqual(params.n_dimensions, n_dimensions)
        self.assertEqual(params.nb_devices, nb_devices)
        self.assertEqual(params.nb_epoch, NB_EPOCH)
        self.assertEqual(params.step_formula.__code__.co_code, step_formula_for_test.__code__.co_code)
        self.assertEqual(params.quantization_param, quantization_param)
        self.assertEqual(params.momentum, 0)
        self.assertEqual(params.verbose, False)
        self.assertEqual(params.stochastic, True)
        self.assertIs(type(params.cost_models), type(RMSEModel(X, Y_reg)))
        self.assertEqual(params.use_averaging, False)
        self.assertEqual(params.bidirectional, False)
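The fixtures X, Y_reg and step_formula_for_test used throughout these tests are module-level values that the snippets do not show. A minimal sketch of what they could look like (the names match the tests; the shapes, values and the lambda signature are illustrative assumptions):

import torch

DIM, NB_DEVICES, NB_EPOCH = 10, 10, 5   # illustrative constants
X = torch.randn(20, DIM)                # hypothetical feature matrix
Y_reg = torch.randn(20, 1)              # hypothetical regression targets

# Deterministic step-size formula; the tests only check that it is stored unchanged.
# The argument list (epoch, smoothness constant, quantization constant) is an assumption.
step_formula_for_test = lambda it, L, omega_c: 1 / (L * (it + 1))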
Example #2
    def test_rmse_set_data(self):
        cost_model = RMSEModel(X, Y_reg)
        self.assertIs(type(cost_model), RMSEModel)
        self.assertIs(type(cost_model.regularization), NoRegularization)
        self.assertTrue(torch.equal(X, cost_model.X))
        self.assertTrue(torch.equal(Y_reg, cost_model.Y))
        self.assertEqual(cost_model.local_L, 2 * sqrt(18) / 3)
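The expected value 2 * sqrt(18) / 3 is the smoothness constant of the test data. For a least-squares cost (1/n)||Xw - y||^2 the gradient is (2/n) X^T (Xw - y), so a standard smoothness bound is (2/n) * lambda_max(X^T X); the sketch below computes that bound. Whether RMSEModel.local_L uses exactly this formula is an assumption:

import torch

def least_squares_smoothness(X: torch.Tensor) -> float:
    # Lipschitz constant of w -> (2/n) X^T (X w - y) is (2/n) * lambda_max(X^T X).
    n = X.shape[0]
    return float(2 * torch.linalg.eigvalsh(X.T @ X).max() / n)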
Example #3
    def test_parameters_instantiation_without_arguments(self):
        """All arguments of parameters class have default values. Check that this values are correct."""
        params = Parameters(cost_models=RMSEModel(X, Y_reg), step_formula=step_formula_for_test)

        self.assertIs(type(params.cost_models), type(RMSEModel(X, Y_reg)))
        self.assertEqual(params.federated, False)
        self.assertEqual(params.n_dimensions, DIM)
        self.assertEqual(params.nb_devices, NB_DEVICES)
        self.assertEqual(params.batch_size, 1)
        self.assertEqual(
            params.step_formula.__code__.co_code, step_formula_for_test.__code__.co_code)
        self.assertEqual(params.nb_epoch, NB_EPOCH)
        self.assertEqual(params.regularization_rate, 0)
        self.assertEqual(params.momentum, 0)
        self.assertIsNone(params.quantization_param)
        self.assertIsNone(params.up_learning_rate)
        self.assertEqual(params.force_learning_rate, False)
        self.assertEqual(params.bidirectional, False)
        self.assertEqual(params.verbose, False)
        self.assertEqual(params.stochastic, True)
        self.assertEqual(params.down_compress_model, True)
        self.assertEqual(params.use_down_memory, False)
        self.assertEqual(params.use_averaging, False)
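A note on the step_formula assertions above (and in Example #1): two distinct lambda objects never compare equal, but lambdas compiled from identical source share the same bytecode, which is why the tests compare __code__.co_code instead. A self-contained illustration:

f = lambda it, L, omega_c: 1 / (L * (it + 1))
g = lambda it, L, omega_c: 1 / (L * (it + 1))
assert f != g                                      # distinct function objects
assert f.__code__.co_code == g.__code__.co_code    # identical bytecode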
Example #4
    def define(self, n_dimensions: int, nb_devices: int, quantization_param: int = 0, step_formula=None,
               momentum: float = 0, nb_epoch: int = NB_EPOCH, use_averaging=False,
               model: ACostModel = RMSEModel(), stochastic=True):
        # Preset without compression: quantization_param is forced to 0 regardless of the argument.
        return Parameters(n_dimensions=n_dimensions,
                          nb_devices=nb_devices,
                          nb_epoch=nb_epoch,
                          step_formula=step_formula,
                          quantization_param=0,
                          momentum=momentum,
                          verbose=False,
                          stochastic=stochastic,
                          bidirectional=False,
                          cost_model=model,
                          use_averaging=use_averaging)
Example #5
    def __init__(self,
                 cost_model: ACostModel = RMSEModel(),
                 federated: bool = False,
                 n_dimensions: int = DIM,
                 nb_devices: int = NB_DEVICES,
                 batch_size: int = 1,
                 step_formula=None,
                 nb_epoch: int = NB_EPOCH,
                 regularization_rate: float = 0,
                 momentum: float = 0,
                 quantization_param: int = None,
                 learning_rate: float = None,
                 force_learning_rate: bool = False,
                 bidirectional: bool = False,
                 verbose: bool = False,
                 stochastic: bool = True,
                 compress_gradients: bool = True,
                 double_use_memory: bool = False,
                 use_averaging: bool = False) -> None:
        super().__init__()
        self.cost_model = cost_model  # Cost model to use for gradient descent.
        self.federated = federated  # True if doing federated learning.
        self.n_dimensions = n_dimensions  # Dimension of the problem.
        self.nb_devices = nb_devices  # Number of devices on the network.
        self.batch_size = batch_size  # Batch size.
        # The step size at each iteration is computed by a lambda function taking the number
        # of the current epoch, the coefficient of smoothness and the quantization constant
        # omega_c. Use the provided formula if any; otherwise fall back to a
        # dimension-dependent default.
        if step_formula is not None:
            self.step_formula = step_formula
        elif sqrt(n_dimensions) < 0.5 * nb_devices:
            self.step_formula = default_step_formula(stochastic)
        else:
            self.step_formula = default_step_formula_large_dim(stochastic, bidirectional, quantization_param)
        self.nb_epoch = nb_epoch  # Number of epochs of the run.
        self.regularization_rate = regularization_rate  # Coefficient of regularization.
        self.force_learning_rate = force_learning_rate
        self.momentum = momentum  # Momentum coefficient.
        self.quantization_param = quantization_param  # Quantization parameter.
        self.omega_c = 0  # Quantization constant involved in the variance inequality of the scheme.
        self.learning_rate = learning_rate
        self.bidirectional = bidirectional
        self.stochastic = stochastic  # True if running a stochastic gradient descent.
        self.compress_gradients = compress_gradients
        self.double_use_memory = double_use_memory  # True to also use a memory on the downlink direction.
        self.verbose = verbose
        self.use_averaging = use_averaging  # True if using Polyak-Ruppert averaging.
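A minimal usage sketch of this constructor, reusing the RMSEModel fixture from Example #2; it exercises both step-formula paths (dimension-dependent default vs. user-provided lambda, which the tests in Examples #1 and #3 expect to be stored unchanged):

# Default path: the formula is picked by the dimension-dependent rule above.
params = Parameters(cost_model=RMSEModel(X, Y_reg))

# Explicit path: the provided lambda is stored as-is.
custom = Parameters(cost_model=RMSEModel(X, Y_reg),
                    step_formula=lambda it, L, omega_c: 1 / L)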
Example #6
    def define(self, n_dimensions: int, nb_devices: int, quantization_param: int,
               step_formula=None, momentum: float = 0,
               nb_epoch: int = NB_EPOCH,
               use_averaging=False, model: ACostModel = RMSEModel(), stochastic=True):
        """Define parameters to be used during the descent.

        Args:
            n_dimensions: dimensions of the problem.
            nb_devices: number of device in the federated network.
            quantization_param: parameter of quantization.
            step_formula: lambda formul to compute the step size at each iteration.
            momentum: momentum coefficient.
            nb_epoch: number of epoch for the run.
            use_averaging: true if using Polyak-Rupper Averaging.
            model: cost model of the problem (e.g least-square, logistic ...).
            stochastic: true if running stochastic descent.

        Returns:
            Build parameters.
        """
        pass
Example #7
def multiple_run_descent(predefined_parameters: PredefinedParameters, X, Y,
                         nb_epoch=NB_EPOCH,
                         quantization_param: int = 1,
                         step_formula=None,
                         use_averaging=False,
                         model=RMSEModel(),
                         stochastic=True) -> MultipleDescentRun:
    """

    Args:
        predefined_parameters: predefined parameters
        X: data
        Y: labels
        nb_epoch: number of epoch for the each run
        quantization_param:
        step_formula: lambda function to compute the step size at each iteration.
        use_averaging: true if using Polyak-Rupper averaging.
        model: cost model of the problem (e.g least-square, logistic ...).
        stochastic: true if running stochastic descent.

    Returns:
    """
    print(predefined_parameters.name())

    multiple_descent = MultipleDescentRun()
    for i in range(nb_run):  # nb_run: number of repetitions, defined at module level (not shown here).
        params = predefined_parameters.define(n_dimensions=X[0].shape[1],
                                              nb_devices=len(X),
                                              quantization_param=quantization_param,
                                              step_formula=step_formula,
                                              nb_epoch=nb_epoch,
                                              use_averaging=use_averaging,
                                              model=model,
                                              stochastic=stochastic)
        model_descent = predefined_parameters.type_FL()(params)
        model_descent.set_data(X, Y)
        model_descent.run()
        multiple_descent.append(model_descent)
    return multiple_descent
Example #8
    @classmethod
    def setUpClass(cls):
        """get_some_resource() is slow; to avoid calling it for each test, use setUpClass()
        and store the result as a class variable."""
        super(PerformancesTest, cls).setUpClass()

        ### RMSE ###

        # Defining parameters for the performances test.
        cls.linear_params = Parameters(n_dimensions=dim_test + 1,
                                       nb_devices=nb_devices,
                                       quantization_param=1,
                                       step_formula=None,
                                       nb_epoch=nb_epoch,
                                       use_averaging=False,
                                       cost_model=RMSEModel(),
                                       stochastic=True)

        obj_min_by_N_descent = FL_VanillaSGD(
            Parameters(
                n_dimensions=dim_test + 1,
                nb_devices=nb_devices,
                nb_epoch=200,
                momentum=0.,
                quantization_param=0,
                verbose=True,
                cost_model=RMSEModel(),
                stochastic=False,
                bidirectional=False,
            ))
        obj_min_by_N_descent.set_data(linear_X[:nb_devices],
                                      linear_Y[:nb_devices])
        obj_min_by_N_descent.run()
        cls.linear_obj = obj_min_by_N_descent.losses[-1]

        ### LOGISTIC ###

        # Defining parameters for the performances test.
        cls.logistic_params = Parameters(n_dimensions=2,
                                         nb_devices=nb_devices,
                                         quantization_param=1,
                                         step_formula=None,
                                         nb_epoch=nb_epoch,
                                         use_averaging=False,
                                         cost_model=LogisticModel(),
                                         stochastic=True)

        obj_min_by_N_descent = FL_VanillaSGD(
            Parameters(
                n_dimensions=2,
                nb_devices=nb_devices,
                nb_epoch=200,
                momentum=0.,
                quantization_param=0,
                verbose=True,
                cost_model=LogisticModel(),
                stochastic=False,
                bidirectional=False,
            ))
        obj_min_by_N_descent.set_data(logistic_X[:nb_devices],
                                      logistic_Y[:nb_devices])
        obj_min_by_N_descent.run()
        cls.logistic_obj = obj_min_by_N_descent.losses[-1]
Example #9
    def define(self, n_dimensions: int, nb_devices: int, quantization_param: int, step_formula=None,
               momentum: float = 0, nb_epoch: int = NB_EPOCH, use_averaging=False, model: ACostModel = RMSEModel(),
               stochastic=True):
        # Bidirectional preset: gradients and model are both compressed
        # (quantization_param is forced to 1 and learning_rate to 0, regardless of the arguments).
        return Parameters(n_dimensions=n_dimensions,
                          nb_devices=nb_devices,
                          nb_epoch=nb_epoch,
                          step_formula=step_formula,
                          quantization_param=1,
                          learning_rate=0,
                          momentum=momentum,
                          verbose=False,
                          stochastic=stochastic,
                          cost_model=model,
                          use_averaging=use_averaging,
                          bidirectional=True,
                          double_use_memory=False,
                          compress_gradients=True)
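A sketch of how a preset like this is consumed through multiple_run_descent (Example #7). The class owning the define above is not shown in the snippet, so BidirectionalPreset is a hypothetical stand-in, and X_devices / Y_devices stand for the per-device data and label lists:

preset = BidirectionalPreset()  # hypothetical name for the preset class above
runs = multiple_run_descent(preset, X_devices, Y_devices,
                            quantization_param=1, model=RMSEModel())
# runs is a MultipleDescentRun collecting one finished descent per repetition.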