Example #1
    @classmethod
    def setUpClass(cls):

        cls.nb_devices = 10
        cls.input_size = 4
        cls.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        net = LogisticReg

        # Global model.
        cls.global_model = net(input_size=cls.input_size).to(cls.device)

        # Client models, all initialized from the global model.
        cls.client_models = [net(input_size=cls.input_size).to(cls.device) for _ in range(cls.nb_devices)]
        for model in cls.client_models:
            model.load_state_dict(cls.global_model.state_dict())

        cls.shapes = [torch.Size([1, 4])]

        cls.params = BiQSGD().define(cost_models=None,
                                     n_dimensions=cls.input_size,
                                     stochastic=False,
                                     nb_epoch=10000,
                                     nb_devices=cls.nb_devices,
                                     batch_size=10,
                                     fraction_sampled_workers=1,
                                     up_compression_model=SQuantization(1, norm=2),
                                     down_compression_model=SQuantization(1, norm=2))
        cls.params = cast_to_DL(cls.params, None, None, 0.1, 0, None)

        cls.optimizers = [optim.SGD(model.parameters(), lr=0.1, momentum=0) for model in cls.client_models]
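Since every client model is initialized from the global model's state_dict, a quick sanity check can confirm they start in sync. A minimal sketch that could be appended at the end of setUpClass() above:

        # Sketch: every client should start with exactly the global weights.
        for model in cls.client_models:
            for p_g, p_c in zip(cls.global_model.parameters(), model.parameters()):
                assert torch.equal(p_g, p_c), "Client model out of sync at init."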
Example #2
 def __init__(self, parameters: Parameters) -> None:
     super().__init__(parameters)
     # Vanilla SGD doesn't carry out any compression.
     self.parameters.up_compression_model = SQuantization(
         0, self.parameters.n_dimensions)
     self.parameters.down_compression_model = SQuantization(
         0, self.parameters.n_dimensions)
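As the comment says, a quantization level of 0 disables compression. Assuming that a level-0 SQuantization makes compress() act as the identity (which the comment implies, though it is not shown here), a minimal sketch:

# Sketch (assumption): a level-0 SQuantization leaves vectors unchanged.
identity = SQuantization(0, 3)  # second argument: n_dimensions, as above
v = torch.Tensor([1.0, -2.0, 3.0])
assert identity.compress(v).equal(v)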
Example #3
    @classmethod
    def setUpClass(cls):
        """ get_some_resource() is slow, to avoid calling it for each test use setUpClass()
            and store the result as class variable
        """
        super(PerformancesTest, cls).setUpClass()

        ### RMSE ###

        # Create the cost models used to compute the cost/loss, gradients, L ...
        cls.linear_cost_models = build_several_cost_model(RMSEModel, linear_X, linear_Y, nb_devices)

        # Define the parameters for the performance tests.
        cls.linear_params = Parameters(n_dimensions=dim_test + 1,
                                       nb_devices=nb_devices,
                                       up_compression_model=SQuantization(1, dim_test + 1),
                                       step_formula=deacreasing_step_size,
                                       nb_epoch=nb_epoch,
                                       use_averaging=False,
                                       cost_models=cls.linear_cost_models,
                                       stochastic=True)

        obj_min_by_N_descent = SGD_Descent(Parameters(n_dimensions=dim_test + 1,
                                                      nb_devices=nb_devices,
                                                      nb_epoch=200,
                                                      momentum=0.,
                                                      verbose=True,
                                                      cost_models=cls.linear_cost_models,
                                                      stochastic=False,
                                                      bidirectional=False
                                                      ))
        obj_min_by_N_descent.run(cls.linear_cost_models)
        cls.linear_obj = obj_min_by_N_descent.train_losses[-1]

        ### LOGISTIC ###

        cls.logistic_cost_models = build_several_cost_model(LogisticModel, logistic_X, logistic_Y, nb_devices)

        # Define the parameters for the performance tests.
        cls.logistic_params = Parameters(n_dimensions=2,
                                         nb_devices=nb_devices,
                                         up_compression_model=SQuantization(1, 3),
                                         step_formula=deacreasing_step_size,
                                         nb_epoch=nb_epoch,
                                         use_averaging=False,
                                         cost_models=cls.logistic_cost_models,
                                         stochastic=True)

        obj_min_by_N_descent = SGD_Descent(Parameters(n_dimensions=2,
                                                      nb_devices=nb_devices,
                                                      nb_epoch=200,
                                                      momentum=0.,
                                                      verbose=True,
                                                      cost_models=cls.logistic_cost_models,
                                                      stochastic=False,
                                                      bidirectional=False
                                                      ))
        obj_min_by_N_descent.run(cls.logistic_cost_models)
        cls.logistic_obj = obj_min_by_N_descent.train_losses[-1]
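The values stored in cls.linear_obj and cls.logistic_obj are the final losses of an uncompressed, full-gradient run, and serve as reference optima for the actual tests. A hypothetical test body (a sketch only; the method name and the tolerance are illustrative, not from the original file):

    def test_sgd_reaches_baseline(self):
        # Sketch: a stochastic run should end up close to the baseline objective.
        descent = SGD_Descent(self.linear_params)
        descent.run(self.linear_cost_models)
        assert descent.train_losses[-1] - self.linear_obj < 0.5  # illustrative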
Example #4
 def test_quantization_with_bucket(self):
     quantization = SQuantization(level=1, norm=2)
     quantization.bucket_size = 4
     vector = torch.Tensor([1, 2, 3, 4, 11, 2, 30, 4, 8, 1])
     bucket_quantization = torch.Tensor([
         0.0000, 0.0000, 0.0000, 5.4772257805, 0.0000, 0.0000,
         32.2645301819, 32.2645301819, 8.0622577667, 0.0000
     ])
     torch.manual_seed(10)
     a = quantization.compress(vector)
     assert a.equal(
         bucket_quantization), "The quantization by bucket is incorrect."
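The expected tensor can be checked by hand: with bucket_size = 4 the input splits into buckets [1, 2, 3, 4], [11, 2, 30, 4] and [8, 1], and every non-zero output coordinate equals the 2-norm of its bucket, consistent with level-1 stochastic quantization that rounds each coordinate to 0 or to the bucket norm:

import math
# 2-norms of the three buckets; they match the non-zero entries above.
print(math.sqrt(1 + 4 + 9 + 16))      # 5.47722557...  bucket [1, 2, 3, 4]
print(math.sqrt(121 + 4 + 900 + 16))  # 32.2645316...  bucket [11, 2, 30, 4]
print(math.sqrt(64 + 1))              # 8.06225774...  bucket [8, 1]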
Example #5
def name_of_the_experiments(dataset: str, stochastic: bool):
    """Return the name of the experiments."""
    default_up_compression = SQuantization(quantization_levels[dataset],
                                           norm=norm_quantization[dataset])
    default_down_compression = SQuantization(quantization_levels[dataset],
                                             norm=norm_quantization[dataset])
    name = "{0}_m{1}_lr{2}_sup{3}_sdwn{4}_b{5}_wd{6}_norm-{7}".format(
        models[dataset].__name__, momentums[dataset],
        round(optimal_steps_size[dataset], 4), default_up_compression.level,
        default_down_compression.level, batch_sizes[dataset],
        weight_decay[dataset], norm_quantization[dataset])
    if not stochastic:
        name += "-full"
    return name
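A hypothetical call, assuming "mnist" is a key of the module-level dictionaries (models, momentums, optimal_steps_size, batch_sizes, weight_decay, quantization_levels, norm_quantization):

# Hypothetical usage; the dataset key and the output are illustrative only.
print(name_of_the_experiments("mnist", stochastic=True))
# e.g. "LogisticReg_m0_lr0.1_sup1_sdwn1_b10_wd0_norm-2"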
Example #6
 def test_quantization_without_bucket(self):
     quantization = SQuantization(level=1, norm=2)
     quantization.bucket_size = 100
     zeros = torch.zeros(10)
     vector = torch.Tensor([1, 2, 3, 4, 5, 6, 9, 1, 2, 3, 2])
     assert quantization.__qtzt__(zeros).equal(
         zeros), "Compressing an all-zeros vector must return zeros."
     torch.manual_seed(10)
     single_qtzt = quantization.__qtzt__(vector)
     torch.manual_seed(10)
     bucket_qtzt = quantization.compress(vector)
     assert bucket_qtzt.equal(
         single_qtzt
     ), "A vector with fewer elements than the bucket size should be quantized as a single vector."
Example #7
 def __init__(self, parameters: Parameters) -> None:
     super().__init__(parameters)
     # Down compression is disabled: a level-0 SQuantization performs no compression.
     self.parameters.down_compression_model = SQuantization(
         0, self.parameters.n_dimensions)
Example #8
 def __init__(self, parameters: Parameters) -> None:
     super().__init__(parameters)
     # Diana doesn't carry out any down compression.
     self.parameters.down_compression_model = SQuantization(
         0, self.parameters.n_dimensions)