Example #1
 def test_send_back_global_informations_and_update(self):
     artemis_update = ArtemisUpdate(self.params, self.workers)
     self.workers[0].idx_last_update = 1
     self.workers[1].idx_last_update = 1
     artemis_update.workers_sub_set = [
         (self.workers[i], self.cost_models[i])
         for i in range(self.params.nb_devices)
     ]
     artemis_update.omega_k = [
         [torch.FloatTensor([0, 50]), torch.FloatTensor([0, 10])],
         [torch.FloatTensor([2, 4]), torch.FloatTensor([10, 20])]
     ]
     nb_try = 1
     artemis_update.step = 1 / 10
     artemis_update.send_back_global_informations_and_update(
         self.cost_models)
     while (nb_try < 5 and torch.all(
             artemis_update.workers[0].local_update.model_param.eq(
                 artemis_update.workers[1].local_update.model_param))):
         self.workers[0].idx_last_update = 1
         self.workers[1].idx_last_update = 1
         artemis_update.send_back_global_informations_and_update(
             self.cost_models)
         nb_try += 1
     self.assertFalse(
         torch.all(artemis_update.workers[0].local_update.model_param.eq(
             artemis_update.workers[1].local_update.model_param)),
         "The models on workers are expected to be different.")
     self.assertTrue(
         self.workers[0].idx_last_update == 2
         and self.workers[1].idx_last_update == 2,
         "Index of last participation of each worker should be updated to 2"
     )
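Both this test and the next rely on the same idea: the update involves stochastic compression, so two results that should normally differ can coincide by chance, and the test retries a few times before asserting. Below is a minimal sketch of that retry pattern as a standalone helper; the name retry_until_different and its signature are illustrative, not part of the tested code.

import torch

def retry_until_different(produce_pair, max_tries=5):
    """Re-run a stochastic operation until its two outputs differ.

    `produce_pair` is assumed to return two tensors whose values involve
    randomness (e.g. stochastic quantization). A single draw can yield
    identical tensors by chance, so we allow up to `max_tries` attempts
    before reporting failure.
    """
    for _ in range(max_tries):
        a, b = produce_pair()
        if not torch.equal(a, b):
            return True
    return False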
Example #2
 def test_build_randomized_omega(self):
     artemis_update = ArtemisUpdate(self.params, self.workers)
     artemis_update.workers_sub_set = self.workers
     artemis_update.value_to_compress = torch.FloatTensor(
         [i for i in range(0, 100, 10)])
     # We initialize omega_k with two values (as if we were at iteration 2).
     artemis_update.omega_k = [
         [torch.FloatTensor([i for i in range(0, 100, 10)]),
          torch.FloatTensor([i for i in range(10)])],
         [torch.FloatTensor([i for i in range(0, 20, 2)]),
          torch.FloatTensor([i for i in range(10)])]
     ]
     nb_try = 1
     # We want to check that we get two different quantizations of the value
     # to compress. But quantization is random, so the two vectors can
     # occasionally be identical. We therefore retry up to five times; if
     # they are still equal after that, the test is considered failed.
     artemis_update.build_randomized_omega(self.cost_models)
     self.assertEqual(
         len(artemis_update.omega), 2,
         "The number of compressed values kept on the central server must be 2."
     )
     while (nb_try < 5 and torch.all(artemis_update.omega[0].eq(
             artemis_update.omega[1]))):
         artemis_update.build_randomized_omega(self.cost_models)
         nb_try += 1
     self.assertTrue(
         nb_try < 5,
         "After 5 tries, the two quantizations are still identical."
     )
     self.assertEqual(len(artemis_update.omega_k), 3)
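For context, the randomness mentioned above comes from the compressor itself. Here is a minimal sketch of QSGD-style s-level stochastic quantization; it is illustrative only, and the project's actual compressor may differ.

import torch

def stochastic_quantize(x, s=10):
    # Unbiased s-level quantization: each coordinate is randomly rounded
    # to one of the two nearest levels so that E[Q(x)] = x. Two calls on
    # the same vector therefore usually, but not always, differ.
    norm = torch.norm(x)
    if norm == 0:
        return x.clone()
    level = torch.abs(x) / norm * s          # position in [0, s]
    lower = torch.floor(level)               # nearest lower level
    prob = level - lower                     # probability of rounding up
    rounded = lower + torch.bernoulli(prob)  # randomized rounding
    return torch.sign(x) * norm * rounded / s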
Example #3
 def test_Diana(self):
     params = Diana().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Diana uses no downlink memory, so H must stay zero.
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that nothing has been quantized for the downlink.
     # NB: there is a problem with this test; it passes only when run with Artemis settings.
     self.assertTrue(torch.equal(update.value_to_compress, zero_tensor))
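These assertions match the usual Diana mechanics: each worker compresses only the difference between its gradient and a local memory h, so h becomes non-zero after one step while the downlink stays uncompressed. A minimal sketch of that uplink recursion follows, with compress and alpha as illustrative stand-ins (alpha playing the role of up_learning_rate):

import torch

def diana_uplink_step(grad, h, alpha=0.5, compress=lambda v: v):
    # The worker transmits only the compressed difference to its memory h;
    # server and worker then update h with the same rule, which is why
    # update.h differs from zero after a single compute() call.
    delta = compress(grad - h)   # message actually sent uplink
    g_hat = h + delta            # server-side unbiased gradient estimate
    h_new = h + alpha * delta    # memory moves toward the true gradient
    return g_hat, h_new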
Example #4
 def test_Artemis(self):
     params = Artemis().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the downlink memory H has not been updated.
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, update.g))
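The variants in these examples differ mainly in what the server selects as value_to_compress for the downlink. The dictionary below summarizes what the assertions in Examples #3 through #9 encode; it is an illustrative summary, not project code (g is the aggregated gradient, new_w the updated model, H the downlink memory):

# Downlink value compressed by each variant, as asserted in the tests:
DOWNLINK_VALUE = {
    "Qsgd": None,                                    # uplink only, no memory
    "Diana": None,                                   # uplink only, with memory
    "Artemis": "g",                                  # compressed gradient
    "SGDDoubleModelCompressionWithoutMem": "new_w",  # compressed model
    "MCM": "new_w - H",                              # model minus downlink memory
    "DoreVariant": "g - H",                          # gradient minus downlink memory
}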
Example #5
 def test_QSGD(self):
     params = Qsgd().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     # Check that no memory has been updated.
     self.assertTrue(torch.equal(update.h, zero_tensor))
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that nothing has been quantized for the downlink.
     self.assertTrue(torch.equal(update.value_to_compress, zero_tensor))
Example #6
 def test_doubleMODELcompression_without_memory(self):
     params = SGDDoubleModelCompressionWithoutMem().define(
         n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     new_w = update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the memory l has not been updated (this variant keeps none).
     self.assertTrue(torch.equal(update.l, zero_tensor))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, new_w))
Example #7
 def test_initialization(self):
     params = Parameters()
     workers = [Worker(0, params)]
     # torch.zeros expects a torch dtype; np.float is not valid here.
     zero_tensor = torch.zeros(params.n_dimensions, dtype=torch.float64)
     update = ArtemisUpdate(params, workers)
     self.assertTrue(torch.equal(update.g, zero_tensor))
     self.assertTrue(torch.equal(update.h, zero_tensor))
     self.assertTrue(torch.equal(update.v, zero_tensor))
     self.assertTrue(torch.equal(update.l, zero_tensor))
     self.assertTrue(torch.equal(update.value_to_compress, zero_tensor))
Example #8
 def test_doubleMODELcompression_WITH_memory(self):
     params = MCM().define(n_dimensions=DIM, nb_devices=1,
                           quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     artificial_l = ones_tensor.clone().detach()
     update.H = artificial_l.clone().detach()
     new_w = update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the downlink memory H has been updated.
     self.assertFalse(torch.equal(update.H, artificial_l))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, new_w - artificial_l))
Example #9
 def test_doubleGRADIENTcompression_WITH_additional_memory(self):
     params = DoreVariant().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     artificial_l = ones_tensor.clone().detach()
     # We artificially set a different memory to check that it has an impact on the update computation.
     update.H = artificial_l.clone().detach()
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the downlink memory H has been updated.
     self.assertFalse(torch.equal(update.H, artificial_l))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, update.g - artificial_l))
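Examples #8 and #9 both exercise the downlink memory H: the server compresses the difference between the value it wants to broadcast and H, then updates H, which is why update.H no longer equals the artificial memory afterwards. A minimal sketch of that step; the names and the beta rate are illustrative assumptions, not the project's API:

import torch

def downlink_memory_step(value, H, beta=0.5, compress=lambda v: v):
    # `value` is the quantity to broadcast: the new model for MCM,
    # the aggregated gradient for the Dore variant.
    delta = compress(value - H)   # value_to_compress in the tests above
    broadcast = H + delta         # what the workers reconstruct
    H_new = H + beta * delta      # downlink memory update
    return broadcast, H_new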
Example #10
 def test_update_randomized_model(self):
     artemis_update = ArtemisUpdate(self.params, self.workers)
     artemis_update.workers_sub_set = [(self.workers[i], self.cost_models[i])
                                       for i in range(self.params.nb_devices)]
     artemis_update.H = torch.FloatTensor([-1 for i in range(10)])
     artemis_update.omega = [torch.FloatTensor([i for i in range(0, 100, 10)]),
                             torch.FloatTensor([i for i in range(0, 20, 2)])]
     # Without momentum, should have no impact.
     artemis_update.v = torch.FloatTensor([1 for i in range(10)])
     artemis_update.update_randomized_model()
     # Expected values assume the omegas are combined by averaging:
     # v = H + mean(omega), i.e. -1 + 6 * i per coordinate.
     self.assertTrue(torch.all(artemis_update.v.eq(
         torch.FloatTensor([6 * i - 1 for i in range(10)]))))
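The expected vector can be checked by hand: averaging the two stored omegas gives (10*i + 2*i) / 2 = 6*i per coordinate, and adding H = -1 yields 6*i - 1, so the initial v indeed has no impact. A standalone check of that arithmetic:

import torch

omega = [torch.FloatTensor([i for i in range(0, 100, 10)]),
         torch.FloatTensor([i for i in range(0, 20, 2)])]
H = torch.FloatTensor([-1 for _ in range(10)])
expected = H + torch.stack(omega).mean(dim=0)  # H plus the average of the omegas
assert torch.all(expected.eq(torch.FloatTensor([6 * i - 1 for i in range(10)])))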
Example #11
 def __update_method__(self) -> BasicGradientUpdate:
     return ArtemisUpdate(self.parameters, self.workers)