Example #1
 def test_Artemis(self):
     params = Artemis().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that H has not been updated.
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, update.g))
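The tests in this file rely on module-level fixtures (DIM, x, y, w, zero_tensor, ones_tensor) that are defined outside the snippets shown. A minimal sketch of what they could look like, assuming synthetic least-squares data for a single worker; the concrete values, shapes, and the seed are assumptions, not taken from the original test module:

import torch

DIM = 10                          # problem dimensionality (assumed value)
torch.manual_seed(0)

# Synthetic least-squares data handed to the single worker (shapes assumed).
x = torch.randn(20, DIM)
y = x @ torch.randn(DIM) + 0.1 * torch.randn(20)

w = torch.randn(DIM)              # model iterate passed to update.compute()
zero_tensor = torch.zeros(DIM)    # reference tensors used by the assertions
ones_tensor = torch.ones(DIM)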
Example #2
 def test_QSGD(self):
     params = Qsgd().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     # Check that no memory has been updated.
     self.assertTrue(torch.equal(update.h, zero_tensor))
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that nothing has been quantized for the return value.
     self.assertTrue(torch.equal(update.value_to_compress, zero_tensor))
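Qsgd is plain quantized-gradient descent: the uplink compresses the gradient itself, so both memories h and H must stay at zero, which is exactly what the two assertTrue checks above encode. For reference, a minimal sketch of a QSGD-style stochastic quantizer on s levels; that quantization_param plays the role of s is an assumption based on the QSGD scheme, not read from this codebase:

def qsgd_quantize(v, s=10):
    # Unbiased stochastic quantization on s levels: E[output] == v.
    norm = torch.norm(v)
    if norm == 0:
        return v.clone()
    level = v.abs() * s / norm                     # real-valued level in [0, s]
    lower = level.floor()
    xi = lower + torch.bernoulli(level - lower)    # stochastic rounding
    return norm * torch.sign(v) * xi / s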
Example #3
 def test_doubleGRADIENTcompression_WITH_additional_memory(self):
     params = DoreVariant().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     artificial_l = ones_tensor.clone().detach()
     # We artificially set a different memory to check that it affects the update computation.
     update.H = artificial_l.clone().detach()
     update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the memory H has been updated.
     self.assertFalse(torch.equal(update.H, artificial_l))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, update.g - artificial_l))
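The assertions above pin down the memory mechanism of this Dore-style variant: only the difference g - H is compressed, and H then drifts toward the gradient, so a stationary signal eventually costs almost nothing to transmit. A sketch of that contract, reusing the qsgd_quantize sketch from Example #2 (that alpha corresponds to up_learning_rate is an assumption):

def compress_with_memory(g, H, alpha=0.5, s=10):
    # value_to_compress in the test above is exactly g - H.
    delta = g - H
    q = qsgd_quantize(delta, s)
    # Memory update: H moves, hence update.H != artificial_l afterwards.
    H_new = H + alpha * q
    return q, H_new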
Example #4
 def test_Diana(self):
     params = Diana().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     new_model_param = update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that nothing has been quantized for the return value.
     # There is a problem with this test: it passes only if run with Artemis settings.
     self.assertTrue(torch.equal(update.value_to_compress, zero_tensor))
Example #5
 def test_doubleMODELcompression_without_memory(self):
     params = SGDDoubleModelCompressionWithoutMem().define(
         n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     new_w = update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the memory has not been updated.
     self.assertTrue(torch.equal(update.H, zero_tensor))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, new_w))
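Here the downlink compresses the freshly updated model itself (value_to_compress == new_w), and no memory is kept, so the memory stays at zero. A sketch of that step, reusing qsgd_quantize from Example #2; the gradient step new_w = w - lr * g_hat is an assumption about how compute() produces new_w:

def downlink_without_memory(w, lr, g_hat, s=10):
    # Without memory, the whole new model is what gets quantized.
    new_w = w - lr * g_hat
    return qsgd_quantize(new_w, s), new_w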
Example #6
 def test_doubleMODELcompression_WITH_memory(self):
     params = MCM().define(n_dimensions=DIM, nb_devices=1, quantization_param=10)
     params.up_learning_rate = 0.5
     workers = [Worker(0, params)]
     workers[0].set_data(x, y)
     workers[0].cost_model.L = workers[0].cost_model.local_L
     update = ArtemisUpdate(params, workers)
     artificial_l = ones_tensor.clone().detach()
     update.H = artificial_l.clone().detach()
     new_w = update.compute(w, 2, 2)
     # Check that gradients have been updated.
     self.assertFalse(torch.equal(update.g, zero_tensor))
     self.assertFalse(torch.equal(update.v, zero_tensor))
     self.assertFalse(torch.equal(update.h, zero_tensor))
     # Check that the memory H has been updated.
     self.assertFalse(torch.equal(update.H, artificial_l))
     # Check that the correct value has been compressed.
     self.assertTrue(torch.equal(update.value_to_compress, new_w - artificial_l))
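The MCM test checks the memory variant of the same downlink: the shift new_w - H is compressed instead of the full model, and H is then updated. A sketch matching the last two assertions, again reusing qsgd_quantize from Example #2 (function name, signature, and the exact memory rule are assumptions):

def downlink_with_memory(w, lr, g_hat, H, alpha=0.5, s=10):
    new_w = w - lr * g_hat
    # value_to_compress in the test is new_w - H (artificial_l plays H's role).
    q = qsgd_quantize(new_w - H, s)
    H_new = H + alpha * q          # hence update.H != artificial_l afterwards
    return q, H_new, new_w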