Example #1
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        self.X1 = np.c_[self.X, np.ones(3)]

        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])
Example #2
class TestHomoLRGradient(unittest.TestCase):
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        self.X1 = np.c_[self.X, np.ones(3)]

        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])

    def test_gradient_length(self):
        fit_intercept = False
        grad = self.gradient_operator.compute_gradient(self.values, self.coef,
                                                       0, fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1])

        taylor_grad = self.taylor_operator.compute_gradient(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1])
        self.assertTrue(np.sum(np.abs(grad - taylor_grad)) < 0.0001)

        fit_intercept = True
        grad = self.gradient_operator.compute_gradient(self.values, self.coef,
                                                       0, fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1] + 1)

        taylor_grad = self.taylor_operator.compute_gradient(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1] + 1)

        self.assertTrue(np.sum(np.abs(grad - taylor_grad)) < 0.0001)
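
The closeness asserted above follows from the approximation TaylorLogisticGradient is built on. A minimal standalone sketch (assuming the operator uses the first-order expansion sigmoid(z) ≈ 0.5 + 0.25·z, and ignoring how FATE batches and averages samples) that reproduces the comparison on the same toy data:

import numpy as np

# Toy fixture mirroring the tests above.
X = np.array([[1, 2, 3, 4, 5],
              [3, 2, 4, 5, 1],
              [2, 2, 3, 1, 1]]) / 10
y = np.array([1, 1, -1])                     # labels in {-1, +1}
coef = np.array([2, 2.3, 3, 4, 2.1]) / 10

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

margins = X @ coef                           # w.x per sample

# Exact gradient of log(1 + exp(-y * w.x)) w.r.t. w, averaged over samples.
exact = ((sigmoid(y * margins) - 1) * y) @ X / len(y)

# Taylor version: sigmoid(z) ~ 0.5 + 0.25*z turns the per-sample residual
# into the linear expression 0.25*w.x - 0.5*y.
taylor = (0.25 * margins - 0.5 * y) @ X / len(y)

print(np.abs(exact - taylor).max())          # tiny, since |w.x| is small here

The Taylor residual is linear in the data, which is what makes it usable on Paillier ciphertexts, as the branch in Example #3 below relies on.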
Example #3
    def _init_model(self, params):
        super()._init_model(params)
        # Register the Paillier cipher protocol on the transfer variables used
        # to exchange keys and re-encryption messages.
        self.cipher.register_paillier_cipher(self.transfer_variable)
        if params.encrypt_param.method in [consts.PAILLIER]:
            # Paillier ciphertexts only support linear operations, so use the
            # Taylor-approximated gradient and re-encrypt periodically.
            self.use_encrypt = True
            self.gradient_operator = TaylorLogisticGradient()
            self.re_encrypt_batches = params.re_encrypt_batches
        else:
            self.use_encrypt = False
            self.gradient_operator = LogisticGradient()
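
Paillier encryption is additively homomorphic: ciphertexts support addition and multiplication by plaintext scalars, but not the non-linear sigmoid. That is why the Paillier branch above swaps in TaylorLogisticGradient, whose residual stays linear in the encrypted values, while the unencrypted path keeps the exact LogisticGradient. A minimal sketch of that dispatch, with hypothetical stand-in names (pick_gradient_operator and paillier_label are illustrative, not FATE API):

def pick_gradient_operator(encrypt_method, paillier_label="Paillier"):
    # The sigmoid cannot be evaluated on Paillier ciphertexts, so fall back
    # to the linear Taylor approximation when that cipher is selected.
    if encrypt_method == paillier_label:
        return TaylorLogisticGradient()
    return LogisticGradient()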