Example #1
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        self.X1 = np.c_[self.X, np.ones(3)]

        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])
Example #2
import unittest

import numpy as np

# PaillierEncrypt, LogisticGradient, TaylorLogisticGradient and Instance are
# imported from FATE's federatedml package (exact module paths depend on the FATE version).


class TestHomoLRGradient(unittest.TestCase):
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        # three toy samples with five features each, scaled so that w.x stays small
        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        # X1 appends a constant column of ones for the fit_intercept case
        self.X1 = np.c_[self.X, np.ones(3)]

        # labels are encoded as {-1, +1}
        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        # weight vector; coef1 appends an intercept term of 1
        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])

    def test_gradient_length(self):
        fit_intercept = False
        grad = self.gradient_operator.compute_gradient(self.values, self.coef,
                                                       0, fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1])

        taylor_grad = self.taylor_operator.compute_gradient(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1])
        self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)

        fit_intercept = True
        grad = self.gradient_operator.compute_gradient(self.values, self.coef,
                                                       0, fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1] + 1)

        taylor_grad = self.taylor_operator.compute_gradient(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1] + 1)

        self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)
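The closeness assertion works because TaylorLogisticGradient approximates the sigmoid by its first-order expansion sigmoid(z) ≈ 0.5 + 0.25·z, which is accurate for the small w·x values produced by the /10 scaling in setUp. Below is a minimal plain-numpy sketch of the two per-sample gradients; the helper names and the averaging are illustrative and independent of the FATE operator classes:

import numpy as np

def exact_grad(X, y, w):
    # gradient of log(1 + exp(-y * X.w)) w.r.t. w, labels y in {-1, +1}
    d = -y / (1.0 + np.exp(y * X.dot(w)))
    return (d[:, None] * X).mean(axis=0)

def taylor_grad(X, y, w):
    # same gradient with sigmoid(z) ~= 0.5 + 0.25*z, i.e. per-sample factor 0.25*X.w - 0.5*y
    d = 0.25 * X.dot(w) - 0.5 * y
    return (d[:, None] * X).mean(axis=0)

X = np.array([[1, 2, 3, 4, 5], [3, 2, 4, 5, 1], [2, 2, 3, 1, 1]]) / 10
y = np.array([1, 1, -1])
w = np.array([2, 2.3, 3, 4, 2.1]) / 10

# tiny difference, since the sigmoid is nearly linear for these small w.x values
print(np.abs(exact_grad(X, y, w) - taylor_grad(X, y, w)).max())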
Example #3
    def __init__(self):
        super(HomoLRGuest, self).__init__()
        self.gradient_operator = LogisticGradient()
        self.loss_history = []
        self.role = consts.GUEST
        self.aggregator = aggregator.Guest()

        self.zcl_encrypt_operator = PaillierEncrypt()
Example #4
    def _init_model(self, params):
        super()._init_model(params)
        self.cipher.register_paillier_cipher(self.transfer_variable)
        if params.encrypt_param.method in [consts.PAILLIER]:
            self.use_encrypt = True
            self.gradient_operator = TaylorLogisticGradient()
            self.re_encrypt_batches = params.re_encrypt_batches
        else:
            self.use_encrypt = False
            self.gradient_operator = LogisticGradient()
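The Paillier branch pairs encryption with TaylorLogisticGradient because Paillier is only additively homomorphic: ciphertexts can be added together and multiplied by plaintext scalars, but no nonlinear function (such as the sigmoid) can be evaluated on them, so the gradient must stay linear in the encrypted quantities. A small sketch with the python-paillier library (phe), used here only as a stand-in for FATE's PaillierEncrypt wrapper:

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair(n_length=1024)

enc_wx = public_key.encrypt(0.421)        # encrypted w.x for one sample
# the Taylor factor 0.25*w.x - 0.5*y only needs scalar multiplication and addition,
# both of which Paillier ciphertexts support:
enc_factor = enc_wx * 0.25 + (-0.5 * 1)   # y = +1 for this sample
print(private_key.decrypt(enc_factor))    # ~ -0.3948
# sigmoid(w.x) would require a nonlinear operation on the ciphertext, which Paillier cannot do.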
Example #5
    def __init__(self):
        super(HomoLRGuest, self).__init__()
        self.gradient_operator = LogisticGradient()
        self.loss_history = []
        self.role = consts.GUEST