Example #1
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        self.X1 = np.c_[self.X, np.ones(3)]

        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])
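
A note on the X1/coef1 variants above: appending a column of ones to X and the intercept value to coef folds the bias term into the weight vector, so X1 @ coef1 equals X @ coef + intercept. A minimal standalone check in plain NumPy, independent of the FATE classes used in the fixture:

import numpy as np

# Standalone check of the fixture's intercept-folding trick (illustration only).
X = np.array([[1, 2, 3, 4, 5], [3, 2, 4, 5, 1], [2, 2, 3, 1, 1]]) / 10
coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
intercept = 1.0  # the trailing [1] appended to coef1 above plays this role

X1 = np.c_[X, np.ones(3)]             # bias column of ones
coef1 = np.append(coef, [intercept])  # intercept folded into the weights
assert np.allclose(X1 @ coef1, X @ coef + intercept)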
Example #2
class TestHomoLRGradient(unittest.TestCase):
    def setUp(self):
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        self.gradient_operator = LogisticGradient()
        self.taylor_operator = TaylorLogisticGradient()

        self.X = np.array([[1, 2, 3, 4, 5],
                           [3, 2, 4, 5, 1],
                           [2, 2, 3, 1, 1]]) / 10
        self.X1 = np.c_[self.X, np.ones(3)]

        self.Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.X):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values.append((idx, inst))

        self.values1 = []
        for idx, x in enumerate(self.X1):
            inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
            self.values1.append((idx, inst))

        self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
        self.coef1 = np.append(self.coef, [1])

    def test_gradient_length(self):
        fit_intercept = False
        grad, loss = self.gradient_operator.compute(self.values, self.coef, 0,
                                                    fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1])

        taylor_grad, loss = self.taylor_operator.compute(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1])
        self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)

        fit_intercept = True
        grad, loss = self.gradient_operator.compute(self.values, self.coef, 0,
                                                    fit_intercept)
        self.assertEqual(grad.shape[0], self.X.shape[1] + 1)

        taylor_grad, loss = self.taylor_operator.compute(
            self.values, self.coef, 0, fit_intercept)
        self.assertEqual(taylor_grad.shape[0], self.X.shape[1] + 1)

        self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)
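
What the test above is really checking is that the Taylor-approximated gradient tracks the exact logistic gradient when w·x is small. Below is a minimal standalone sketch of that comparison in plain NumPy, with labels in {-1, +1} as in the fixture; the exact sign and averaging conventions inside LogisticGradient and TaylorLogisticGradient may differ, so treat this as an illustration rather than FATE's implementation.

import numpy as np

# Standalone illustration; not FATE's LogisticGradient implementation.
X = np.array([[1, 2, 3, 4, 5], [3, 2, 4, 5, 1], [2, 2, 3, 1, 1]]) / 10
y = np.array([1, 1, -1])
w = np.array([2, 2.3, 3, 4, 2.1]) / 10

wx = X @ w
exact_d = (1.0 / (1.0 + np.exp(-y * wx)) - 1.0) * y   # (sigmoid(y*w.x) - 1) * y
taylor_d = 0.25 * wx - 0.5 * y                        # from sigmoid(t) ~ 0.5 + 0.25*t and y*y == 1
exact_grad = (exact_d[:, None] * X).mean(axis=0)
taylor_grad = (taylor_d[:, None] * X).mean(axis=0)
print(np.abs(exact_grad - taylor_grad).max())         # close to 0 for small w.x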
Example #3
    def __init__(self, params: LogisticParam):
        super(HomoLRHost, self).__init__(params)

        self.learning_rate = params.learning_rate
        self.batch_size = params.batch_size
        self.encrypt_params = params.encrypt_param

        if self.encrypt_params.method in [consts.PAILLIER]:
            self.use_encrypt = True
        else:
            self.use_encrypt = False

        if self.use_encrypt and params.penalty != consts.L2_PENALTY:
            raise RuntimeError("Encrypted h**o-lr supports L2 penalty only")

        if self.use_encrypt:
            self.gradient_operator = TaylorLogisticGradient()
            self.re_encrypt_batches = params.re_encrypt_batches
        else:
            self.gradient_operator = LogisticGradient()

        self.aggregator = HomoFederatedAggregator()
        self.party_weight = params.party_weight

        self.optimizer = Optimizer(learning_rate=self.learning_rate, opt_method_name=params.optimizer)
        self.transfer_variable = HomoLRTransferVariable()
        self.initializer = Initializer()
        self.mini_batch_obj = None
        self.evaluator = Evaluation(classi_type=consts.BINARY)
        self.classes_ = [0, 1]
        self.has_sychronized_encryption = False
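
The reason TaylorLogisticGradient is selected whenever Paillier encryption is on: Paillier is only additively homomorphic, so a party can add ciphertexts and multiply a ciphertext by a plaintext scalar, but it cannot evaluate the sigmoid on an encrypted value. The first-order Taylor residual 0.25*w·x - 0.5*y is linear in the encrypted quantity, so it stays within those operations. A hedged sketch using the third-party python-paillier (phe) package as a stand-in, since PaillierEncrypt's encrypt/decrypt interface is not shown in these snippets:

import numpy as np
from phe import paillier

# Illustration only: uses the `phe` package, not FATE's PaillierEncrypt.
public_key, private_key = paillier.generate_paillier_keypair(n_length=1024)

x = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
w = np.array([0.2, 0.23, 0.3, 0.4, 0.21])
y = 1

enc_y = public_key.encrypt(float(y))                  # label arrives encrypted
enc_residual = 0.25 * float(w @ x) + (-0.5) * enc_y   # plaintext + scalar*ciphertext: allowed
enc_grad = [enc_residual * float(xi) for xi in x]     # ciphertext * plaintext scalar: allowed

grad = np.array([private_key.decrypt(g) for g in enc_grad])
print(grad)  # equals (0.25*w.x - 0.5*y) * x computed in the clear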
Example #4
    def setUp(self):

        self.guest_X = np.array([[1, 2, 3, 4, 5],
                                 [3, 2, 4, 5, 1],
                                 [2, 2, 3, 1, 1]]) / 10
        self.guest_Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.guest_X):
            inst = Instance(inst_id=idx, features=x, label=self.guest_Y[idx])
            self.values.append((idx, inst))

        self.host_X = np.array([[1, 1.2, 3.1, 4, 5],
                                [2.3, 2, 4, 5.3, 1],
                                [2, 2.2, 1.3, 1, 1.6]]) / 10
        self.host_Y = np.array([[-1], [1], [-1]])

        self.host_values = []
        for idx, x in enumerate(self.host_X):
            inst = Instance(inst_id=idx, features=x, label=self.host_Y[idx])
            self.values.append((idx, inst))

        self.max_iter = 10
        self.alpha = 0.01
        self.learning_rate = 0.01
        optimizer = 'SGD'
        self.gradient_operator = LogisticGradient()
        self.initializer = Initializer()
        self.fit_intercept = True
        self.init_param_obj = InitParam(fit_intercept=self.fit_intercept)
        self.updater = L2Updater(self.alpha, self.learning_rate)
        self.optimizer = Optimizer(learning_rate=self.learning_rate,
                                   opt_method_name=optimizer)
        self.__init_model()
Example #5
    def __init__(self):
        super(HomoLRGuest, self).__init__()
        self.aggregator = HomoFederatedAggregator
        self.gradient_operator = LogisticGradient()

        self.initializer = Initializer()
        self.classes_ = [0, 1]

        self.evaluator = Evaluation()
        self.loss_history = []
        self.is_converged = False
        self.role = consts.GUEST
Example #6
    def _init_model(self, params):
        super(HomoLRHost, self)._init_model(params)
        encrypt_params = params.encrypt_param
        if encrypt_params.method in [consts.PAILLIER]:
            self.use_encrypt = True
        else:
            self.use_encrypt = False

        if self.use_encrypt and params.penalty == 'L1':
            raise RuntimeError(
                "Encrypted homo-lr supports L2 penalty or 'none' only")

        if self.use_encrypt:
            self.gradient_operator = TaylorLogisticGradient()
            self.re_encrypt_batches = params.re_encrypt_batches
        else:
            self.gradient_operator = LogisticGradient()
Example #7
    def __init__(self, params: LogisticParam):
        super(HomoLRGuest, self).__init__(params)
        self.learning_rate = params.learning_rate
        self.aggregator = HomoFederatedAggregator
        self.gradient_operator = LogisticGradient()
        self.party_weight = params.party_weight

        self.optimizer = Optimizer(learning_rate=self.learning_rate,
                                   opt_method_name=params.optimizer)
        self.transfer_variable = HomoLRTransferVariable()
        self.initializer = Initializer()
        self.classes_ = [0, 1]

        self.evaluator = Evaluation()
        self.header = []
        self.penalty = params.penalty
        self.loss_history = []
        self.is_converged = False
Example #8
    def __init__(self, params: LogisticParam):
        """
        :param penalty: l1 or l2
        :param alpha:
        :param lr:
        :param eps:
        :param max_iter:
        :param optim_method: must be one of ['sgd', 'RMSProp', 'Adam', 'AdaGrad']
        :param batch_size: only used when optim_method is mini-batch; the mini-batch size
        """
        super(HomoLRGuest, self).__init__(params)

        self.learning_rate = params.learning_rate
        self.aggregator = HomoFederatedAggregator
        self.gradient_operator = LogisticGradient()
        self.party_weight = params.party_weight

        self.optimizer = Optimizer(learning_rate=self.learning_rate,
                                   opt_method_name=params.optimizer)
        self.transfer_variable = HomoLRTransferVariable()
        self.initializer = Initializer()
        self.classes_ = [0, 1]

        self.evaluator = Evaluation()
Example #9
class TestHomoLR(unittest.TestCase):
    def setUp(self):

        self.guest_X = np.array([[1, 2, 3, 4, 5],
                                 [3, 2, 4, 5, 1],
                                 [2, 2, 3, 1, 1]]) / 10
        self.guest_Y = np.array([[1], [1], [-1]])

        self.values = []
        for idx, x in enumerate(self.guest_X):
            inst = Instance(inst_id=idx, features=x, label=self.guest_Y[idx])
            self.values.append((idx, inst))

        self.host_X = np.array([[1, 1.2, 3.1, 4, 5],
                                [2.3, 2, 4, 5.3, 1],
                                [2, 2.2, 1.3, 1, 1.6]]) / 10
        self.host_Y = np.array([[-1], [1], [-1]])

        self.host_values = []
        for idx, x in enumerate(self.host_X):
            inst = Instance(inst_id=idx, features=x, label=self.host_Y[idx])
            self.values.append((idx, inst))

        self.max_iter = 10
        self.alpha = 0.01
        self.learning_rate = 0.01
        optimizer = 'SGD'
        self.gradient_operator = LogisticGradient()
        self.initializer = Initializer()
        self.fit_intercept = True
        self.init_param_obj = InitParam(fit_intercept=self.fit_intercept)
        self.updater = L2Updater(self.alpha, self.learning_rate)
        self.optimizer = Optimizer(learning_rate=self.learning_rate,
                                   opt_method_name=optimizer)
        self.__init_model()

    def __init_model(self):
        model_shape = self.guest_X.shape[1]
        w = self.initializer.init_model(model_shape,
                                        init_params=self.init_param_obj)
        if self.fit_intercept:
            self.coef_ = w[:-1]
            self.intercept_ = w[-1]
        else:
            self.coef_ = w
            self.intercept_ = 0
        return w

    def __init_host_model(self):
        model_shape = self.host_X.shape[1]
        w = self.initializer.init_model(model_shape,
                                        init_params=self.init_param_obj)
        if self.fit_intercept:
            self.host_coef_ = w[:-1]
            self.host_intercept_ = w[-1]
        else:
            self.host_coef_ = w
            self.host_intercept_ = 0
        return w

    def test_one_iter(self):
        w = self.__init_model()
        print("before training, coef: {}, intercept: {}".format(
            self.coef_, self.intercept_))
        self.assertEqual(self.coef_.shape[0], self.guest_X.shape[1])
        grad, loss = self.gradient_operator.compute(
            self.values,
            coef=self.coef_,
            intercept=self.intercept_,
            fit_intercept=self.fit_intercept)
        loss_norm = self.updater.loss_norm(self.coef_)
        loss = loss + loss_norm
        delta_grad = self.optimizer.apply_gradients(grad)
        self.update_model(delta_grad)
        print("After training, coef: {}, intercept: {}, loss: {}".format(
            self.coef_, self.intercept_, loss))

    def test_multi_iter(self):
        w = self.__init_model()
        loss_hist = [100]
        for iter_num in range(self.max_iter):
            grad, loss = self.gradient_operator.compute(
                self.values,
                coef=self.coef_,
                intercept=self.intercept_,
                fit_intercept=self.fit_intercept)
            loss_norm = self.updater.loss_norm(self.coef_)
            loss = loss + loss_norm
            delta_grad = self.optimizer.apply_gradients(grad)
            self.update_model(delta_grad)
            self.assertTrue(loss <= loss_hist[-1])
            loss_hist.append(loss)
        print(loss_hist)

    def test_host_iter(self):
        w = self.__init_host_model()
        print("before training, coef: {}, intercept: {}".format(
            self.coef_, self.intercept_))
        self.assertEqual(self.host_coef_.shape[0], self.host_X.shape[1])
        grad, loss = self.gradient_operator.compute(
            self.host_values,
            coef=self.host_coef_,
            intercept=self.host_intercept_,
            fit_intercept=self.fit_intercept)
        loss_norm = self.updater.loss_norm(self.host_coef_)
        # print("***********************************************")
        # print(loss, loss_norm)
        self.assertTrue(loss is None)

    def update_model(self, gradient):
        LOGGER.debug(
            "In update_model function, shape of coef: {}, shape of gradient: {}"
            .format(np.shape(self.coef_), np.shape(gradient)))
        if self.fit_intercept:
            if self.updater is not None:
                self.coef_ = self.updater.update_coef(self.coef_,
                                                      gradient[:-1])
            else:
                self.coef_ = self.coef_ - gradient[:-1]
            self.intercept_ -= gradient[-1]

        else:
            if self.updater is not None:
                self.coef_ = self.updater.update_coef(self.coef_, gradient)
            else:
                self.coef_ = self.coef_ - gradient
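
These listings omit the module header. Assuming the usual imports for this test file (unittest, numpy as np, and the FATE classes referenced above such as Instance, LogisticGradient, TaylorLogisticGradient, Initializer, InitParam, L2Updater, Optimizer and LOGGER), the standard unittest runner stub at the bottom of the file lets the class be executed directly:

if __name__ == '__main__':
    unittest.main()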