Example 1
    def __init__(self, logistic_params):
        super(HeteroLRArbiter, self).__init__(logistic_params)
        self.converge_func = DiffConverge(logistic_params.eps)

        # attribute
        self.pre_loss = None
        self.batch_num = None
        self.transfer_variable = HeteroLRTransferVariable()
        self.optimizer = Optimizer(logistic_params.learning_rate,
                                   logistic_params.optimizer)
        self.key_length = logistic_params.encrypt_param.key_length
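
The Optimizer built in these constructors only surfaces later through apply_gradients, which turns a raw aggregated gradient into the update sent back to host and guest. A minimal SGD-style sketch of that interface, assuming a plain learning-rate scaling (an illustration, not FATE's actual Optimizer):

import numpy as np

class OptimizerSketch:
    """Illustrative stand-in: apply_gradients scales the gradient by the
    learning rate, i.e. the step a plain SGD optimizer would return."""

    def __init__(self, learning_rate, method="sgd"):
        self.learning_rate = learning_rate
        self.method = method  # other methods (adagrad, adam, ...) not sketched here

    def apply_gradients(self, gradient):
        # Return the optimised step that the arbiter sends back to each party.
        return self.learning_rate * np.asarray(gradient)
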
Example 2
    def __init__(self, network_embedding_params: NetworkEmbeddingParam):
        super(HeteroNEArbiter, self).__init__(network_embedding_params)
        self.converge_func = DiffConverge(network_embedding_params.eps)

        # attribute
        self.pre_loss = None
        self.batch_num = None
        self.transfer_variable = HeteroNETransferVariable()
        self.optimizer = Optimizer(network_embedding_params.learning_rate, 
                                   network_embedding_params.optimizer)
        
        self.key_length = network_embedding_params.encrypt_param.key_length
Example 3
 def test_diff_converge(self):
     loss = 50
     eps = 0.00001
     converge_func = DiffConverge(eps=eps)
     iter_num = 0
     pre_loss = loss
     while iter_num < 500:
         loss *= 0.5
         converge_flag = converge_func.is_converge(loss)
         if converge_flag:
             break
         iter_num += 1
         pre_loss = loss
     self.assertTrue(math.fabs(pre_loss - loss) <= eps)
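
The test above pins down DiffConverge's contract: is_converge(loss) should return True once the absolute difference between two consecutive losses falls to eps or below. A minimal sketch consistent with that test (the real implementation may track its state differently):

import math

class DiffConvergeSketch:
    """Illustrative stand-in for DiffConverge: converges when the absolute
    change between consecutive losses is <= eps."""

    def __init__(self, eps=1e-5):
        self.eps = eps
        self.pre_loss = None  # loss seen on the previous call

    def is_converge(self, loss):
        if self.pre_loss is None:
            self.pre_loss = loss
            return False
        converged = math.fabs(self.pre_loss - loss) <= self.eps
        self.pre_loss = loss
        return converged
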
Example 4
 def __init__(self, guest: PlainFTLGuestModel, model_param: FTLModelParam,
              transfer_variable: HeteroFTLTransferVariable):
     super(HeteroFTLGuest, self).__init__()
     self.guest_model = guest
     self.model_param = model_param
     self.transfer_variable = transfer_variable
     self.max_iter = model_param.max_iter
     self.n_iter_ = 0
     self.converge_func = DiffConverge(eps=model_param.eps)
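
Together, max_iter, n_iter_ and converge_func define the same stopping pattern used by the arbiter classes below: iterate until the iteration cap is reached or the loss difference converges. A hedged sketch of that loop, where compute_epoch_loss is a hypothetical callable standing in for one federated training pass:

def run_training(model, compute_epoch_loss):
    """Sketch of the stop condition shared by these classes; `model` is
    assumed to expose max_iter, n_iter_ and converge_func."""
    model.n_iter_ = 0
    while model.n_iter_ < model.max_iter:
        loss = compute_epoch_loss(model.n_iter_)   # hypothetical helper
        model.n_iter_ += 1
        if model.converge_func.is_converge(loss):  # stop once |loss change| <= eps
            break
    return model.n_iter_
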
Example 5
class HeteroLRArbiter(BaseLogisticRegression):
    def __init__(self, logistic_params):
        # LogisticParamChecker.check_param(logistic_params)
        super(HeteroLRArbiter, self).__init__(logistic_params)
        self.converge_func = DiffConverge(logistic_params.eps)

        # attribute
        self.pre_loss = None
        self.batch_num = None
        self.transfer_variable = HeteroLRTransferVariable()
        self.optimizer = Optimizer(logistic_params.learning_rate,
                                   logistic_params.optimizer)
        self.key_length = logistic_params.encrypt_param.key_length

    def perform_subtasks(self, **training_info):
        """
        Performs any task that the arbiter is responsible for.

        This 'perform_subtasks' function serves as a handler for conducting any task that the arbiter is responsible
        for. For example, the 'perform_subtasks' function of the 'HeteroDNNLRArbiter' class located in
        'hetero_dnn_lr_arbiter.py' performs work related to updating/training the local neural networks of the guest
        or host.

        For this particular class (i.e., 'HeteroLRArbiter'), which serves as the base arbiter class for the
        neural-network-based hetero logistic regression model, the 'perform_subtasks' function does nothing. In other
        words, no subtask is performed by this arbiter.

        :param training_info: a dictionary holding training information
        """
        pass

    def fit(self, data_instances=None):
        """
        Train lr model of role arbiter
        Parameters
        ----------
        data_instances: DTable of Instance, input data
        """

        LOGGER.info("Enter hetero_lr_arbiter fit")
        if data_instances:
            # self.header = data_instance.schema.get('header')
            self.header = self.get_header(data_instances)
        else:
            self.header = []

        # Generate encrypt keys
        self.encrypt_operator.generate_key(self.key_length)
        public_key = self.encrypt_operator.get_public_key()
        LOGGER.info("public_key:{}".format(public_key))

        # remote is to send an object to other party
        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(
                              self.transfer_variable.paillier_pubkey),
                          role=consts.HOST,
                          idx=0)
        LOGGER.info("remote public_key to host")

        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(
                              self.transfer_variable.paillier_pubkey),
                          role=consts.GUEST,
                          idx=0)
        LOGGER.info("remote public_key to guest")

        # get method will block until the remote object is fetched.
        batch_info = federation.get(
            name=self.transfer_variable.batch_info.name,
            tag=self.transfer_variable.generate_transferid(
                self.transfer_variable.batch_info),
            idx=0)
        LOGGER.info("Get batch_info from guest:{}".format(batch_info))
        self.batch_num = batch_info["batch_num"]

        is_stop = False
        self.n_iter_ = 0
        while self.n_iter_ < self.max_iter:
            LOGGER.info("iter:{}".format(self.n_iter_))
            batch_index = 0
            iter_loss = 0
            while batch_index < self.batch_num:
                LOGGER.info("batch:{}".format(batch_index))
                host_gradient = federation.get(
                    name=self.transfer_variable.host_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.host_gradient, self.n_iter_,
                        batch_index),
                    idx=0)
                LOGGER.info("Get host_gradient from Host")

                guest_gradient = federation.get(
                    name=self.transfer_variable.guest_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.guest_gradient, self.n_iter_,
                        batch_index),
                    idx=0)
                LOGGER.info("Get guest_gradient from Guest")

                # aggregate gradient
                host_gradient, guest_gradient = np.array(
                    host_gradient), np.array(guest_gradient)
                gradient = np.hstack((host_gradient, guest_gradient))

                LOGGER.info("gradient shape={}".format(gradient.shape))

                # decrypt gradient
                for i in range(gradient.shape[0]):
                    gradient[i] = self.encrypt_operator.decrypt(gradient[i])

                # optimization
                optim_gradient = self.optimizer.apply_gradients(gradient)

                # separate optim_gradient according to the gradient sizes of Host and Guest
                separate_optim_gradient = HeteroFederatedAggregator.separate(
                    optim_gradient,
                    [host_gradient.shape[0], guest_gradient.shape[0]])
                host_optim_gradient = separate_optim_gradient[0]
                guest_optim_gradient = separate_optim_gradient[1]

                LOGGER.info("host data feature dims:{}".format(
                    np.array(host_optim_gradient).shape[0]))
                LOGGER.info("guest data feature dims:{}".format(
                    np.array(guest_optim_gradient).shape[0]))

                federation.remote(
                    host_optim_gradient,
                    name=self.transfer_variable.host_optim_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.host_optim_gradient,
                        self.n_iter_, batch_index),
                    role=consts.HOST,
                    idx=0)
                LOGGER.info("Remote host_optim_gradient to Host")

                federation.remote(
                    guest_optim_gradient,
                    name=self.transfer_variable.guest_optim_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.guest_optim_gradient,
                        self.n_iter_, batch_index),
                    role=consts.GUEST,
                    idx=0)
                LOGGER.info("Remote guest_optim_gradient to Guest")

                training_info = {
                    "iteration": self.n_iter_,
                    "batch_index": batch_index
                }
                self.perform_subtasks(**training_info)

                loss = federation.get(
                    name=self.transfer_variable.loss.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.loss, self.n_iter_,
                        batch_index),
                    idx=0)

                de_loss = self.encrypt_operator.decrypt(loss)
                iter_loss += de_loss
                # LOGGER.info("Get loss from guest:{}".format(de_loss))

                batch_index += 1

            # if converge
            loss = iter_loss / self.batch_num
            LOGGER.info("iter loss:{}".format(loss))
            if self.converge_func.is_converge(loss):
                is_stop = True

            federation.remote(is_stop,
                              name=self.transfer_variable.is_stopped.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.is_stopped,
                                  self.n_iter_, batch_index),
                              role=consts.HOST,
                              idx=0)
            LOGGER.info("Remote is_stop to host:{}".format(is_stop))

            federation.remote(is_stop,
                              name=self.transfer_variable.is_stopped.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.is_stopped,
                                  self.n_iter_, batch_index),
                              role=consts.GUEST,
                              idx=0)
            LOGGER.info("Remote is_stop to guest:{}".format(is_stop))

            self.n_iter_ += 1
            if is_stop:
                LOGGER.info("Model is converged, iter:{}".format(self.n_iter_))
                break

        LOGGER.info(
            "Reach max iter {} or converge, train model finish!".format(
                self.max_iter))
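
Example 5 uses HeteroFederatedAggregator.separate only through its call shape: given the flat optimised gradient and the host/guest segment sizes, it returns the two slices in that order. A plain-numpy sketch of that behaviour and of the surrounding hstack round trip (an assumption about the helper, not its actual implementation):

import numpy as np

def separate_sketch(gradient, sizes):
    """Split a flat gradient into consecutive chunks of the given sizes,
    mirroring how fit() splits optim_gradient back into host and guest parts."""
    parts, start = [], 0
    for size in sizes:
        parts.append(gradient[start:start + size])
        start += size
    return parts

# Usage mirroring the aggregation step in fit():
host_gradient = np.array([0.1, 0.2, 0.3])   # e.g. 3 host features
guest_gradient = np.array([0.4, 0.5])       # e.g. 2 guest features
gradient = np.hstack((host_gradient, guest_gradient))
host_part, guest_part = separate_sketch(
    gradient, [host_gradient.shape[0], guest_gradient.shape[0]])
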
Example 6
class HeteroNEArbiter(BaseNetworkEmbeddig):
    def __init__(self, network_embedding_params: NetworkEmbeddingParam):
        super(HeteroNEArbiter, self).__init__(network_embedding_params)
        self.converge_func = DiffConverge(network_embedding_params.eps)

        # attribute
        self.pre_loss = None
        self.batch_num = None
        self.transfer_variable = HeteroNETransferVariable()
        self.optimizer = Optimizer(network_embedding_params.learning_rate, 
                                   network_embedding_params.optimizer)
        
        self.key_length = network_embedding_params.encrypt_param.key_length

    def perform_subtasks(self, **training_info):
        """
        Performs any task that the arbiter is responsible for.

        This 'perform_subtasks' function serves as a handler for conducting any task that the arbiter is responsible
        for. For example, the 'perform_subtasks' function of the 'HeteroDNNLRArbiter' class located in
        'hetero_dnn_lr_arbiter.py' performs work related to updating/training the local neural networks of the guest
        or host.

        For this particular class (i.e., 'HeteroNEArbiter'), which serves as the arbiter class for the hetero
        network embedding model, the 'perform_subtasks' function does nothing. In other words, no subtask is
        performed by this arbiter.

        :param training_info: a dictionary holding training information
        """
        pass

    def fit(self, data_instances=None, node2id=None, local_instances=None, common_nodes=None):
        """
        Train network embedding of role arbiter
        Parameters
        ----------
        data_instances: DTable of Instance, input data
        """

        LOGGER.info("Enter hetero_ne_arbiter fit")
        
        # TODO: decide how data_instances should be handled here

        # Generate encrypt keys
        self.encrypt_operator.generate_key(self.key_length)
        public_key = self.encrypt_operator.get_public_key()
        LOGGER.info("public_key: {}".format(public_key))

        # remote public key to host and guest
        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),
                          role=consts.HOST,
                          idx=0)
        LOGGER.info("remote publick_key to host")

        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),
                          role=consts.GUEST,
                          idx=0)
        LOGGER.info("remote public_key to guest")

        batch_info = federation.get(name=self.transfer_variable.batch_info.name,
                                    tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),
                                    idx=0)
        LOGGER.info("Get batch_info from guest: {}".format(batch_info))
        self.batch_num = batch_info['batch_num']
        
        is_stop = False
        self.n_iter_ = 0

        while self.n_iter_ < self.max_iter:
            LOGGER.info("iter: {}".format(self.n_iter_))

            #######
            # Horizontal learning
            host_common_embedding = federation.get(name=self.transfer_variable.host_common_embedding.name,
                                                   tag=self.transfer_variable.generate_transferid(
                                                       self.transfer_variable.host_common_embedding,
                                                       self.n_iter_,
                                                       0
                                                   ),
                                                   idx=0)
            guest_common_embedding = federation.get(name=self.transfer_variable.guest_common_embedding.name,
                                                    tag=self.transfer_variable.generate_transferid(
                                                        self.transfer_variable.guest_common_embedding,
                                                        self.n_iter_,
                                                        0
                                                    ),
                                                    idx=0)

            common_embedding = host_common_embedding.join(guest_common_embedding, lambda host, guest: (host + guest) / 2)

            federation.remote(common_embedding,
                              name=self.transfer_variable.common_embedding.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.common_embedding,
                                  self.n_iter_,
                                  0
                              ),
                              role=consts.HOST,
                              idx=0)
            
            federation.remote(common_embedding,
                              name=self.transfer_variable.common_embedding.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.common_embedding,
                                  self.n_iter_,
                                  0
                              ),
                              role=consts.GUEST,
                              idx=0)

            LOGGER.info("Iter {}, horizontally learning finish".format(self.n_iter_))

            #######

            batch_index = 0
            iter_loss = 0

            while batch_index < self.batch_num:
                LOGGER.info("batch: {}".format(batch_index))

                # host_gradient shape = (batch_size, dim)
                host_gradient = federation.get(name=self.transfer_variable.host_gradient.name,
                                             tag=self.transfer_variable.generate_transferid( 
                                                 self.transfer_variable.host_gradient, self.n_iter_, batch_index),
                                             idx=0
                                             )
                LOGGER.info("Get host_gradient from GUEST")

                # guest_gradient DTable key = sample_id, value=gradient(shape=(batch_size, dim))
                guest_gradient = federation.get(name=self.transfer_variable.guest_gradient.name,
                                                tag=self.transfer_variable.generate_transferid(
                                                    self.transfer_variable.guest_gradient, self.n_iter_, batch_index),
                                                idx=0)
                LOGGER.info("Get guest_gradient from GUEST")

                
                #host_gradient, guest_gradient = np.array(host_gradient), np.array(guest_gradient)
                #gradient = np.vstack((host_gradient, guest_gradient))
                
                # if the gradients are encrypted, remember to decrypt
                # for i in range(gradient.shape[0]):
                #     for j in range(gradient.shape[1]):
                #         gradient[i, j] = self.encrypt_operator.decrypt(gradient[i, j])
                
                #optim_gradient = self.optimizer.apply_gradients(gradient)

                #host_optim_gradient = optim_gradient[: host_gradient.shape[0], :]
                #guest_optim_gradient = optim_gradient[host_gradient.shape[0]:, :]
                host_optim_gradient = host_gradient.mapValues(self.optimizer.apply_gradients)
                guest_optim_gradient = guest_gradient.mapValues(self.optimizer.apply_gradients)

                LOGGER.info("host gradients number: {}".format(host_optim_gradient.count()))
                LOGGER.info("guest gradients number: {}".format(guest_optim_gradient.count()))  

                federation.remote(host_optim_gradient,
                                  name=self.transfer_variable.host_optim_gradient.name,
                                  tag=self.transfer_variable.generate_transferid(
                                      self.transfer_variable.host_optim_gradient, 
                                      self.n_iter_, 
                                      batch_index
                                  ),
                                  role=consts.HOST,
                                  idx=0)
                LOGGER.info("Remote host_optim_gradient to Host")

                federation.remote(guest_optim_gradient,
                                  name=self.transfer_variable.guest_optim_gradient.name,
                                  tag=self.transfer_variable.generate_transferid(
                                      self.transfer_variable.guest_optim_gradient,
                                      self.n_iter_,
                                      batch_index 
                                  ),
                                  role=consts.GUEST,
                                  idx=0)
                LOGGER.info("Remote guest_optim_gradient to Guest")
                
                training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
                self.perform_subtasks(**training_info)

                loss = federation.get(name=self.transfer_variable.loss.name,
                                      tag=self.transfer_variable.generate_transferid(
                                          self.transfer_variable.loss,
                                          self.n_iter_,
                                          batch_index
                                      ),
                                      idx=0)

                #de_loss = self.encrypt_operator.decrypt(loss)
                de_loss = loss
                LOGGER.info("Get loss from guest: {}".format(de_loss))
                iter_loss += de_loss


                batch_index += 1
            
            loss = iter_loss / self.batch_num
            LOGGER.info("iter loss: {}".format(loss))

            ########
            host_common_embedding = federation.get(name=self.transfer_variable.host_common_embedding.name,
                                                   tag=self.transfer_variable.generate_transferid(
                                                       self.transfer_variable.host_common_embedding,
                                                       self.n_iter_,
                                                       1
                                                   ),
                                                   idx=0)
            guest_common_embedding = federation.get(name=self.transfer_variable.guest_common_embedding.name,
                                                    tag=self.transfer_variable.generate_transferid(
                                                        self.transfer_variable.guest_common_embedding,
                                                        self.n_iter_,
                                                        1
                                                    ),
                                                    idx=0)

            common_embedding = host_common_embedding.join(guest_common_embedding, lambda host, guest: (host + guest) / 2)

            federation.remote(common_embedding,
                              name=self.transfer_variable.common_embedding.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.common_embedding,
                                  self.n_iter_,
                                  1
                              ),
                              role=consts.HOST,
                              idx=0)
            
            federation.remote(common_embedding,
                              name=self.transfer_variable.common_embedding.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.common_embedding,
                                  self.n_iter_,
                                  1
                              ),
                              role=consts.GUEST,
                              idx=0)
            ########


            if self.converge_func.is_converge(loss):
                is_stop = True
            
            federation.remote(is_stop,
                              name=self.transfer_variable.is_stopped.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.is_stopped,
                                  self.n_iter_
                              ),
                              role=consts.HOST,
                              idx=0)
            LOGGER.info("Remote is_stop to host: {}".format(is_stop))

            federation.remote(is_stop,
                              name=self.transfer_variable.is_stopped.name,
                              tag=self.transfer_variable.generate_transferid(
                                  self.transfer_variable.is_stopped,
                                  self.n_iter_
                              ),
                              role=consts.GUEST,
                              idx=0)
            LOGGER.info("Remote is_stop to guest: {}".format(is_stop))

            self.n_iter_ += 1
            if is_stop:
                LOGGER.info("Model is converged, iter: {}".format(self.n_iter_))
                break
        LOGGER.info("Reach max iter {} or convergence, train model finish!".format(self.max_iter))
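
The horizontal step in Example 6 merges the common-node embeddings with DTable.join and averages the two sides. A dictionary-based sketch of that join semantics (the real DTable is a distributed table; this only illustrates the lambda used above):

import numpy as np

def join_average(host_embeddings, guest_embeddings):
    """Dict stand-in for host_common_embedding.join(guest_common_embedding,
    lambda host, guest: (host + guest) / 2): keep keys present on both
    sides and average their embedding vectors."""
    common_keys = host_embeddings.keys() & guest_embeddings.keys()
    return {k: (host_embeddings[k] + guest_embeddings[k]) / 2 for k in common_keys}

# Example with two common nodes:
host = {"n1": np.array([1.0, 2.0]), "n2": np.array([0.0, 4.0])}
guest = {"n1": np.array([3.0, 2.0]), "n2": np.array([2.0, 0.0])}
common = join_average(host, guest)   # {"n1": [2., 2.], "n2": [1., 2.]}
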
Example 7
class HeteroLRArbiter(BaseLogisticRegression):
    def __init__(self, logistic_params):
        super(HeteroLRArbiter, self).__init__(logistic_params)
        self.converge_func = DiffConverge(logistic_params.eps)

        # attribute
        self.pre_loss = None
        self.batch_num = None
        self.transfer_variable = HeteroLRTransferVariable()
        self.optimizer = Optimizer(logistic_params.learning_rate,
                                   logistic_params.optimizer)
        self.key_length = logistic_params.encrypt_param.key_length

    def fit(self, data_instance=None):
        # Generate encrypt keys
        self.encrypt_operator.generate_key(self.key_length)
        public_key = self.encrypt_operator.get_public_key()
        LOGGER.info("public_key:{}".format(public_key))
        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(
                              self.transfer_variable.paillier_pubkey),
                          role=consts.HOST,
                          idx=0)
        LOGGER.info("remote public_key to host")

        federation.remote(public_key,
                          name=self.transfer_variable.paillier_pubkey.name,
                          tag=self.transfer_variable.generate_transferid(
                              self.transfer_variable.paillier_pubkey),
                          role=consts.GUEST,
                          idx=0)
        LOGGER.info("remote public_key to guest")

        batch_info = federation.get(
            name=self.transfer_variable.batch_info.name,
            tag=self.transfer_variable.generate_transferid(
                self.transfer_variable.batch_info),
            idx=0)
        LOGGER.info("Get batch_info from guest:{}".format(batch_info))
        self.batch_num = batch_info["batch_num"]

        is_stop = False
        self.n_iter_ = 0
        while self.n_iter_ < self.max_iter:
            LOGGER.info("iter:{}".format(self.n_iter_))
            batch_index = 0
            while batch_index < self.batch_num:
                LOGGER.info("batch:{}".format(batch_index))
                host_gradient = federation.get(
                    name=self.transfer_variable.host_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.host_gradient, self.n_iter_,
                        batch_index),
                    idx=0)
                LOGGER.info("Get host_gradient from Host")
                guest_gradient = federation.get(
                    name=self.transfer_variable.guest_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.guest_gradient, self.n_iter_,
                        batch_index),
                    idx=0)
                LOGGER.info("Get guest_gradient from Guest")

                # aggregate gradient
                host_gradient, guest_gradient = np.array(
                    host_gradient), np.array(guest_gradient)
                gradient = np.hstack((host_gradient, guest_gradient))
                # decrypt gradient
                for i in range(gradient.shape[0]):
                    gradient[i] = self.encrypt_operator.decrypt(gradient[i])

                # optimization
                optim_gradient = self.optimizer.apply_gradients(gradient)
                # separate optim_gradient according to the gradient sizes of Host and Guest
                separate_optim_gradient = HeteroFederatedAggregator.separate(
                    optim_gradient,
                    [host_gradient.shape[0], guest_gradient.shape[0]])
                host_optim_gradient = separate_optim_gradient[0]
                guest_optim_gradient = separate_optim_gradient[1]

                federation.remote(
                    host_optim_gradient,
                    name=self.transfer_variable.host_optim_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.host_optim_gradient,
                        self.n_iter_, batch_index),
                    role=consts.HOST,
                    idx=0)
                LOGGER.info("Remote host_optim_gradient to Host")

                federation.remote(
                    guest_optim_gradient,
                    name=self.transfer_variable.guest_optim_gradient.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.guest_optim_gradient,
                        self.n_iter_, batch_index),
                    role=consts.GUEST,
                    idx=0)
                LOGGER.info("Remote guest_optim_gradient to Guest")

                loss = federation.get(
                    name=self.transfer_variable.loss.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.loss, self.n_iter_,
                        batch_index),
                    idx=0)

                de_loss = self.encrypt_operator.decrypt(loss)
                LOGGER.info("Get loss from guest:{}".format(de_loss))
                # if converge
                if self.converge_func.is_converge(de_loss):
                    is_stop = True

                federation.remote(
                    is_stop,
                    name=self.transfer_variable.is_stopped.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.is_stopped, self.n_iter_,
                        batch_index),
                    role=consts.HOST,
                    idx=0)
                LOGGER.info("Remote is_stop to guest:{}".format(is_stop))

                federation.remote(
                    is_stop,
                    name=self.transfer_variable.is_stopped.name,
                    tag=self.transfer_variable.generate_transferid(
                        self.transfer_variable.is_stopped, self.n_iter_,
                        batch_index),
                    role=consts.GUEST,
                    idx=0)
                LOGGER.info("Remote is_stop to guest:".format(is_stop))

                batch_index += 1
                if is_stop:
                    LOGGER.info("Model is converged, iter:{}".format(
                        self.n_iter_))
                    break

            self.n_iter_ += 1
            if is_stop:
                break

        LOGGER.info("Reach max iter {}, train model finish!".format(
            self.max_iter))