Example #1
    def dot_local(self, other, target_name=None):
        def _vec_dot(x, y, party_idx, q_field, endec):
            ret = np.dot(x, y) % q_field
            ret = endec.truncate(ret, party_idx)
            if not isinstance(ret, np.ndarray):
                ret = np.array([ret])
            return ret

        if isinstance(other, FixedPointTensor) or isinstance(
                other, fixedpoint_numpy.FixedPointTensor):
            other = other.value

        if isinstance(other, np.ndarray):
            party_idx = self.get_spdz().party_idx
            f = functools.partial(_vec_dot,
                                  y=other,
                                  party_idx=party_idx,
                                  q_field=self.q_field,
                                  endec=self.endec)
            ret = self.value.mapValues(f)
            return self._boxed(ret, target_name)

        elif is_table(other):
            ret = table_dot_mod(self.value, other, self.q_field).reshape(
                (1, -1))[0]
            ret = self.endec.truncate(ret, self.get_spdz().party_idx)
            return fixedpoint_numpy.FixedPointTensor(ret, self.q_field,
                                                     self.endec, target_name)
        else:
            raise ValueError(f"type={type(other)}")
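Example #1's helper reduces the dot product mod q_field and then calls endec.truncate: both operands carry one fixed-point scaling factor, so their product carries the square of it and one factor has to be divided back out. A minimal single-machine sketch of that idea (hypothetical scale and field, not FATE's FixedPointEndec, and non-negative values only to sidestep the signed encoding):

import numpy as np

scale = 2 ** 16            # hypothetical fractional precision
q_field = 2 ** 61 - 1      # hypothetical field size

def encode(x):
    # float -> fixed-point integer carrying one factor of `scale`
    return np.round(x * scale).astype(np.int64) % q_field

def truncate(x):
    # drop one factor of `scale` after a multiplication
    return x // scale

def decode(x):
    return x / scale

x = np.array([1.5, 2.25])
y = np.array([0.5, 4.0])

enc_dot = np.dot(encode(x), encode(y)) % q_field   # carries scale ** 2
print(decode(truncate(enc_dot)))                   # 9.75 == np.dot(x, y)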
Example #2
    def dot(self, other: 'FixedPointTensor', target_name=None):
        spdz = self.get_spdz()
        if target_name is None:
            target_name = NamingService.get_instance().next()

        a, b, c = beaver_triplets(a_tensor=self.value,
                                  b_tensor=other.value,
                                  dot=table_dot,
                                  q_field=self.q_field,
                                  he_key_pair=(spdz.public_key,
                                               spdz.private_key),
                                  communicator=spdz.communicator,
                                  name=target_name)

        x_add_a = (self + a).rescontruct(f"{target_name}_confuse_x")
        y_add_b = (other + b).rescontruct(f"{target_name}_confuse_y")
        cross = c - table_dot_mod(a, y_add_b, self.q_field) - table_dot_mod(
            x_add_a, b, self.q_field)
        if spdz.party_idx == 0:
            cross += table_dot_mod(x_add_a, y_add_b, self.q_field)
        cross = cross % self.q_field
        cross = self.endec.truncate(cross, self.get_spdz().party_idx)
        share = fixedpoint_numpy.FixedPointTensor(cross, self.q_field,
                                                  self.endec, target_name)
        return share
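Example #2 is the Beaver-triplet multiplication protocol: with a pre-shared triple (a, b, c = a·b), both parties open x + a and y + b, combine c - a·(y+b) - (x+a)·b, and one designated party adds (x+a)·(y+b); the terms telescope to x·y. A minimal single-machine sketch of that identity with plain integers mod a prime (no secret sharing or communication, unlike the real protocol):

q = 2 ** 31 - 1
x, y = 1234, 5678          # the two secret operands
a, b = 42, 97              # random triple, with c = a * b
c = (a * b) % q

x_add_a = (x + a) % q      # opened value, as in (self + a).rescontruct(...)
y_add_b = (y + b) % q      # opened value, as in (other + b).rescontruct(...)

# cross = c - a*(y+b) - (x+a)*b, plus (x+a)*(y+b) from one party
cross = (c - a * y_add_b - x_add_a * b + x_add_a * y_add_b) % q
assert cross == (x * y) % q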
Example #3
    def _not_reveal_every_iter_weights_check(self, last_w, new_w, suffix):
        last_w_self, last_w_remote = last_w
        w_self, w_remote = new_w
        grad_self = w_self - last_w_self
        grad_remote = w_remote - last_w_remote

        if self.role == consts.GUEST:
            grad_encode = np.hstack((grad_remote.value, grad_self.value))
        else:
            grad_encode = np.hstack((grad_self.value, grad_remote.value))

        grad_encode = np.array([grad_encode])

        grad_tensor_name = ".".join(("check_converge_grad", ) + suffix)
        grad_tensor = fixedpoint_numpy.FixedPointTensor(
            value=grad_encode,
            q_field=self.fixedpoint_encoder.n,
            endec=self.fixedpoint_encoder,
            tensor_name=grad_tensor_name)

        grad_tensor_transpose_name = ".".join(
            ("check_converge_grad_transpose", ) + suffix)
        grad_tensor_transpose = fixedpoint_numpy.FixedPointTensor(
            value=grad_encode.T,
            q_field=self.fixedpoint_encoder.n,
            endec=self.fixedpoint_encoder,
            tensor_name=grad_tensor_transpose_name)

        grad_norm_tensor_name = ".".join(("check_converge_grad_norm", ) +
                                         suffix)

        grad_norm = grad_tensor.dot(grad_tensor_transpose,
                                    target_name=grad_norm_tensor_name).get()

        weight_diff = np.sqrt(grad_norm[0][0])
        LOGGER.info("iter: {}, weight_diff:{}, is_converged: {}".format(
            self.n_iter_, weight_diff, self.is_converged))
        is_converge = False
        if weight_diff < self.model_param.tol:
            is_converge = True
        return is_converge
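Example #3 stacks the two weight deltas into a 1 x d row vector and dots it with its own transpose, so the only value reconstructed is the scalar Δw·Δwᵀ = ||Δw||^2; weight_diff is its square root, compared against model_param.tol. A plaintext check of that identity:

import numpy as np

grad = np.array([[0.01, -0.02, 0.005]])   # 1 x d row of weight deltas
grad_norm = grad.dot(grad.T)              # 1 x 1 matrix holding ||grad||^2
weight_diff = np.sqrt(grad_norm[0][0])
assert np.isclose(weight_diff, np.linalg.norm(grad))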
Example #4
    def compute_loss(self, weights, suffix, cipher=None):
        """
          Use a Taylor series to expand the log loss:
          Loss = - y * log(h(x)) - (1-y) * log(1 - h(x)) where h(x) = 1/(1+exp(-wx))
          Then the approximate loss is - (1/N)*∑(log(1/2) - 1/2*wx + ywx - 1/8*(wx)^2)
        """
        LOGGER.info(f"[compute_loss]: Calculate loss ...")
        wx = (-0.5 * self.encrypted_wx).reduce(operator.add)
        ywx = (self.encrypted_wx * self.labels).reduce(operator.add)

        wx_square = (2 * self.wx_remote * self.wx_self).reduce(operator.add) + \
                    (self.wx_self * self.wx_self).reduce(operator.add)

        wx_remote_square = self.secure_matrix_obj.share_encrypted_matrix(
            suffix=suffix, is_remote=False, cipher=None,
            wx_self_square=None)[0]

        wx_square = (wx_remote_square + wx_square) * -0.125

        batch_num = self.batch_num[int(suffix[2])]
        loss = (wx + ywx + wx_square) * (-1 / batch_num) - np.log(0.5)

        tensor_name = ".".join(("shared_loss", ) + suffix)
        share_loss = SecureMatrix.from_source(
            tensor_name=tensor_name,
            source=loss,
            cipher=None,
            q_field=self.fixedpoint_encoder.n,
            encoder=self.fixedpoint_encoder)

        tensor_name = ".".join(("loss", ) + suffix)
        loss = share_loss.get(tensor_name=tensor_name, broadcast=False)[0]

        if self.reveal_every_iter:
            loss_norm = self.optimizer.loss_norm(weights)
            if loss_norm:
                loss += loss_norm
        else:
            if self.optimizer.penalty == consts.L2_PENALTY:
                w_self, w_remote = weights

                w_encode = np.hstack((w_remote.value, w_self.value))

                w_encode = np.array([w_encode])

                w_tensor_name = ".".join(("loss_norm_w", ) + suffix)
                w_tensor = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_name)

                w_tensor_transpose_name = ".".join(
                    ("loss_norm_w_transpose", ) + suffix)
                w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode.T,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_transpose_name)

                loss_norm_tensor_name = ".".join(("loss_norm", ) + suffix)

                loss_norm = w_tensor.dot(
                    w_tensor_transpose,
                    target_name=loss_norm_tensor_name).get(broadcast=False)
                loss_norm = 0.5 * self.optimizer.alpha * loss_norm[0][0]
                loss = loss + loss_norm

        LOGGER.info(
            f"[compute_loss]: loss={loss}, reveal_every_iter={self.reveal_every_iter}"
        )

        return loss
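The docstring's expansion can be checked directly: for labels y in {0, 1} and z = wx, the exact loss -y*log h - (1-y)*log(1-h) equals log(1 + exp(-z)) + (1-y)*z, and expanding log(1 + exp(-z)) around z = 0 gives log 2 - z/2 + z^2/8, i.e. the per-sample term -(log(1/2) - z/2 + y*z - z^2/8) that Example #4 averages. A small numeric check, assuming {0, 1} labels:

import numpy as np

wx = np.array([-0.4, -0.1, 0.2, 0.5])
y = np.array([0, 1, 1, 0])

h = 1.0 / (1.0 + np.exp(-wx))
exact = -y * np.log(h) - (1 - y) * np.log(1 - h)
taylor = -(np.log(0.5) - 0.5 * wx + y * wx - 0.125 * wx ** 2)

print(np.max(np.abs(exact - taylor)))   # small while |wx| stays well below 1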
Example #5
    def compute_loss(self, weights=None, suffix=None, cipher=None):
        """
          Use a Taylor series to expand the log loss:
          Loss = - y * log(h(x)) - (1-y) * log(1 - h(x)) where h(x) = 1/(1+exp(-wx))
          Then the approximate loss is - (1/N)*∑(log(1/2) - 1/2*wx + ywx - 1/8*(wx)^2)
        """
        LOGGER.info(f"[compute_loss]: Calculate loss ...")
        wx_self_square = (self.wx_self * self.wx_self).reduce(operator.add)

        self.secure_matrix_obj.share_encrypted_matrix(
            suffix=suffix,
            is_remote=True,
            cipher=cipher,
            wx_self_square=wx_self_square)

        tensor_name = ".".join(("shared_loss", ) + suffix)
        share_loss = SecureMatrix.from_source(
            tensor_name=tensor_name,
            source=self.other_party,
            cipher=cipher,
            q_field=self.fixedpoint_encoder.n,
            encoder=self.fixedpoint_encoder,
            is_fixedpoint_table=False)

        if self.reveal_every_iter:
            loss_norm = self.optimizer.loss_norm(weights)
            if loss_norm:
                share_loss += loss_norm
            LOGGER.debug(f"share_loss+loss_norm: {share_loss}")
            tensor_name = ".".join(("loss", ) + suffix)
            share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
        else:
            tensor_name = ".".join(("loss", ) + suffix)
            share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
            if self.optimizer.penalty == consts.L2_PENALTY:
                w_self, w_remote = weights

                w_encode = np.hstack((w_self.value, w_remote.value))

                w_encode = np.array([w_encode])

                w_tensor_name = ".".join(("loss_norm_w", ) + suffix)
                w_tensor = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_name)

                w_tensor_transpose_name = ".".join(
                    ("loss_norm_w_transpose", ) + suffix)
                w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode.T,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_transpose_name)

                loss_norm_tensor_name = ".".join(("loss_norm", ) + suffix)

                loss_norm = w_tensor.dot(w_tensor_transpose,
                                         target_name=loss_norm_tensor_name)
                loss_norm.broadcast_reconstruct_share()
Example #6
    def compute_loss(self,
                     weights=None,
                     labels=None,
                     suffix=None,
                     cipher=None):
        """
         Compute hetero linr loss:
            loss = (1/N)*\\sum(wx-y)^2, where y is the label, w the model weight and x the features
            (wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * (wx_h * (wx_g - y))
        """
        LOGGER.info(f"[compute_loss]: Calculate loss ...")
        wx_self_square = (self.wx_self * self.wx_self).reduce(operator.add)

        self.secure_matrix_obj.share_encrypted_matrix(
            suffix=suffix,
            is_remote=True,
            cipher=cipher,
            wx_self_square=wx_self_square)

        tensor_name = ".".join(("shared_loss", ) + suffix)
        share_loss = SecureMatrix.from_source(
            tensor_name=tensor_name,
            source=self.other_party,
            cipher=cipher,
            q_field=self.fixedpoint_encoder.n,
            encoder=self.fixedpoint_encoder,
            is_fixedpoint_table=False)

        if self.reveal_every_iter:
            loss_norm = self.optimizer.loss_norm(weights)
            if loss_norm:
                share_loss += loss_norm
            LOGGER.debug(f"share_loss+loss_norm: {share_loss}")
            tensor_name = ".".join(("loss", ) + suffix)
            share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
        else:
            tensor_name = ".".join(("loss", ) + suffix)
            share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
            if self.optimizer.penalty == consts.L2_PENALTY:
                w_self, w_remote = weights

                w_encode = np.hstack((w_self.value, w_remote.value))

                w_encode = np.array([w_encode])

                w_tensor_name = ".".join(("loss_norm_w", ) + suffix)
                w_tensor = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_name)

                w_tensor_transpose_name = ".".join(
                    ("loss_norm_w_transpose", ) + suffix)
                w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode.T,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_transpose_name)

                loss_norm_tensor_name = ".".join(("loss_norm", ) + suffix)

                loss_norm = w_tensor.dot(w_tensor_transpose,
                                         target_name=loss_norm_tensor_name)
                loss_norm.broadcast_reconstruct_share()
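Example #6's docstring splits each squared residual between the two parties: with wx = wx_g + wx_h and the label y held by the guest, (wx - y)^2 = wx_h^2 + (wx_g - y)^2 + 2*wx_h*(wx_g - y), which is why the host only has to contribute its squared partial predictions while the guest side (Example #8) assembles the rest. A plaintext check of the identity:

import numpy as np

wx_g = np.array([0.7, -0.2, 1.1])   # guest partial predictions
wx_h = np.array([0.1, 0.4, -0.3])   # host partial predictions
y = np.array([1.0, 0.0, 0.5])       # labels (guest side)

lhs = (wx_g + wx_h - y) ** 2
rhs = wx_h ** 2 + (wx_g - y) ** 2 + 2 * wx_h * (wx_g - y)
assert np.allclose(lhs, rhs)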
Example #7
    def reduce(self, func, **kwargs):
        ret = self.value.reduce(func)
        return fixedpoint_numpy.FixedPointTensor(ret, self.q_field, self.endec)
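Example #7 simply forwards reduce to the underlying table and re-boxes the result; the loss computations above use it as .reduce(operator.add) to sum per-row vectors into a single value. A plain-Python equivalent of that pattern, with a hypothetical list of rows standing in for the distributed table:

import functools
import operator

import numpy as np

rows = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
total = functools.reduce(operator.add, rows)   # array([ 9., 12.])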
Example #8
    def compute_loss(self, weights, labels, suffix, cipher=None):
        """
         Compute hetero linr loss:
            loss = (1/N)*\\sum(wx-y)^2, where y is the label, w the model weight and x the features
            (wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * (wx_h * (wx_g - y))
        """
        LOGGER.info(f"[compute_loss]: Calculate loss ...")
        wxy_self = self.wx_self - labels
        wxy_self_square = (wxy_self * wxy_self).reduce(operator.add)

        wxy = (self.wx_remote * wxy_self).reduce(operator.add)
        wx_remote_square = self.secure_matrix_obj.share_encrypted_matrix(
            suffix=suffix, is_remote=False, cipher=None,
            wx_self_square=None)[0]
        loss = (wx_remote_square + wxy_self_square) + wxy * 2

        batch_num = self.batch_num[int(suffix[2])]
        loss = loss * (1 / (batch_num * 2))
        # loss = (wx_remote_square + wxy_self_square + 2 * wxy) / (2 * batch_num)

        tensor_name = ".".join(("shared_loss", ) + suffix)
        share_loss = SecureMatrix.from_source(
            tensor_name=tensor_name,
            source=loss,
            cipher=None,
            q_field=self.fixedpoint_encoder.n,
            encoder=self.fixedpoint_encoder)

        tensor_name = ".".join(("loss", ) + suffix)
        loss = share_loss.get(tensor_name=tensor_name, broadcast=False)[0]

        if self.reveal_every_iter:
            loss_norm = self.optimizer.loss_norm(weights)
            if loss_norm:
                loss += loss_norm
        else:
            if self.optimizer.penalty == consts.L2_PENALTY:
                w_self, w_remote = weights

                w_encode = np.hstack((w_remote.value, w_self.value))

                w_encode = np.array([w_encode])

                w_tensor_name = ".".join(("loss_norm_w", ) + suffix)
                w_tensor = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_name)

                w_tensor_transpose_name = ".".join(
                    ("loss_norm_w_transpose", ) + suffix)
                w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(
                    value=w_encode.T,
                    q_field=self.fixedpoint_encoder.n,
                    endec=self.fixedpoint_encoder,
                    tensor_name=w_tensor_transpose_name)

                loss_norm_tensor_name = ".".join(("loss_norm", ) + suffix)

                loss_norm = w_tensor.dot(
                    w_tensor_transpose,
                    target_name=loss_norm_tensor_name).get(broadcast=False)
                loss_norm = 0.5 * self.optimizer.alpha * loss_norm[0][0]
                loss = loss + loss_norm

        LOGGER.info(
            f"[compute_loss]: loss={loss}, reveal_every_iter={self.reveal_every_iter}"
        )

        return loss
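In the non-revealing branch of Examples #4 and #8, the L2 penalty reuses the same row-times-transpose trick as the convergence check: the two weight shares are stacked into one 1 x d vector w, the reconstructed scalar is w·wᵀ, i.e. the squared norm of the stacked weights, and the term added to the loss is 0.5*alpha*||w||^2. A plaintext sketch with hypothetical weights and alpha:

import numpy as np

alpha = 0.1
w_self = np.array([0.3, -0.5])
w_remote = np.array([0.2, 0.1, 0.4])

w_encode = np.array([np.hstack((w_remote, w_self))])       # 1 x d row vector
loss_norm = 0.5 * alpha * w_encode.dot(w_encode.T)[0][0]   # 0.5 * alpha * ||w||^2
assert np.isclose(loss_norm, 0.5 * alpha * np.sum(w_encode ** 2))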