Example #1
 def aggregate_forward(self, host_forward):
     """
     Compute e_guest.dot(e_host)
     Parameters
     ----------
     host_forward: DTable of (key, en_e(host)) pairs
     """
     aggregate_forward_res = self.guest_forward.join(
         host_forward, lambda e1, e2:
         (fate_operator.dot(e1[1], e2[1]),
          math.pow(fate_operator.dot(e1[1], e2[1]), 2)))
     return aggregate_forward_res
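
For intuition, a minimal stand-alone sketch of what this join computes, with plain dicts standing in for the DTables and np.dot for fate_operator.dot (the keys, labels, and vectors below are made-up illustrations):

import math
import numpy as np

# Toy stand-ins for the two DTables: key -> (label, forward vector)
guest_forward = {0: (1, np.array([0.2, 0.4])), 1: (0, np.array([0.1, 0.3]))}
host_forward = {0: (1, np.array([0.5, 0.6])), 1: (0, np.array([0.7, 0.8]))}

# Emulate DTable.join: pair values by key, emit (dot, dot squared)
aggregate = {}
for k in guest_forward:
    e1, e2 = guest_forward[k], host_forward[k]
    dot = np.dot(e1[1], e2[1])
    aggregate[k] = (dot, math.pow(dot, 2))
print(aggregate)  # {0: (0.34, 0.1156), 1: (0.31, 0.0961)} up to float rounding
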
Example #2

def __compute_partition_gradient(data, fit_intercept=True):
    """
    Compute the hetero regression gradient:
    gradient = ∑d*x, where d is the fore_gradient, which differs between algorithms
    Parameters
    ----------
    data: DTable, include fore_gradient and features
    fit_intercept: bool, whether the model has an intercept term. Default: True

    Returns
    -------
    numpy.ndarray
        hetero regression model gradient
    """
    feature = []
    fore_gradient = []

    for key, value in data:
        feature.append(value[0])
        fore_gradient.append(value[1])
    feature = np.array(feature)
    fore_gradient = np.array(fore_gradient)

    gradient = []
    if feature.shape[0] <= 0:
        return 0
    for j in range(feature.shape[1]):
        feature_col = feature[:, j]
        gradient_j = fate_operator.dot(feature_col, fore_gradient)
        gradient.append(gradient_j)

    if fit_intercept:
        bias_grad = np.sum(fore_gradient)
        gradient.append(bias_grad)
    return np.array(gradient)
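
The per-column loop is X^T·d computed one column at a time; a quick NumPy check on toy data (using np.dot in place of fate_operator.dot is an assumption here):

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])  # features, one row per sample
d = np.array([0.5, -0.5])               # fore_gradient, one entry per sample

# Column by column, as in the loop above
loop_grad = [np.dot(X[:, j], d) for j in range(X.shape[1])]
loop_grad.append(np.sum(d))             # bias term when fit_intercept=True

# Same result as a single matrix product: gradient = X^T . d
assert np.allclose(loop_grad, np.r_[X.T.dot(d), d.sum()])
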
Example #3
    def __compute_gradient(data, fit_intercept=True):
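        # As in Example #2, but each gradient component is divided by the
        # partition size, so this variant returns the mean gradient.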
        feature = []
        fore_gradient = []

        for key, value in data:
            feature.append(value[0])
            fore_gradient.append(value[1])
        feature = np.array(feature)
        fore_gradient = np.array(fore_gradient)

        gradient = []
        if feature.shape[0] <= 0:
            return 0
        for j in range(feature.shape[1]):
            feature_col = feature[:, j]
            gradient_j = fate_operator.dot(feature_col, fore_gradient)
            gradient_j /= feature.shape[0]
            gradient.append(gradient_j)

        if fit_intercept:
            fore_gradient_size = fore_gradient.shape[0]
            fore_gradient = fore_gradient / fore_gradient_size
            bias_grad = np.sum(fore_gradient)
            gradient.append(bias_grad)

        return np.array(gradient)
Example #4
    def compute(self, values, coef, intercept, fit_intercept):
        X, Y = self.load_data(values)
        batch_size = len(X)
        if batch_size == 0:
            return None, None

        one_d_y = Y.reshape([-1, ])
        d = (0.25 * np.array(fate_operator.dot(X, coef) + intercept).transpose() + 0.5 * one_d_y * -1)

        grad_batch = X.transpose() * d
        grad_batch = grad_batch.transpose()
        if fit_intercept:
            grad_batch = np.c_[grad_batch, d]
        # grad = sum(grad_batch) / batch_size
        grad = sum(grad_batch)
        return grad, None
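
The expression for d is the first-order Taylor expansion of the exact logistic-loss derivative around wx + b = 0, i.e. sigmoid(t) ≈ 0.5 + t/4 with labels in {-1, +1}. A quick self-contained check (all names and values below are illustrative):

import numpy as np

def sigmoid(t):
    return 1.0 / (1.0 + np.exp(-t))

X = np.array([[0.1, -0.2], [0.05, 0.1]])
y = np.array([1, -1])                    # labels in {-1, +1}
coef, intercept = np.array([0.1, 0.1]), 0.0

z = X.dot(coef) + intercept
exact = y * (sigmoid(y * z) - 1)         # exact derivative of log(1 + exp(-y*z))
taylor = 0.25 * z - 0.5 * y              # the d used above
assert np.allclose(exact, taylor, atol=1e-4)
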
Example #5
    def compute_gradient(values, coef, intercept, fit_intercept):
        LOGGER.debug("Get in compute_gradient")
        X, Y = load_data(values)
        batch_size = len(X)
        if batch_size == 0:
            return None

        one_d_y = Y.reshape([-1, ])
        d = (0.25 * np.array(fate_operator.dot(X, coef) + intercept).transpose() + 0.5 * one_d_y * -1)

        grad_batch = X.transpose() * d
        grad_batch = grad_batch.transpose()
        if fit_intercept:
            grad_batch = np.c_[grad_batch, d]
        grad = sum(grad_batch)
        LOGGER.debug("Finish compute_gradient")
        return grad
Example #6

    def _test_compute(self, X, Y, coef, intercept, fit_intercept):
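        # Test helper: recomputes d and the average logistic loss; the
        # gradient assembly is left commented out and 0 is returned.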
        batch_size = len(X)
        if batch_size == 0:
            return None, None

        one_d_y = Y.reshape([-1, ])

        d = (0.25 * np.array(fate_operator.dot(X, coef) + intercept).transpose() + 0.5 * one_d_y * -1)
        grad_batch = X.transpose() * d

        tot_loss = np.log(1 + np.exp(np.multiply(-Y.transpose(), X.dot(coef) + intercept))).sum()
        avg_loss = tot_loss / Y.shape[0]

        # grad_batch = grad_batch.transpose()
        # if fit_intercept:
        #     grad_batch = np.c_[grad_batch, d]
        # grad = sum(grad_batch) / batch_size
        return 0
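
The loss line computes the exact (un-approximated) logistic loss ∑ log(1 + exp(-y*(Xw + b))); a self-contained recomputation on made-up values:

import numpy as np

X = np.array([[0.1, 0.2], [0.3, -0.1]])
Y = np.array([[1], [-1]])                # column vector, labels in {-1, +1}
coef, intercept = np.array([0.5, -0.5]), 0.1

z = X.dot(coef) + intercept
tot_loss = np.log(1 + np.exp(-Y.reshape(-1) * z)).sum()
avg_loss = tot_loss / Y.shape[0]
print(avg_loss)                          # mean logistic loss over the batch
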
Example #7
    def __compute_gradient(data, fit_intercept=True):
        """
        Compute the hetero-LR gradient:
        gradient = ∑(1/2*y*w*x - 1)*1/2*y*x, where fore_gradient = (1/2*y*w*x - 1)*1/2*y
        has already been computed and x is the feature vector
        Parameters
        ----------
        data: DTable, include fore_gradient and features
        fit_intercept: bool, whether hetero-LR has an intercept term. Default: True

        Returns
        -------
        numpy.ndarray
            hetero-lr gradient
        """
        feature = []
        fore_gradient = []

        for key, value in data:
            feature.append(value[0])
            fore_gradient.append(value[1])
        feature = np.array(feature)
        fore_gradient = np.array(fore_gradient)

        gradient = []
        if feature.shape[0] <= 0:
            return 0
        for j in range(feature.shape[1]):
            feature_col = feature[:, j]
            gradient_j = fate_operator.dot(feature_col, fore_gradient)
            gradient.append(gradient_j)

        if fit_intercept:
            bias_grad = np.sum(fore_gradient)
            gradient.append(bias_grad)
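        # also ship the partition's sample count so the caller can
        # turn the summed gradient into a mean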
        gradient.append(feature.shape[0])
        return np.array(gradient)
Example #8
 def compute_wx(self, data_instances, coef_, intercept_=0):
     return data_instances.mapValues(
         lambda v: fate_operator.dot(v.features, coef_) + intercept_)
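
compute_wx maps every Instance to its linear predictor w·x + b. A minimal emulation with a plain dict in place of the DTable (the namedtuple Instance and np.dot standing in for fate_operator.dot are assumptions for illustration):

import numpy as np
from collections import namedtuple

Instance = namedtuple("Instance", ["features"])   # stand-in for FATE's Instance

data_instances = {0: Instance(np.array([1.0, 2.0])),
                  1: Instance(np.array([3.0, 4.0]))}
coef_, intercept_ = np.array([0.2, 0.1]), 1.0

# Emulate DTable.mapValues
wx = {k: np.dot(v.features, coef_) + intercept_ for k, v in data_instances.items()}
print(wx)   # {0: 1.4, 1: 2.0}
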
def __compute_partition_gradient(data, fit_intercept=True, is_sparse=False):
    """
    Compute the hetero regression gradient:
    gradient = ∑d*x, where d is the fore_gradient, which differs between algorithms
    Parameters
    ----------
    data: DTable, include fore_gradient and features
    fit_intercept: bool, whether the model has an intercept term. Default: True

    Returns
    -------
    numpy.ndarray
        hetero regression model gradient
    """
    feature = []
    fore_gradient = []

    if is_sparse:
        row_indice = []
        col_indice = []
        data_value = []

        row = 0
        feature_shape = None
        for key, (sparse_features, d) in data:
            fore_gradient.append(d)
            assert isinstance(sparse_features, SparseVector)
            if feature_shape is None:
                feature_shape = sparse_features.get_shape()
            for idx, v in sparse_features.get_all_data():
                col_indice.append(idx)
                row_indice.append(row)
                data_value.append(v)
            row += 1
        if feature_shape is None or feature_shape == 0:
            return 0
        sparse_matrix = sp.csr_matrix((data_value, (row_indice, col_indice)),
                                      shape=(row, feature_shape))
        fore_gradient = np.array(fore_gradient)

        # gradient = sparse_matrix.transpose().dot(fore_gradient).tolist()
        gradient = fate_operator.dot(sparse_matrix.transpose(),
                                     fore_gradient).tolist()
        if fit_intercept:
            bias_grad = np.sum(fore_gradient)
            gradient.append(bias_grad)
            # LOGGER.debug("In first method, gradient: {}, bias_grad: {}".format(gradient, bias_grad))
        return np.array(gradient)

    else:
        for key, value in data:
            feature.append(value[0])
            fore_gradient.append(value[1])
        feature = np.array(feature)
        fore_gradient = np.array(fore_gradient)
        if feature.shape[0] <= 0:
            return 0

        gradient = fate_operator.dot(feature.transpose(), fore_gradient)
        gradient = gradient.tolist()
        if fit_intercept:
            bias_grad = np.sum(fore_gradient)
            gradient.append(bias_grad)
        return np.array(gradient)
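
Both branches reduce to gradient = X^T·d plus an optional bias term; a quick check that the sparse and dense paths agree (toy matrix, with scipy.sparse imported as sp to match the snippet):

import numpy as np
import scipy.sparse as sp

X_dense = np.array([[0.0, 1.0, 0.0],
                    [2.0, 0.0, 3.0]])
d = np.array([0.5, -1.0])

X_sparse = sp.csr_matrix(X_dense)
dense_grad = X_dense.transpose().dot(d)
sparse_grad = X_sparse.transpose().dot(d)   # same contraction on the CSR form
assert np.allclose(dense_grad, sparse_grad)
print(np.r_[dense_grad, d.sum()])           # gradient with the bias term appended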