def __precompute(self):
    # compute a component of host's loss gradient
    enc_uB_overlap_ex = np.expand_dims(self.enc_uB_overlap, axis=1)
    enc_uB_overlap_y_overlap_2_phi_2 = encrypt_matmul_3(
        enc_uB_overlap_ex, self.y_overlap_2_phi_2)
    self.precomputed_component = np.squeeze(
        enc_uB_overlap_y_overlap_2_phi_2, axis=1)

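# The snippets in this section lean on homomorphic helpers (encrypt_matmul_3,
# compute_XY, compute_X_plus_Y, ...) whose implementations are not shown here.
# The sketch below is only my reading of what a batched "encrypted matmul"
# could look like -- the name encrypt_matmul_3_sketch and the shapes are
# assumptions, not the project's actual code. It relies on the fact that
# Paillier-style ciphertexts support ciphertext + ciphertext and
# plaintext * ciphertext, which is all a matrix product needs when at most
# one operand is encrypted.
import numpy as np


def encrypt_matmul_3_sketch(X, Y):
    """Batched matmul of 3-D arrays where at most one of X, Y holds ciphertexts.

    X: (batch, n, k), Y: (batch, k, m) -> result: (batch, n, m), dtype=object.
    """
    batch, n, k = X.shape
    m = Y.shape[2]
    out = np.empty((batch, n, m), dtype=object)
    for b in range(batch):
        for i in range(n):
            for j in range(m):
                # each term is a scalar product; when one factor is a
                # ciphertext this is the homomorphic plaintext-by-ciphertext
                # multiplication, and the running sum is ciphertext addition
                acc = X[b, i, 0] * Y[b, 0, j]
                for t in range(1, k):
                    acc = acc + X[b, i, t] * Y[b, t, j]
                out[b, i, j] = acc
    return out
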
def _update_gradients(self):
    uB_overlap_ex = np.expand_dims(self.uB_overlap, axis=1)
    enc_uB_overlap_y_overlap_2_phi_2 = encrypt_matmul_3(uB_overlap_ex, self.enc_y_overlap_2_phi_2)
    enc_l1_grad_B = compute_X_plus_Y(np.squeeze(enc_uB_overlap_y_overlap_2_phi_2, axis=1), self.enc_y_overlap_phi)
    enc_loss_grad_B = compute_X_plus_Y(self.alpha * enc_l1_grad_B, self.enc_mapping_comp_A)

    self.loss_grads = enc_loss_grad_B
    self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(
        self.X[self.overlap_indexes], enc_loss_grad_B)

def _update_gradients(self):
    U_B_overlap_ex = np.expand_dims(self.U_B_overlap, axis=1)
    grads = self.localModel.compute_gradients(self.X[self.overlap_indexes])

    encrypted_U_B_comp_A_beta1 = encrypt_matmul_3(U_B_overlap_ex, self.comp_A_beta1)
    encrypted_grad_l1_B = compute_X_plus_Y(np.squeeze(encrypted_U_B_comp_A_beta1, axis=1), self.comp_A_beta2)
    encrypted_grad_loss_B = compute_X_plus_Y(self.alpha * encrypted_grad_l1_B, self.mapping_comp_A)

    self.loss_grads = encrypted_grad_loss_B
    self.encrypt_grads_W, self.encrypt_grads_b = self.__compute_encrypt_grads(grads, encrypted_grad_loss_B)

def _update_gradients(self):
    # y_overlap2 has shape (len(overlap_indexes), 1),
    # y_A_u_A has shape (1, feature_dim),
    # y_overlap2_y_A_u_A has shape (len(overlap_indexes), 1, feature_dim)
    y_overlap2_y_A_u_A = np.expand_dims(self.y_overlap2 * self.y_A_u_A, axis=1)

    # U_B_2_overlap has shape (len(overlap_indexes), feature_dim, feature_dim)
    # tmp2 has shape (len(overlap_indexes), feature_dim)
    tmp1 = encrypt_matmul_3(y_overlap2_y_A_u_A, self.U_B_2_overlap)
    tmp2 = 0.25 * np.squeeze(tmp1, axis=1)

    if self.is_trace:
        self.logger.info("tmp1 shape" + str(tmp1.shape))
        self.logger.info("tmp2 shape" + str(tmp2.shape))

    y_overlap = np.tile(self.y_overlap, (1, self.U_B_overlap.shape[-1]))
    tmp3 = compute_sum_XY(y_overlap * 0.5, self.U_B_overlap)

    # TODO: compute the sum more efficiently
    encrypt_const = np.sum(tmp2, axis=0) - tmp3
    encrypt_const_overlap = np.tile(encrypt_const, (len(self.overlap_indexes), 1))
    encrypt_const_nonoverlap = np.tile(encrypt_const, (len(self.non_overlap_indexes), 1))
    y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, self.U_B_overlap.shape[-1]))

    if self.is_trace:
        self.logger.info("encrypt_const shape:" + str(encrypt_const.shape))
        self.logger.info("encrypt_const_overlap shape" + str(encrypt_const_overlap.shape))
        self.logger.info("encrypt_const_nonoverlap shape" + str(encrypt_const_nonoverlap.shape))
        self.logger.info("y_non_overlap shape" + str(y_non_overlap.shape))

    encrypt_grad_A_nonoverlap = compute_XY(self.alpha * y_non_overlap / len(self.y), encrypt_const_nonoverlap)
    encrypt_grad_A_overlap = compute_XY_plus_Z(self.alpha * y_overlap / len(self.y), encrypt_const_overlap, self.mapping_comp_B)

    if self.is_trace:
        self.logger.info("encrypt_grad_A_nonoverlap shape" + str(encrypt_grad_A_nonoverlap.shape))
        self.logger.info("encrypt_grad_A_overlap shape" + str(encrypt_grad_A_overlap.shape))

    encrypt_grad_loss_A = [[0 for _ in range(self.U_B_overlap.shape[1])] for _ in range(len(self.y))]

    # TODO: need a more efficient way to do the following task
    for i, j in enumerate(self.non_overlap_indexes):
        encrypt_grad_loss_A[j] = encrypt_grad_A_nonoverlap[i]
    for i, j in enumerate(self.overlap_indexes):
        encrypt_grad_loss_A[j] = encrypt_grad_A_overlap[i]

    encrypt_grad_loss_A = np.array(encrypt_grad_loss_A)

    if self.is_trace:
        self.logger.info("encrypt_grad_loss_A shape" + str(encrypt_grad_loss_A.shape))
        self.logger.info("encrypt_grad_loss_A" + str(encrypt_grad_loss_A))

    self.loss_grads = encrypt_grad_loss_A
    grads = self.localModel.compute_gradients(self.X)
    self.encrypt_grads_W, self.encrypt_grads_b = self.__compute_encrypt_grads(grads, encrypt_grad_loss_A)

def _update_gradients(self):
    # y_overlap_2 has shape (len(overlap_indexes), 1),
    # phi has shape (1, feature_dim),
    # y_overlap_2_phi has shape (len(overlap_indexes), 1, feature_dim)
    y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)

    # enc_uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
    enc_y_overlap_2_phi_uB_overlap_2 = encrypt_matmul_3(y_overlap_2_phi, self.enc_uB_overlap_2)
    enc_loss_grads_const_part1 = np.sum(0.25 * np.squeeze(enc_y_overlap_2_phi_uB_overlap_2, axis=1), axis=0)

    if self.is_trace:
        self.logger.debug("enc_y_overlap_2_phi_uB_overlap_2 shape" + str(enc_y_overlap_2_phi_uB_overlap_2.shape))
        self.logger.debug("enc_loss_grads_const_part1 shape" + str(enc_loss_grads_const_part1.shape))

    y_overlap = np.tile(self.y_overlap, (1, self.enc_uB_overlap.shape[-1]))
    enc_loss_grads_const_part2 = compute_sum_XY(y_overlap * 0.5, self.enc_uB_overlap)

    enc_const = enc_loss_grads_const_part1 - enc_loss_grads_const_part2
    enc_const_overlap = np.tile(enc_const, (len(self.overlap_indexes), 1))
    enc_const_nonoverlap = np.tile(enc_const, (len(self.non_overlap_indexes), 1))
    y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, self.enc_uB_overlap.shape[-1]))

    if self.is_trace:
        self.logger.debug("enc_const shape:" + str(enc_const.shape))
        self.logger.debug("enc_const_overlap shape" + str(enc_const_overlap.shape))
        self.logger.debug("enc_const_nonoverlap shape" + str(enc_const_nonoverlap.shape))
        self.logger.debug("y_non_overlap shape" + str(y_non_overlap.shape))

    enc_grad_A_nonoverlap = compute_XY(self.alpha * y_non_overlap / len(self.y), enc_const_nonoverlap)
    enc_grad_A_overlap = compute_XY_plus_Z(self.alpha * y_overlap / len(self.y), enc_const_overlap, self.enc_mapping_comp_B)

    if self.is_trace:
        self.logger.debug("enc_grad_A_nonoverlap shape" + str(enc_grad_A_nonoverlap.shape))
        self.logger.debug("enc_grad_A_overlap shape" + str(enc_grad_A_overlap.shape))

    enc_loss_grad_A = [[0 for _ in range(self.enc_uB_overlap.shape[1])] for _ in range(len(self.y))]

    # TODO: need a more efficient way to do the following task
    for i, j in enumerate(self.non_overlap_indexes):
        enc_loss_grad_A[j] = enc_grad_A_nonoverlap[i]
    for i, j in enumerate(self.overlap_indexes):
        enc_loss_grad_A[j] = enc_grad_A_overlap[i]

    enc_loss_grad_A = np.array(enc_loss_grad_A)

    if self.is_trace:
        self.logger.debug("enc_loss_grad_A shape" + str(enc_loss_grad_A.shape))
        self.logger.debug("enc_loss_grad_A" + str(enc_loss_grad_A))

    self.loss_grads = enc_loss_grad_A
    self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(
        self.X, enc_loss_grad_A)

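# The element-wise helpers used above (compute_XY, compute_X_plus_Y,
# compute_XY_plus_Z, compute_sum_XY) are not shown in this section. The
# sketches below are only my reading of those names from their call sites --
# the "_sketch" suffix marks them as hypothetical, and the real project may
# implement them on a distributed backend. NumPy applies + and * element-wise
# to object arrays, so the same code works whether the entries are floats or
# ciphertexts, as long as only one operand of each product is encrypted.
import numpy as np


def compute_XY_sketch(X, Y):
    return X * Y                      # element-wise product

def compute_X_plus_Y_sketch(X, Y):
    return X + Y                      # element-wise sum

def compute_XY_plus_Z_sketch(X, Y, Z):
    return X * Y + Z                  # element-wise X*Y, then add Z

def compute_sum_XY_sketch(X, Y):
    return np.sum(X * Y, axis=0)      # element-wise product, summed over rows
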
def test_encrypt_matmul_3_dim_3(self):
    X = np.array([[[1, 2, 3]],
                  [[10, 11, 12]]], dtype=np.float64)
    Y = np.array([[[10, 11, 12],
                   [13, 14, 15],
                   [16, 17, 18]],
                  [[19, 20, 21],
                   [22, 23, 24],
                   [25, 26, 27]]], dtype=np.float64)
    Z = np.matmul(X, Y)
    encrypt_Y = self.encrypt_3d_matrix(Y)
    res = encrypt_matmul_3(X, encrypt_Y)
    decrypt_res = decrypt_matrix(self.privatekey, res)
    assert_matrix(Z, decrypt_res)

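# The test above checks that multiplying a plaintext tensor by an encrypted
# one and then decrypting gives the same result as a plain np.matmul. Its
# fixtures and helpers (self.encrypt_3d_matrix, self.privatekey,
# decrypt_matrix, assert_matrix) are defined elsewhere; the following is a
# minimal sketch of what they might look like, assuming the python-paillier
# (phe) package for illustration -- the project may use its own Paillier
# implementation and different signatures.
import numpy as np
from phe import paillier


def make_keypair_sketch():
    # returns (public_key, private_key)
    return paillier.generate_paillier_keypair()

def encrypt_3d_matrix_sketch(public_key, Y):
    # encrypt every entry individually; result is an object array of ciphertexts
    enc = np.empty(Y.shape, dtype=object)
    for idx in np.ndindex(Y.shape):
        enc[idx] = public_key.encrypt(float(Y[idx]))
    return enc

def decrypt_matrix_sketch(private_key, enc):
    dec = np.empty(enc.shape, dtype=np.float64)
    for idx in np.ndindex(enc.shape):
        dec[idx] = private_key.decrypt(enc[idx])
    return dec

def assert_matrix_sketch(expected, actual, tol=1e-6):
    np.testing.assert_allclose(np.asarray(actual, dtype=np.float64), expected, atol=tol)
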
def __precompute(self):
    # compute a component of guest's loss gradient
    # enc_y_overlap_2_phi has shape (len(overlap_indexes), 1, feature_dim)
    # uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
    enc_y_overlap_2_phi = np.expand_dims(np.tile(
        self.enc_phi, (len(self.overlap_indexes), 1)), axis=1)
    enc_y_overlap_2_phi_uB_overlap_2 = encrypt_matmul_3(
        enc_y_overlap_2_phi, self.uB_overlap_2)
    self.precomputed_grad_component = 0.25 * np.squeeze(
        enc_y_overlap_2_phi_uB_overlap_2, axis=1)

    # compute a component of guest's loss
    enc_phi_uB_overlap_2_phi = 0
    for uB_row in self.uB_overlap:
        uB_row = uB_row.reshape(1, -1)
        enc_phi_uB_overlap_2_phi += encrypt_matmul_2_ob(
            encrypt_matmul_2_ob(uB_row, self.enc_phi_2), uB_row.transpose())
    self.precomputed_loss_component = enc_phi_uB_overlap_2_phi

def send_components(self):
    if self.is_min_gen_enc:
        self.logger.debug("using min_gen_enc")
        self._compute_components()

        # enc_uB_overlap has shape (len(overlap_indexes), feature_dim)
        # enc_uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
        enc_uB_overlap = encrypt_matrix(self.public_key, self.uB_overlap)
        enc_uB_overlap_2 = encrypt_matmul_3(np.expand_dims(self.uB_overlap, axis=2),
                                            np.expand_dims(enc_uB_overlap, axis=1))

        # enc_mapping_comp_B has shape (len(overlap_indexes), feature_dim)
        scale_factor = np.tile((-1 / self.feature_dim), (enc_uB_overlap.shape[0], enc_uB_overlap.shape[1]))
        enc_mapping_comp_B = compute_XY(enc_uB_overlap, scale_factor)
        # enc_mapping_comp_B = enc_uB_overlap * (-1 / self.feature_dim)
        # enc_mapping_comp_B = encrypt_matrix(self.public_key, self.mapping_comp_B)
        return [enc_uB_overlap, enc_uB_overlap_2, enc_mapping_comp_B]
    else:
        components = super(EncryptedFTLHostModel, self).send_components()
        return self.__encrypt_components(components)

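# In the min_gen_enc branch above, uB_overlap appears to be encrypted only
# once, and the squared term is then built homomorphically as a per-sample
# outer product of the plaintext row with its encrypted copy, presumably to
# keep the number of expensive encryptions down. For reference, the plaintext
# quantity that enc_uB_overlap_2 corresponds to would be the following (this
# is my reading of the shapes in the code above, shown only for clarity):
import numpy as np


def uB_overlap_2_plaintext_sketch(uB_overlap):
    # uB_overlap: (len(overlap_indexes), feature_dim)
    # result:     (len(overlap_indexes), feature_dim, feature_dim),
    # i.e. one outer product uB_i^T uB_i per overlapping sample
    return np.matmul(np.expand_dims(uB_overlap, axis=2),
                     np.expand_dims(uB_overlap, axis=1))
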