def _update_gradients(self):
    """Assemble encrypted loss gradients for party A and feed them to the local model.

    Uses the host-computed term ``enc_y_overlap_2_phi_uB_overlap_2`` plus the
    local labels to build a per-sample gradient matrix, stores it in
    ``self.loss_grads`` and derives encrypted parameter gradients.
    """
    # enc_y_overlap_2_phi_uB_overlap_2 was calculated by host
    if self.is_trace:
        self.logger.debug("enc_y_overlap_2_phi_uB_overlap_2 shape"
                          + str(self.enc_y_overlap_2_phi_uB_overlap_2.shape))

    # If the host used a different key pair than ours, the received term is
    # encrypted under our key and must be decrypted locally first.
    if self.host_public_key is not None and self.public_key != self.host_public_key:
        # TODO: decrypt enc_y_overlap_2_phi_uB_overlap_2
        self.enc_y_overlap_2_phi_uB_overlap_2 = decrypt_matrix(
            self.private_key, self.enc_y_overlap_2_phi_uB_overlap_2)

    feature_dim = self.enc_uB_overlap.shape[-1]
    tiled_y_overlap = np.tile(self.y_overlap, (1, feature_dim))
    enc_y_overlap_uB_overlap = compute_sum_XY(tiled_y_overlap * 0.5, self.enc_uB_overlap)

    # Constant part of the gradient shared by every sample.
    enc_const = np.sum(self.enc_y_overlap_2_phi_uB_overlap_2, axis=0) - enc_y_overlap_uB_overlap
    enc_const_overlap = np.tile(enc_const, (len(self.overlap_indexes), 1))
    enc_const_nonoverlap = np.tile(enc_const, (len(self.non_overlap_indexes), 1))
    tiled_y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, feature_dim))

    if self.is_trace:
        self.logger.debug("enc_const shape:" + str(enc_const.shape))
        self.logger.debug("enc_const_overlap shape" + str(enc_const_overlap.shape))
        self.logger.debug("enc_const_nonoverlap shape" + str(enc_const_nonoverlap.shape))
        self.logger.debug("y_non_overlap shape" + str(tiled_y_non_overlap.shape))

    enc_grad_A_nonoverlap = compute_XY(
        self.alpha * tiled_y_non_overlap / len(self.y), enc_const_nonoverlap)
    enc_grad_A_overlap = compute_XY_plus_Z(
        self.alpha * tiled_y_overlap / len(self.y), enc_const_overlap,
        self.enc_mapping_comp_B)

    if self.is_trace:
        self.logger.debug("enc_grad_A_nonoverlap shape" + str(enc_grad_A_nonoverlap.shape))
        self.logger.debug("enc_grad_A_overlap shape" + str(enc_grad_A_overlap.shape))

    # Scatter the two per-group gradient matrices back into full-sample order.
    enc_loss_grad_A = [[0] * self.enc_uB_overlap.shape[1] for _ in range(len(self.y))]
    for row, sample_idx in enumerate(self.non_overlap_indexes):
        enc_loss_grad_A[sample_idx] = enc_grad_A_nonoverlap[row]
    for row, sample_idx in enumerate(self.overlap_indexes):
        enc_loss_grad_A[sample_idx] = enc_grad_A_overlap[row]
    enc_loss_grad_A = np.array(enc_loss_grad_A)

    if self.is_trace:
        self.logger.debug("enc_loss_grad_A shape" + str(enc_loss_grad_A.shape))
        self.logger.debug("enc_loss_grad_A" + str(enc_loss_grad_A))

    self.loss_grads = enc_loss_grad_A
    self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(
        self.X, enc_loss_grad_A)
def test_distributed_calculate_XY(self):
    """compute_XY on a 2-D matrix and a column vector must match numpy broadcasting."""
    print("--- test_distributed_calculate_XY ---")
    X = np.array([[1., 2., 3.],
                  [4., 5., 6.],
                  [7., 8., 9.]])
    Y = np.array([[1], [-1], [1]])
    expected = X * Y
    assert_matrix(expected, compute_XY(X, Y))
def _update_gradients(self):
    """Compute encrypted loss gradients for party A from the U_B components.

    Builds a constant gradient term from the overlap samples, expands it to
    overlap/non-overlap groups, scatters the results back into full-sample
    order and converts local-model gradients into encrypted ones.
    """
    # y_overlap2 has shape (len(overlap_indexes), 1) and
    # y_A_u_A has shape (1, feature_dim), so the expanded product
    # has shape (len(overlap_indexes), 1, feature_dim).
    y_overlap2_y_A_u_A = np.expand_dims(self.y_overlap2 * self.y_A_u_A, axis=1)

    # U_B_2_overlap has shape (len(overlap_indexes), feature_dim, feature_dim);
    # after the batched matmul and squeeze, tmp2 has shape
    # (len(overlap_indexes), feature_dim).
    tmp1 = encrypt_matmul_3(y_overlap2_y_A_u_A, self.U_B_2_overlap)
    tmp2 = 0.25 * np.squeeze(tmp1, axis=1)
    if self.is_trace:
        self.logger.info("tmp1 shape" + str(tmp1.shape))
        self.logger.info("tmp2 shape" + str(tmp2.shape))

    feature_dim = self.U_B_overlap.shape[-1]
    y_overlap = np.tile(self.y_overlap, (1, feature_dim))
    tmp3 = compute_sum_XY(y_overlap * 0.5, self.U_B_overlap)

    # TODO: do sum effectively
    encrypt_const = np.sum(tmp2, axis=0) - tmp3
    encrypt_const_overlap = np.tile(encrypt_const, (len(self.overlap_indexes), 1))
    encrypt_const_nonoverlap = np.tile(encrypt_const, (len(self.non_overlap_indexes), 1))
    y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, feature_dim))

    if self.is_trace:
        self.logger.info("encrypt_const shape:" + str(encrypt_const.shape))
        self.logger.info("encrypt_const_overlap shape" + str(encrypt_const_overlap.shape))
        self.logger.info("encrypt_const_nonoverlap shape" + str(encrypt_const_nonoverlap.shape))
        self.logger.info("y_non_overlap shape" + str(y_non_overlap.shape))

    encrypt_grad_A_nonoverlap = compute_XY(
        self.alpha * y_non_overlap / len(self.y), encrypt_const_nonoverlap)
    encrypt_grad_A_overlap = compute_XY_plus_Z(
        self.alpha * y_overlap / len(self.y), encrypt_const_overlap, self.mapping_comp_B)

    if self.is_trace:
        self.logger.info("encrypt_grad_A_nonoverlap shape" + str(encrypt_grad_A_nonoverlap.shape))
        self.logger.info("encrypt_grad_A_overlap shape" + str(encrypt_grad_A_overlap.shape))

    # TODO: need more efficient way to do following task
    # Scatter per-group gradients back into full-sample order.
    encrypt_grad_loss_A = [[0] * self.U_B_overlap.shape[1] for _ in range(len(self.y))]
    for row, sample_idx in enumerate(self.non_overlap_indexes):
        encrypt_grad_loss_A[sample_idx] = encrypt_grad_A_nonoverlap[row]
    for row, sample_idx in enumerate(self.overlap_indexes):
        encrypt_grad_loss_A[sample_idx] = encrypt_grad_A_overlap[row]
    encrypt_grad_loss_A = np.array(encrypt_grad_loss_A)

    if self.is_trace:
        self.logger.info("encrypt_grad_loss_A shape" + str(encrypt_grad_loss_A.shape))
        self.logger.info("encrypt_grad_loss_A" + str(encrypt_grad_loss_A))

    self.loss_grads = encrypt_grad_loss_A
    grads = self.localModel.compute_gradients(self.X)
    self.encrypt_grads_W, self.encrypt_grads_b = self.__compute_encrypt_grads(
        grads, encrypt_grad_loss_A)
def test_distributed_calculate_XY_2(self):
    """compute_XY must broadcast a (4, 1, 1) factor over a (4, 3, 3) tensor."""
    print("--- test_distributed_calculate_XY_2 ---")
    X = np.random.rand(4, 3, 3)   # shape (4, 3, 3)
    Y = np.random.rand(4, 1, 1)   # shape (4, 1, 1)
    expected = X * Y
    print(expected, expected.shape)
    assert_matrix(expected, compute_XY(X, Y))
def test_distributed_calculate_XY_1(self):
    """compute_XY must broadcast a (4, 1) column over a (4, 3) matrix."""
    print("--- test_distributed_calculate_XY_1 ---")
    X = np.array([[1., 2., 3.],
                  [4., 5., 6.],
                  [7., 8., 9.],
                  [10, 11, 12]])   # shape (4, 3)
    Y = np.array([[2], [1], [-1], [1]])   # shape (4, 1)
    expected = X * Y
    print(expected, expected.shape)
    assert_matrix(expected, compute_XY(X, Y))
def _update_gradients(self):
    """Compute encrypted loss and parameter gradients for party A (min-enc variant).

    Forms the two constant parts of the gradient from the encrypted host
    components, expands them to overlap/non-overlap sample groups, scatters
    back into full-sample order and pushes through the local model.
    """
    # y_overlap_2 has shape (len(overlap_indexes), 1) and
    # phi has shape (1, feature_dim), so y_overlap_2_phi has shape
    # (len(overlap_indexes), 1, feature_dim).
    y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)

    # uB_2_overlap has shape (len(overlap_indexes), feature_dim, feature_dim).
    enc_y_overlap_2_phi_uB_overlap_2 = encrypt_matmul_3(y_overlap_2_phi, self.enc_uB_overlap_2)
    enc_loss_grads_const_part1 = np.sum(
        0.25 * np.squeeze(enc_y_overlap_2_phi_uB_overlap_2, axis=1), axis=0)
    if self.is_trace:
        self.logger.debug("enc_y_overlap_2_phi_uB_overlap_2 shape"
                          + str(enc_y_overlap_2_phi_uB_overlap_2.shape))
        self.logger.debug("enc_loss_grads_const_part1 shape"
                          + str(enc_loss_grads_const_part1.shape))

    feature_dim = self.enc_uB_overlap.shape[-1]
    tiled_y_overlap = np.tile(self.y_overlap, (1, feature_dim))
    enc_loss_grads_const_part2 = compute_sum_XY(tiled_y_overlap * 0.5, self.enc_uB_overlap)

    enc_const = enc_loss_grads_const_part1 - enc_loss_grads_const_part2
    enc_const_overlap = np.tile(enc_const, (len(self.overlap_indexes), 1))
    enc_const_nonoverlap = np.tile(enc_const, (len(self.non_overlap_indexes), 1))
    tiled_y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, feature_dim))

    if self.is_trace:
        self.logger.debug("enc_const shape:" + str(enc_const.shape))
        self.logger.debug("enc_const_overlap shape" + str(enc_const_overlap.shape))
        self.logger.debug("enc_const_nonoverlap shape" + str(enc_const_nonoverlap.shape))
        self.logger.debug("y_non_overlap shape" + str(tiled_y_non_overlap.shape))

    enc_grad_A_nonoverlap = compute_XY(
        self.alpha * tiled_y_non_overlap / len(self.y), enc_const_nonoverlap)
    enc_grad_A_overlap = compute_XY_plus_Z(
        self.alpha * tiled_y_overlap / len(self.y), enc_const_overlap,
        self.enc_mapping_comp_B)

    if self.is_trace:
        self.logger.debug("enc_grad_A_nonoverlap shape" + str(enc_grad_A_nonoverlap.shape))
        self.logger.debug("enc_grad_A_overlap shape" + str(enc_grad_A_overlap.shape))

    # TODO: need more efficient way to do following task
    # Scatter per-group gradients back into full-sample order.
    enc_loss_grad_A = [[0] * self.enc_uB_overlap.shape[1] for _ in range(len(self.y))]
    for row, sample_idx in enumerate(self.non_overlap_indexes):
        enc_loss_grad_A[sample_idx] = enc_grad_A_nonoverlap[row]
    for row, sample_idx in enumerate(self.overlap_indexes):
        enc_loss_grad_A[sample_idx] = enc_grad_A_overlap[row]
    enc_loss_grad_A = np.array(enc_loss_grad_A)

    if self.is_trace:
        self.logger.debug("enc_loss_grad_A shape" + str(enc_loss_grad_A.shape))
        self.logger.debug("enc_loss_grad_A" + str(enc_loss_grad_A))

    self.loss_grads = enc_loss_grad_A
    self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(
        self.X, enc_loss_grad_A)
def send_components(self):
    """Build the component list party A (guest) sends to the host.

    In min_gen_enc mode only phi-derived terms are encrypted locally;
    otherwise the parent implementation's components are encrypted wholesale.
    """
    if not self.is_min_gen_enc:
        components = super(EncryptedFTLGuestModel, self).send_components()
        return self.__encrypt_components(components)

    self.logger.debug("using min_gen_enc")
    self._compute_components()
    # phi has shape (1, feature_dim);
    # phi_2 has shape (feature_dim, feature_dim).
    enc_phi = encryption.encrypt_matrix(self.public_key, self.phi)
    enc_phi_2 = encrypt_matmul_2_ob(self.phi.transpose(), enc_phi)
    # Per-sample products: 0.25 * y_overlap_2 ⊙ phi_2 and -0.5 * y_overlap ⊙ phi,
    # with the encrypted factor tiled across the overlap samples.
    enc_y_overlap_2_phi_2 = compute_XY(
        0.25 * np.expand_dims(self.y_overlap_2, axis=2),
        np.tile(enc_phi_2, (self.y_overlap_2.shape[0], 1, 1)))
    enc_y_overlap_phi = compute_XY(
        -0.5 * self.y_overlap,
        np.tile(enc_phi, (self.y_overlap.shape[0], 1)))
    enc_mapping_comp_A = encrypt_matrix(self.public_key, self.mapping_comp_A)
    return [enc_y_overlap_2_phi_2, enc_y_overlap_phi, enc_mapping_comp_A]
def send_components(self):
    """Build the component list party B (host) sends to the guest.

    In min_gen_enc mode the uB terms are encrypted and combined locally;
    otherwise the parent implementation's components are encrypted wholesale.
    """
    if not self.is_min_gen_enc:
        components = super(EncryptedFTLHostModel, self).send_components()
        return self.__encrypt_components(components)

    self.logger.debug("using min_gen_enc")
    self._compute_components()
    # enc_uB_overlap has shape (len(overlap_indexes), feature_dim);
    # enc_uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim).
    enc_uB_overlap = encrypt_matrix(self.public_key, self.uB_overlap)
    enc_uB_overlap_2 = encrypt_matmul_3(
        np.expand_dims(self.uB_overlap, axis=2),
        np.expand_dims(enc_uB_overlap, axis=1))
    # enc_mapping_comp_B has shape (len(overlap_indexes), feature_dim);
    # this is enc_uB_overlap scaled by -1/feature_dim, expressed via compute_XY.
    # NOTE(review): argument order here is (encrypted, plain) while other call
    # sites use (plain, encrypted) — confirm compute_XY is order-insensitive.
    scale_factor = np.tile((-1 / self.feature_dim),
                           (enc_uB_overlap.shape[0], enc_uB_overlap.shape[1]))
    enc_mapping_comp_B = compute_XY(enc_uB_overlap, scale_factor)
    return [enc_uB_overlap, enc_uB_overlap_2, enc_mapping_comp_B]