def test_dyanmic_gru_op(self):
    """Run the dynamic GRU op under 3-party MPC (two sigmoid approximations)
    and compare both revealed outputs against the plaintext expectation.
    """
    data = np.array([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0],
                     [-1.0, -2.0, -3.0]]).astype('float32')
    weight = np.array([[0.0, 0.0, 0.0]]).astype('float32')

    def all_shares(plain):
        # Secret-share the plaintext and gather each party's share pair.
        shares = aby3.make_shares(plain)
        return np.array([aby3.get_aby3_shares(shares, i) for i in range(3)])

    return_results = Manager().list()
    return_results_cheb = Manager().list()
    expect_results = Manager().list()
    ret = self.multi_party_run(target=self.dyanmic_gru_op,
                               data=data,
                               data_share=all_shares(data),
                               weight=weight,
                               weight_share=all_shares(weight),
                               return_results=return_results,
                               return_results_cheb=return_results_cheb,
                               expect_results=expect_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    revealed_cheb = aby3.reconstruct(np.array(return_results_cheb))
    print("expected:", expect_results[0])
    print("reveal: ", revealed)
    print("reveal_cheb: ", revealed_cheb)
    # Approximated activations need a loose tolerance (0.5).
    self.assertTrue(np.allclose(revealed, expect_results[0], atol=1e-1 * 5))
    self.assertTrue(
        np.allclose(revealed_cheb, expect_results[0], atol=1e-1 * 5))
def test_mean_normalize(self):
    """Check MPC mean-normalization against the plaintext naive version."""
    f_nums = 100
    sample_nums = np.array(range(2, 10, 2))
    mat, mi, ma, me = gen_data(f_nums, sample_nums)
    self.input_size = [len(sample_nums), f_nums]

    def share(plain):
        # Encode the plaintext as two identical fixed-point int64 shares.
        doubled = np.array([plain * mdu.mpc_one_share] * 2)
        return doubled.astype('int64').reshape([2] + list(plain.shape))

    self.f_range_list = Manager().list()
    self.f_mean_list = Manager().list()
    ret = self.multi_party_run(target=self.mean_normalize,
                               min=share(mi),
                               max=share(ma),
                               mean=share(me),
                               sample_num=share(sample_nums))
    self.assertEqual(ret[0], True)
    f_r = aby3.reconstruct(np.array(self.f_range_list))
    f_m = aby3.reconstruct(np.array(self.f_mean_list))
    plain_r, plain_m = mean_norm_naive(mat)
    self.assertTrue(np.allclose(f_r, plain_r, atol=1e-4))
    self.assertTrue(np.allclose(f_m, plain_m, atol=1e-4))
def get_output():
    """Run the op once per MPC party, reconstruct every named output,
    and return the mean of the per-output means as a float64 scalar.
    """
    # Renamed from `sum`, which shadowed the builtin.
    output_means = []
    return_results = dict()
    for name in output_names:
        return_results[name] = Manager().list()

    def closure(**kwargs):
        role = kwargs['role']
        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        executor = Executor(place)
        executor.run()
        op.run(scope, place)
        # Each party appends its share of every output tensor.
        for name in output_names:
            out = np.array(scope.find_var(name).get_tensor())
            return_results[name].append(out)

    ret = self.multi_party_run(target=closure)
    self.assertEqual(ret[0], True)
    for output_name in output_names:
        plain = aby3.reconstruct(np.array(return_results[output_name]))
        output_means.append(plain.mean())
    # np.float was removed in NumPy 1.24; np.float64 is the exact
    # equivalent of the old alias (Python float).
    return (np.array(output_means).sum() / len(output_names)).astype(np.float64)
def test_diff_dim_mul_mid(self):
    """Broadcast elementwise mul under MPC: (3, 4, 2) * (3, 4) -> 2 * 1.5 = 3."""
    lhs = np.full((3, 4, 2), fill_value=2)
    rhs = np.full((3, 4, ), fill_value=1.5)
    lhs_shares = aby3.make_shares(lhs)
    rhs_shares = aby3.make_shares(rhs)
    lhs_all3shares = np.array(
        [aby3.get_aby3_shares(lhs_shares, i) for i in range(3)])
    rhs_all3shares = np.array(
        [aby3.get_aby3_shares(rhs_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.diff_dim_mul_mid,
                               data_1=lhs_all3shares,
                               data_2=rhs_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    # Every element of the broadcast product is 2 * 1.5 = 3.
    expected_out = np.full((3, 4, 2), fill_value=3)
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def test_pool2d(self):
    """Max-pool a single 1x1x4x6 feature map under MPC and compare with
    the precomputed plaintext result.
    """
    data_1 = np.array([[[[1, 2, 3, 4, 0, 100],
                         [5, 6, 7, 8, 0, 100],
                         [9, 10, 11, 12, 0, 200],
                         [13, 14, 15, 16, 0, 200]]]]).astype('float32')
    expected_out = np.array([[[[6, 8, 100],
                               [14, 16, 200]]]]).astype('float32')
    print("input data_1: {} \n".format(data_1))
    in_shares = aby3.make_shares(data_1)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.pool2d,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-2))
def test_embedding_op(self):
    """Embedding lookup via one-hot rows under MPC vs. plaintext result."""
    one_hot = np.array([[1, 0, 0], [0, 1, 0]])
    data_normal = np.array([0, 1]).astype('int64')
    table = np.array([[1, 2], [2, 3], [3, 4]])
    one_hot_shares = aby3.make_shares(np.array(one_hot))
    one_hot_all3shares = np.array(
        [aby3.get_aby3_shares(one_hot_shares, i) for i in range(3)])
    table_shares = aby3.make_shares(table)
    table_all3shares = np.array(
        [aby3.get_aby3_shares(table_shares, i) for i in range(3)])
    return_results = Manager().list()
    expect_results = Manager().list()
    ret = self.multi_party_run(target=self.embedding_op,
                               data=one_hot,
                               data_normal=data_normal,
                               w_data=table,
                               data_share=one_hot_all3shares,
                               w_data_share=table_all3shares,
                               return_results=return_results,
                               expect_results=expect_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, expect_results[0], atol=1e-4))
def test_reconstruct(self):
    """Share-then-reconstruct round trip recovers the original array."""
    original = np.arange(0, 4).reshape((2, 2)).astype(np.float32)
    shares = aby3.make_shares(original)
    all_3shares = np.array(
        [aby3.get_aby3_shares(shares, i) for i in range(3)])
    self.assertTrue(np.allclose(original, aby3.reconstruct(all_3shares)))
def test_softmax_with_cross_entropy(self):
    """Softmax of uniform logits under MPC should reveal [0.5, 0.5]."""
    logits = np.array([1, 1]).astype('float32')
    labels = np.array([1, 0]).astype('float32')
    expected_out = np.array([0.5, 0.5]).astype('float32')
    logits_shares = aby3.make_shares(logits)
    labels_shares = aby3.make_shares(labels)
    logits_all3shares = np.array(
        [aby3.get_aby3_shares(logits_shares, i) for i in range(3)])
    labels_all3shares = np.array(
        [aby3.get_aby3_shares(labels_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.softmax_with_cross_entropy,
                               data_1=logits_all3shares,
                               data_2=labels_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def decrypt_data(filepath, shape):
    """Load the encrypted data and reconstruct.

    NOTE: returns after reconstructing the first record only.
    """
    part_readers = [
        aby3.load_aby3_shares(filepath, id=party_id, shape=shape)
        for party_id in six.moves.range(3)
    ]
    aby3_share_reader = paddle.reader.compose(part_readers[0],
                                              part_readers[1],
                                              part_readers[2])
    for instance in aby3_share_reader():
        return aby3.reconstruct(np.array(instance))
def test_relu(self):
    """relu over [-3, 3) reshaped to (3, 2): negatives clamp to zero."""
    in_data = np.arange(-3, 3).reshape((3, 2))
    in_shares = aby3.make_shares(in_data)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.relu,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    expected_out = np.array([[0, 0], [0, 0], [1, 2]])
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def test_reduce_sum(self):
    """Sum of a (3, 4) array of twos under MPC is 24."""
    in_data = np.full(shape=(3, 4), fill_value=2)
    in_shares = aby3.make_shares(in_data)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.reduce_sum,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    # 3 * 4 elements, each equal to 2.
    expected_out = np.array([[24]])
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def load_decrypt_data(filepath, shape):
    """Load the encrypted data, reconstruct each record, and print it as
    a per-epoch loss value.
    """
    part_readers = [
        aby3.load_aby3_shares(filepath, id=party_id, shape=shape)
        for party_id in six.moves.range(3)
    ]
    aby3_share_reader = paddle.reader.compose(part_readers[0],
                                              part_readers[1],
                                              part_readers[2])
    for epoch_id, instance in enumerate(aby3_share_reader()):
        p = aby3.reconstruct(np.array(instance))
        print("Epoch %d, Step 0, Loss: %f " % (epoch_id, p[0]))
def decrypt_data_to_file(cypher_filepath, plaintext_filepath, shape):
    """Load the encrypted data, reconstruct each record, and append it to
    a CSV file.
    """
    part_readers = [
        aby3.load_aby3_shares(cypher_filepath, id=party_id, shape=shape)
        for party_id in six.moves.range(3)
    ]
    aby3_share_reader = paddle.reader.compose(part_readers[0],
                                              part_readers[1],
                                              part_readers[2])
    for instance in aby3_share_reader():
        plain = aby3.reconstruct(np.array(instance))
        pd.DataFrame(plain).to_csv(plaintext_filepath,
                                   mode='a',
                                   index=False,
                                   header=0)
def decrypt_data_to_file(filepath, shape, decrypted_filepath):
    """Load the encrypted data, reconstruct each record, and append every
    value to a text file, one per line.
    """
    part_readers = [
        aby3.load_aby3_shares(filepath, id=party_id, shape=shape)
        for party_id in six.moves.range(3)
    ]
    aby3_share_reader = paddle.reader.compose(part_readers[0],
                                              part_readers[1],
                                              part_readers[2])
    for instance in aby3_share_reader():
        plain = aby3.reconstruct(np.array(instance))
        with open(decrypted_filepath, 'a+') as f:
            for value in plain:
                f.write(str(value) + '\n')
def _get_gradient(self, input_to_check, place, output_names, no_grad_set,
                  parallel=False):
    """Build the op's program with a backward pass, run it under 3-party
    MPC, and return the reconstructed gradients of `input_to_check`.

    Args:
        input_to_check: parameter names whose gradients are fetched.
        place: fluid execution place.
        output_names: outputs used to build the loss.
        no_grad_set: variables excluded from gradient computation.
        parallel: unused here; kept for interface compatibility.

    Returns:
        list of numpy arrays, one reconstructed gradient per fetched var.
    """
    prog = Program()
    block = prog.global_block()
    self._append_ops(block)
    loss = append_loss_ops(block, output_names)
    param_grad_list = append_backward(loss=loss,
                                      parameter_list=input_to_check,
                                      no_grad_set=no_grad_set)
    inputs = self._get_inputs(block)
    feed_dict = self.feed_var(inputs, place)
    # Fetch only the gradient vars from the (param, grad) pairs.
    fetch_list = [g for p, g in param_grad_list]
    # One shared list per fetched gradient; each party appends its share.
    return_results = [Manager().list() for _ in range(len(fetch_list))]

    def closure(**kwargs):
        # Executed once per party process; `role` selects the aby3 party.
        role = kwargs['role']
        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        #init_op = fluid.default_main_program().global_block().ops[0]
        #_insert_init_op(program, init_op)
        executor = Executor(place)
        executor.run()
        outs = executor.run(prog, feed=feed_dict, fetch_list=fetch_list)
        for idx in range(len(fetch_list)):
            return_results[idx].append(outs[idx])

    ret = self.multi_party_run(target=closure)
    self.assertEqual(ret[0], True)
    outs = []
    # Combine the three parties' shares back into plaintext gradients.
    for idx in range(len(fetch_list)):
        outs.append(aby3.reconstruct(np.array(return_results[idx])))
    return outs
def test_mean(self):
    """
    Test normal case: mean of 1..8 is 4.5.
    :return:
    """
    in_data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    in_shares = aby3.make_shares(in_data)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.mean,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, np.array([4.5]), atol=1e-4))
def test_square(self):
    """
    Test normal case: 3 squared is 9, elementwise.
    :return:
    """
    in_data = np.full(shape=(2, 2), fill_value=3)
    in_shares = aby3.make_shares(in_data)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.square,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    expected_out = np.full(shape=(2, 2), fill_value=9)
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def test_square_error_cost(self):
    """Elementwise (x - y)^2 under MPC matches the plaintext result."""
    lhs = np.arange(0, 4).reshape((2, 2))
    rhs = np.full(shape=(2, 2), fill_value=2)
    lhs_shares = aby3.make_shares(lhs)
    rhs_shares = aby3.make_shares(rhs)
    lhs_all3shares = np.array(
        [aby3.get_aby3_shares(lhs_shares, i) for i in range(3)])
    rhs_all3shares = np.array(
        [aby3.get_aby3_shares(rhs_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.square_error_cost,
                               data_1=lhs_all3shares,
                               data_2=rhs_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    # ([[0, 1], [2, 3]] - 2) ** 2 == [[4, 1], [0, 1]]
    expected_out = (lhs - rhs) ** 2
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-4))
def test_diff_dim_mul(self):
    """Matrix product of (3, 4) x (4, 5) shares equals plaintext dot."""
    lhs = np.arange(0, 12).reshape((3, 4))
    rhs = np.full(shape=(4, 5), fill_value=2)
    lhs_shares = aby3.make_shares(lhs)
    rhs_shares = aby3.make_shares(rhs)
    lhs_all3shares = np.array(
        [aby3.get_aby3_shares(lhs_shares, i) for i in range(3)])
    rhs_all3shares = np.array(
        [aby3.get_aby3_shares(rhs_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.diff_dim_mul,
                               data_1=lhs_all3shares,
                               data_2=rhs_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, lhs.dot(rhs), atol=1e-4))
def test_batch_norm(self):
    """Batch norm of a 2-sample batch normalizes each feature to -1/+1."""
    in_data = np.array([[10, 10, 10], [50, 50, 50]]).astype('float32')
    expected_out = np.array([[-1, -1, -1], [1, 1, 1]]).astype('float32')
    in_shares = aby3.make_shares(in_data)
    in_all3shares = np.array(
        [aby3.get_aby3_shares(in_shares, i) for i in range(3)])
    return_results = Manager().list()
    ret = self.multi_party_run(target=self.batch_norm,
                               data_1=in_all3shares,
                               return_results=return_results)
    self.assertEqual(ret[0], True)
    revealed = aby3.reconstruct(np.array(return_results))
    self.assertTrue(np.allclose(revealed, expected_out, atol=1e-2))
def decrypt_data_to_file(filepath, shape, decrypted_filepath):
    """Load the encrypted data (arithmetic share) and reconstruct it into
    a fresh plaintext file, one value per line.
    """
    # Start from an empty output file so repeated runs do not accumulate.
    if os.path.exists(decrypted_filepath):
        os.remove(decrypted_filepath)
    part_readers = [
        aby3.load_aby3_shares(filepath, id=party_id, shape=shape)
        for party_id in six.moves.range(3)
    ]
    aby3_share_reader = paddle.reader.compose(part_readers[0],
                                              part_readers[1],
                                              part_readers[2])
    for instance in aby3_share_reader():
        plain = aby3.reconstruct(np.array(instance))
        with open(decrypted_filepath, 'a+') as f:
            for value in plain:
                f.write(str(value) + '\n')
def _calc_output(self,
                 place,
                 parallel=False,
                 no_check_set=None,
                 loss=None,
                 enable_inplace=None,
                 for_inplace_test=False):
    """Build the op's program, run it under 3-party MPC with
    return_numpy=False (so LoD info survives), reconstruct each fetched
    output, and return them as LoDTensors.

    Returns:
        (outs, fetch_list), plus (feed_map, original_program, op.desc)
        when for_inplace_test is True.
    """
    program = Program()
    block = program.global_block()
    op = self._append_ops(block)
    inputs = self._get_inputs(block)
    outputs = self._get_outputs(block)
    feed_map = self.feed_var(inputs, place)
    if for_inplace_test:
        # Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op,
        # and the shapes of those variables contain 0 (eg. Xshape.shape = [0, 2, 5]).
        # Set persistable for those variables in order to get them from global_scope for inplace grad test directly other than feed them,
        # since feed op calls check_memory_size() which fails when tensor's holder_ is NULL.
        for name in op.output_arg_names:
            var = block.var(name)
            var.persistable = True
    original_program = program
    #if parallel:
    #    use_cuda = False
    #    if isinstance(place, fluid.CUDAPlace):
    #        use_cuda = True
    #    compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
    #        loss_name=loss.name if loss else None, places=place)
    #    program = compiled_prog
    fetch_list = getattr(self, "fetch_list", [])
    # if the fetch_list is customized by user, we use it directly.
    # if not, fill the fetch_list by the user configured outputs in test.
    if len(fetch_list) == 0:
        for var_name, var in six.iteritems(outputs):
            if no_check_set is not None and var_name in no_check_set:
                continue
            if isinstance(var, list):
                for v in var:
                    fetch_list.append(v.name)
            else:
                fetch_list.append(var.name)
    # if the fetch_list still empty, fill the fetch_list by the operator output.
    if len(fetch_list) == 0:
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            fetch_list.append(str(out_name))
    if enable_inplace is not None:
        build_strategy = fluid.BuildStrategy()
        build_strategy.enable_inplace = enable_inplace
        compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
            build_strategy=build_strategy, places=place)
        program = compiled_prog
    # Manager() can not store LoDTensor directly
    # So, use one additional element to store output lod
    return_results = [Manager().list() for _ in range(len(fetch_list) + 1)]

    def closure(**kwargs):
        # Executed once per party process; `role` selects the aby3 party.
        role = kwargs['role']
        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        #init_op = fluid.default_main_program().global_block().ops[0]
        #_insert_init_op(program, init_op)
        executor = Executor(place)
        executor.run()
        outs = executor.run(program,
                            feed=feed_map,
                            fetch_list=fetch_list,
                            return_numpy=False)
        # Collect each output's share and its LoD; the LoD list rides in
        # the extra trailing slot of return_results.
        lod = []
        for idx in range(len(fetch_list)):
            return_results[idx].append(np.array(outs[idx]))
            lod_i = outs[idx].lod()
            lod_concat = []
            for i in lod_i:
                lod_concat.append(i)
            lod.append(lod_concat)
        return_results[len(fetch_list)].append(lod)

    ret = self.multi_party_run(target=closure)
    self.assertEqual(ret[0], True)
    outs = []
    lod = np.array(return_results[len(fetch_list)])
    # Rebuild LoDTensors from the reconstructed plaintext arrays.
    for idx in range(len(fetch_list)):
        t = fluid.LoDTensor()
        reveal_data = aby3.reconstruct(np.array(return_results[idx]))
        t.set(reveal_data, place)
        lod_idx = lod[0][idx]
        # Some ops emit malformed LoD; ignore failures when re-attaching.
        try:
            t.set_lod(lod_idx)
        except Exception as e:
            pass
        outs.append(t)
    self.op = op
    self.program = original_program
    if for_inplace_test:
        return outs, fetch_list, feed_map, original_program, op.desc
    else:
        return outs, fetch_list
def _get_gradient(self, input_to_check, place, output_names, no_grad_set,
                  parallel=False):
    """Build the op's program with a backward pass, run it under 3-party
    MPC with return_numpy=False (so LoD info survives), reconstruct each
    gradient and return them as LoDTensors.

    Args:
        input_to_check: parameter names whose gradients are fetched.
        place: fluid execution place.
        output_names: outputs used to build the loss.
        no_grad_set: variables excluded from gradient computation.
        parallel: unused here; kept for interface compatibility.

    Returns:
        list of fluid.LoDTensor, one reconstructed gradient per fetched var.
    """
    prog = Program()
    block = prog.global_block()
    self._append_ops(block)
    loss = append_loss_ops(block, output_names)
    param_grad_list = append_backward(loss=loss,
                                      parameter_list=input_to_check,
                                      no_grad_set=no_grad_set)
    inputs = self._get_inputs(block)
    feed_dict = self.feed_var(inputs, place)
    # Fetch only the gradient vars from the (param, grad) pairs.
    fetch_list = [g for p, g in param_grad_list]
    # Manager() can not store LoDTensor directly
    # So, use one additional element to store output lod
    return_results = [Manager().list() for _ in range(len(fetch_list) + 1)]

    def closure(**kwargs):
        # Executed once per party process; `role` selects the aby3 party.
        role = kwargs['role']
        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        #init_op = fluid.default_main_program().global_block().ops[0]
        #_insert_init_op(program, init_op)
        executor = Executor(place)
        executor.run()
        outs = executor.run(prog,
                            feed=feed_dict,
                            fetch_list=fetch_list,
                            return_numpy=False)
        # append lod information in last position
        lod = []
        for idx in range(len(fetch_list)):
            return_results[idx].append(np.array(outs[idx]))
            lod_i = outs[idx].lod()
            lod_concat = []
            for i in lod_i:
                lod_concat.append(i)
            lod.append(lod_concat)
        return_results[len(fetch_list)].append(lod)

    ret = self.multi_party_run(target=closure)
    self.assertEqual(ret[0], True)
    outs = []
    lod = np.array(return_results[len(fetch_list)])
    # from numpy array to LoDTensor
    for idx in range(len(fetch_list)):
        t = fluid.LoDTensor()
        reveal_data = aby3.reconstruct(np.array(return_results[idx]))
        t.set(reveal_data, place)
        lod_idx = lod[0][idx]
        # TODO: fix: exception throw because some output lod error in gru op
        # out.set_lod(out.lod()) will throw exception
        try:
            t.set_lod(lod_idx)
        except Exception as e:
            pass
        outs.append(t)
    return outs