Example #1
    def diff_dim_mul(self, **kwargs):
        """
        Mul with different dimensions.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        d_2 = kwargs['data_2'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        x = pfl_mpc.data(name='x', shape=[3, 4], dtype='int64')
        y = pfl_mpc.data(name='y', shape=[4, 5], dtype='int64')
        op_mul = pfl_mpc.layers.mul(x=x, y=y)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'x': d_1, 'y': d_2}, fetch_list=[op_mul])

        self.assertEqual(results[0].shape, (2, 3, 5))
        return_results.append(results[0])

    def square_error_cost(self, **kwargs):
        """
        Normal case.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        d_2 = kwargs['data_2'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("privc", role, "localhost", self.server, int(self.port))
        data_1 = pfl_mpc.data(name='data_1', shape=[2, 2], dtype='int64')
        data_2 = pfl_mpc.data(name='data_2', shape=[2, 2], dtype='int64')
        out = pfl_mpc.layers.square_error_cost(data_1, data_2)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'data_1': d_1, 'data_2': d_2}, fetch_list=[out])

        self.assertEqual(results[0].shape, (2, 2))
        return_results.append(results[0])

    def softmax_with_cross_entropy(self, **kwargs):
        """
        Softmax with cross entropy.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        d_2 = kwargs['data_2'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        x = pfl_mpc.data(name='x', shape=[2], dtype='int64')
        y = pfl_mpc.data(name='y', shape=[2], dtype='int64')
        cost, softmax = pfl_mpc.layers.softmax_with_cross_entropy(
            x, y, soft_label=True, return_softmax=True)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'x': d_1, 'y': d_2}, fetch_list=[softmax])

        self.assertEqual(results[0].shape, (2, 2))
        return_results.append(results[0])
Example #4
    def diff_dim_add_mid(self, **kwargs):
        """
        Add with different dimensions.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        d_2 = kwargs['data_2'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        x = pfl_mpc.data(name='x', shape=[3, 4, 2], dtype='int64')
        y = pfl_mpc.data(name='y', shape=[4], dtype='int64')
        # math_add = x + y
        math_add = pfl_mpc.layers.elementwise_add(x, y, axis=1)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'x': d_1, 'y': d_2}, fetch_list=[math_add])

        self.assertEqual(results[0].shape, (2, 3, 4, 2))
        return_results.append(results[0])
Example #5
def decrypt_online(shares, shape):
    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program, startup_program):
        input = pfl_mpc.data(name='input', shape=shape[1:], dtype='int64')
        out = pfl_mpc.layers.reveal(input)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        out_ = exe.run(feed={'input': np.array(shares).reshape(shape)}, fetch_list=[out])
        return out_
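A possible call pattern for decrypt_online, run jointly by all parties after pfl_mpc.init; the share array below is purely illustrative (an assumption), with the leading dimension of 2 being the aby3 share dimension:

# hypothetical local share for this party; a real run would load it from disk
local_share = np.zeros((2, 3, 4), dtype='int64')
revealed = decrypt_online(local_share, shape=(2, 3, 4))
print(revealed[0])  # revealed plaintext, shape (3, 4)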
Example #6
    def mean(self, **kwargs):
        """
        Mean.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        data_1 = pfl_mpc.data(name='data_1', shape=[2, 4], dtype='int64')
        op_mean = pfl_mpc.layers.mean(data_1)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'data_1': d_1}, fetch_list=[op_mean])

        self.assertEqual(results[0].shape, (2, 1))
        return_results.append(results[0])
Example #7
    def reduce_sum(self, **kwargs):
        """
        Normal case.
        :param kwargs:
        :return:
        """

        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        data_1 = pfl_mpc.data(name='x', shape=[3, 4], dtype='int64')
        op_reduce_sum = pfl_mpc.layers.reduce_sum(data_1, [1, 2], keep_dim=True)
        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'x': d_1}, fetch_list=[op_reduce_sum])

        self.assertEqual(results[0].shape, (2, 1, 1))
        return_results.append(results[0])
Example #8
    def relu(self, **kwargs):
        """
        Normal case.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        data_1 = pfl_mpc.data(name='data_1', shape=[3, 2], dtype='int64')
        relu_out = pfl_mpc.layers.relu(input=data_1)

        exe = fluid.Executor(place=fluid.CPUPlace())
        results = exe.run(feed={'data_1': d_1}, fetch_list=[relu_out])

        self.assertEqual(results[0].shape, (2, 3, 2))
        return_results.append(results[0])
Example #9
    def pool2d(self, **kwargs):
        """
        Pool2d.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        x = pfl_mpc.data(name='x', shape=[1, 1, 4, 6], dtype='int64')

        pool_out = pfl_mpc.layers.pool2d(input=x, pool_size=2, pool_stride=2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        #exe.run(fluid.default_startup_program())
        results = exe.run(feed={'x': d_1}, fetch_list=[pool_out])

        self.assertEqual(results[0].shape, (2, 1, 1, 2, 3))
        return_results.append(results[0])
Example #10
    def fc(self, **kwargs):
        """
        Normal case.
        :param kwargs:
        :return:
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        data_1 = pfl_mpc.data(name='data_1', shape=[3, 2], dtype='int64')
        fc_out = pfl_mpc.layers.fc(
            input=data_1,
            size=1,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.ConstantInitializer(0)))
        exe = fluid.Executor(place=fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        results = exe.run(feed={'data_1': d_1}, fetch_list=[fc_out])

        self.assertEqual(results[0].shape, (2, 3, 1))
        return_results.append(results[0])
Example #11
    def embedding_op(self, **kwargs):
        role = kwargs['role']
        #data = kwargs['data']
        data_normal = kwargs['data_normal']
        data_share = kwargs['data_share'][role]

        w_data = kwargs['w_data']
        w_data_share = kwargs['w_data_share'][role]
        return_results = kwargs['return_results']
        expected_result = kwargs['expect_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))

        w_param_attrs = fluid.ParamAttr(name='emb_weight',
                                        learning_rate=0.5,
                                        initializer=pfl_mpc.initializer.NumpyArrayInitializer(w_data_share),
                                        trainable=True)
        w_param_attrs1 = fluid.ParamAttr(name='emb_weight1',
                                         learning_rate=0.5,
                                         initializer=fluid.initializer.NumpyArrayInitializer(w_data),
                                         trainable=True)
        input_shape = np.delete(data_share.shape, 0, 0)
        data1 = pfl_mpc.data(name='input', shape=input_shape, dtype='int64')
        data2 = fluid.data(name='input1', shape=data_normal.shape, dtype='int64')

        math_embedding = fluid.input.embedding(input=data2,
                                               size=w_data.shape,
                                               param_attr=w_param_attrs1,
                                               dtype='float32')

        op_embedding = pfl_mpc.input.embedding(input=data1,
                                               size=(input_shape[1], input_shape[0]),
                                               param_attr=w_param_attrs,
                                               dtype='int64')

        exe = fluid.Executor(place=fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        results = exe.run(feed={'input': data_share, 'input1': data_normal}, fetch_list=[op_embedding, math_embedding])

        return_results.append(results[0])
        expected_result.append(results[1])
Example #12
import paddle
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
import paddle_fl.mpc.data_utils.aby3 as aby3

role, server, port = sys.argv[1], sys.argv[2], sys.argv[3]
# modify the host (localhost) if needed.
pfl_mpc.init("aby3", int(role), "localhost", server, int(port))
role = int(role)

# data preprocessing
BATCH_SIZE = 128
epoch_num = 2

# network
x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 784], dtype='int64')
y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 1], dtype='int64')

y_pre = pfl_mpc.layers.fc(input=x, size=1)
cost = pfl_mpc.layers.sigmoid_cross_entropy_with_logits(y_pre, y)

infer_program = fluid.default_main_program().clone(for_test=False)

avg_loss = pfl_mpc.layers.mean(cost)
optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)

mpc_data_dir = "./mpc_data/"
if not os.path.exists(mpc_data_dir):
    raise ValueError(
        "mpc_data_dir is not found. Please prepare encrypted data.")
Example #13
import paddle_fl.mpc.data_utils.aby3 as aby3

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)

role, server, port = sys.argv[1], sys.argv[2], sys.argv[3]
# modify the host (localhost) if needed.
pfl_mpc.init("aby3", int(role), "localhost", server, int(port))
role = int(role)

# data preprocessing
BATCH_SIZE = 128
epoch_num = 1

x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 1, 28, 28], dtype='int64')
y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 10], dtype='int64')


class Model(object):
    def __init__(self):
        pass

    def lenet3(self):
        conv = pfl_mpc.layers.conv2d(input=x, num_filters=16, filter_size=5, act='relu')
        pool = pfl_mpc.layers.pool2d(input=conv, pool_size=2, pool_stride=2)
        fc_1 = pfl_mpc.layers.fc(input=pool, size=100, act='relu')
        fc_out = pfl_mpc.layers.fc(input=fc_1, size=10)
        cost, softmax = pfl_mpc.layers.softmax_with_cross_entropy(logits=fc_out,
                                                                  label=y,
                                                                  soft_label=True,
                                                                  return_softmax=True)
        # return the network outputs (assumed continuation of the truncated snippet)
        return cost, softmax
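A sketch of wiring this network into a training objective, reusing the mean / SGD / Executor pattern from the other examples; the learning rate and the assumption that lenet3 returns (cost, softmax) are illustrative:

model = Model()
cost, softmax = model.lenet3()
avg_loss = pfl_mpc.layers.mean(cost)

optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)

exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())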
Example #14
    """
    reader = aby3.load_aby3_shares(path, id=role, shape=(party_num, ))
    for n in reader():
        return n


f_max = get_shares(data_path + 'feature_max')
f_min = get_shares(data_path + 'feature_min')
f_mean = get_shares(data_path + 'feature_mean')
sample_num = get_sample_num(data_path + 'sample_num')

pfl_mpc.init("aby3", int(role), "localhost", server, int(port))

shape = [party_num, feat_num]

mi = pfl_mpc.data(name='mi', shape=shape, dtype='int64')
ma = pfl_mpc.data(name='ma', shape=shape, dtype='int64')
me = pfl_mpc.data(name='me', shape=shape, dtype='int64')
sn = pfl_mpc.data(name='sn', shape=shape[:-1], dtype='int64')

out0, out1 = pfl_mpc.layers.mean_normalize(
    f_min=mi, f_max=ma, f_mean=me, sample_num=sn)

exe = fluid.Executor(place=fluid.CPUPlace())

f_range, f_mean = exe.run(
    feed={'mi': f_min,
          'ma': f_max,
          'me': f_mean,
          'sn': sample_num},
    fetch_list=[out0, out1])
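The fetched f_range and f_mean above are still aby3 shares. A minimal sketch, as an assumption mirroring the reveal usage in the decrypt_online example, that fetches plaintext instead; it would replace the exe.run call above, since that call rebinds f_mean:

# add reveal ops so the executor returns plaintext rather than shares
plain_range_op = pfl_mpc.layers.reveal(out0)
plain_mean_op = pfl_mpc.layers.reveal(out1)

plain_range, plain_mean = exe.run(
    feed={'mi': f_min,
          'ma': f_max,
          'me': f_mean,
          'sn': sample_num},
    fetch_list=[plain_range_op, plain_mean_op])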
Example #15
# choose party 0 as the nccl "server", which receives the nccl ids of the other parties:
# party 0 listens on the ports listed in endpoints, and the other parties
# connect to those ports and send their nccl ids
role, server = sys.argv[1], sys.argv[2]
pfl_mpc.init(mpc_protocol_name,
             int(role),
             net_server_addr=server,
             endpoints="33784,45888",
             network_mode="nccl")
role = int(role)

# data preprocessing
BATCH_SIZE = 32
epoch_num = 1

x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 3, 224, 224], dtype='int64')
y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 1000], dtype='int64')


class Model(object):
    """
   lenet model: alexnet, vgg-16
    """
    def __init__(self):
        """
        init
        """
        pass

    def alexnet(self):
        """
Example #16
def train(args):

    # Init MPC
    role = int(args.role)
    pfl_mpc.init("aby3", role, "localhost", args.server, int(args.port))

    # Input and Network
    BATCH_SIZE = args.batch_size
    FIELD_NUM = args.num_field
    FEATURE_NUM = args.sparse_feature_number + 1

    feat_idx = pfl_mpc.data(name='feat_idx',
                            shape=[BATCH_SIZE, FIELD_NUM, FEATURE_NUM],
                            lod_level=1,
                            dtype="int64")
    feat_value = pfl_mpc.data(name='feat_value',
                              shape=[BATCH_SIZE, FIELD_NUM],
                              lod_level=0,
                              dtype="int64")
    label = pfl_mpc.data(name='label',
                         shape=[BATCH_SIZE, 1],
                         lod_level=1,
                         dtype="int64")
    inputs = [feat_idx, feat_value, label]

    avg_cost, predict = mpc_network.FM(args, inputs, seed=2)
    infer_program = fluid.default_main_program().clone(for_test=True)
    optimizer = pfl_mpc.optimizer.SGD(args.base_lr)
    optimizer.minimize(avg_cost)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Prepare train data
    mpc_data_dir = "./mpc_data/"
    mpc_train_data_dir = mpc_data_dir + 'train/'
    if not os.path.exists(mpc_train_data_dir):
        raise ValueError(
            "{} is not found. Please prepare encrypted data.".format(
                mpc_train_data_dir))
    feature_idx_reader = aby3.load_aby3_shares(mpc_train_data_dir +
                                               "criteo_feature_idx",
                                               id=role,
                                               shape=(FIELD_NUM, FEATURE_NUM))
    feature_value_reader = aby3.load_aby3_shares(mpc_train_data_dir +
                                                 "criteo_feature_value",
                                                 id=role,
                                                 shape=(FIELD_NUM, ))
    label_reader = aby3.load_aby3_shares(mpc_train_data_dir + "criteo_label",
                                         id=role,
                                         shape=(1, ))

    batch_feature_idx = aby3.batch(feature_idx_reader,
                                   BATCH_SIZE,
                                   drop_last=True)
    batch_feature_value = aby3.batch(feature_value_reader,
                                     BATCH_SIZE,
                                     drop_last=True)
    batch_label = aby3.batch(label_reader, BATCH_SIZE, drop_last=True)

    loader = fluid.io.DataLoader.from_generator(
        feed_list=[feat_idx, feat_value, label], capacity=BATCH_SIZE)
    batch_sample = paddle.reader.compose(batch_feature_idx,
                                         batch_feature_value, batch_label)
    loader.set_batch_generator(batch_sample, places=place)

    # Training
    logger.info('******************************************')
    logger.info('Start Training...')
    logger.info('batch_size = {}, learning_rate = {}'.format(
        args.batch_size, args.base_lr))

    mpc_model_basedir = "./mpc_model/"
    start_time = time.time()
    step = 0

    for epoch_id in range(args.epoch_num):
        for sample in loader():
            step += 1
            exe.run(feed=sample, fetch_list=[predict.name])
            batch_end = time.time()
            if step % 100 == 0:
                print('Epoch={}, Step={}, current cost time: {}'.format(
                    epoch_id, step, batch_end - start_time))

        print('Epoch={}, current cost time: {}'.format(epoch_id,
                                                       batch_end - start_time))

        # For each epoch: save infer program
        mpc_model_dir = mpc_model_basedir + "epoch{}/party{}".format(
            epoch_id, role)
        fluid.io.save_inference_model(
            dirname=mpc_model_dir,
            feeded_var_names=["feat_idx", "feat_value", "label"],
            target_vars=[predict],
            executor=exe,
            main_program=infer_program,
            model_filename="__model__")

        logger.info('Model is saved in {}'.format(mpc_model_dir))
    end_time = time.time()
    print('Mpc Training of Epoch={} Batch_size={}, epoch_cost={:.4f} s'.format(
        args.epoch_num, BATCH_SIZE, (end_time - start_time)))
    logger.info('******************************************')
    logger.info('End Training...')
Example #17
"""

# set proper path for fluid_encrypted without install, should be first line
import env_set

import time
import sys
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc

role, server, port = env_set.TestOptions().values()

pfl_mpc.init("aby3", int(role), "localhost", server, int(port))

data_1 = pfl_mpc.data(name='data_1', shape=[2, 2], dtype='int64')
data_2 = fluid.data(name='data_2', shape=[1, 2, 2], dtype='float32')

out_gt = data_1 > data_2
out_ge = data_1 >= data_2
out_lt = data_1 < data_2
out_le = data_1 <= data_2
out_eq = data_1 == data_2
out_neq = data_1 != data_2

d_1 = np.array([[[65536, 65536], [65536, 65536]],
                [[65536, 65536], [65536, 65536]]]).astype('int64')
d_2 = np.array([[[10, 3], [0, -3]]]).astype('float32')

exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())
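A minimal sketch of the step this snippet stops before: fetching the six comparison outputs with the prepared feeds, following the exe.run pattern of the other examples:

results = exe.run(feed={'data_1': d_1, 'data_2': d_2},
                  fetch_list=[out_gt, out_ge, out_lt, out_le, out_eq, out_neq])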
Example #18
"""

# set proper path for fluid_encrypted without install, should be first line
import env_set

import sys
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc

role, server, port = env_set.TestOptions().values()

# call mpc add
pfl_mpc.init("aby3", int(role), "localhost", server, int(port))

data_1 = pfl_mpc.data(name='data_1', shape=[8], dtype='int64')
data_2 = pfl_mpc.data(name='data_2', shape=[8], dtype='int64')

d_1 = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6,
                                           7]]).astype('int64')
d_2 = np.array([[7, 6, 5, 4, 3, 2, 1, 0], [7, 6, 5, 4, 3, 2, 1,
                                           0]]).astype('int64')

out_add = data_1 + data_2

exe = fluid.Executor(place=fluid.CPUPlace())
out_add = exe.run(feed={
    'data_1': d_1,
    'data_2': d_2,
}, fetch_list=[out_add])

def load_model_and_infer(args):

    # Init MPC
    role = int(args.role)
    pfl_mpc.init("aby3", role, "localhost", args.server, int(args.port))

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Input
    BATCH_SIZE = args.batch_size
    FIELD_NUM = args.num_field
    FEATURE_NUM = args.sparse_feature_number + 1

    feat_idx = pfl_mpc.data(name='feat_idx',
                            shape=[BATCH_SIZE, FIELD_NUM, FEATURE_NUM],
                            lod_level=1,
                            dtype="int64")
    feat_value = pfl_mpc.data(name='feat_value',
                              shape=[BATCH_SIZE, FIELD_NUM],
                              lod_level=0,
                              dtype="int64")
    label = pfl_mpc.data(name='label',
                         shape=[BATCH_SIZE, 1],
                         lod_level=1,
                         dtype="int64")

    # Prepare test data
    mpc_data_dir = "./mpc_data/"
    mpc_test_data_dir = mpc_data_dir + 'test/'
    if not os.path.exists(mpc_test_data_dir):
        raise ValueError(
            "{}is not found. Please prepare encrypted data.".format(
                mpc_test_data_dir))
    test_feature_idx_reader = aby3.load_aby3_shares(
        mpc_test_data_dir + "criteo_feature_idx",
        id=role,
        shape=(FIELD_NUM, FEATURE_NUM))
    test_feature_value_reader = aby3.load_aby3_shares(mpc_test_data_dir +
                                                      "criteo_feature_value",
                                                      id=role,
                                                      shape=(FIELD_NUM, ))
    test_label_reader = aby3.load_aby3_shares(mpc_test_data_dir +
                                              "criteo_label",
                                              id=role,
                                              shape=(1, ))

    test_batch_feature_idx = aby3.batch(test_feature_idx_reader,
                                        BATCH_SIZE,
                                        drop_last=True)
    test_batch_feature_value = aby3.batch(test_feature_value_reader,
                                          BATCH_SIZE,
                                          drop_last=True)
    test_batch_label = aby3.batch(test_label_reader,
                                  BATCH_SIZE,
                                  drop_last=True)

    test_loader = fluid.io.DataLoader.from_generator(
        feed_list=[feat_idx, feat_value, label],
        capacity=BATCH_SIZE,
        drop_last=True)
    test_batch_sample = paddle.reader.compose(test_batch_feature_idx,
                                              test_batch_feature_value,
                                              test_batch_label)
    test_loader.set_batch_generator(test_batch_sample, places=place)

    for i in range(args.epoch_num):
        mpc_model_dir = './mpc_model/epoch{}/party{}'.format(i, role)
        mpc_model_filename = '__model__'
        infer(test_loader, role, exe, BATCH_SIZE, mpc_model_dir,
              mpc_model_filename)
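The infer helper called above is not shown in this snippet; a sketch of what it might look like (an assumption), loading the saved inference program with fluid.io.load_inference_model and running it over the test loader:

def infer(test_loader, role, exe, batch_size, mpc_model_dir, mpc_model_filename):
    # load the inference program saved during training
    infer_program, feed_names, fetch_targets = fluid.io.load_inference_model(
        dirname=mpc_model_dir, executor=exe, model_filename=mpc_model_filename)
    for sample in test_loader():
        # the fetched predictions are still aby3 shares held by this party
        prediction = exe.run(program=infer_program,
                             feed=sample,
                             fetch_list=fetch_targets)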
Example #20
# See the License for the specific language governing permissions and
# limitations under the License.

import env_set
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc

role, server, port = env_set.TestOptions().values()

pfl_mpc.init("aby3", int(role), "localhost", server, int(port))

batch_size = 3

# x is in ciphertext (secret-shared) form
x = pfl_mpc.data(name='x', shape=[batch_size, 8], dtype='int64')
# y is in ciphertext (secret-shared) form
y = pfl_mpc.data(name='y', shape=[batch_size, 1], dtype='int64')

y_pre = pfl_mpc.layers.fc(input=x, size=1, act=None)
y_relu = pfl_mpc.layers.relu(input=y_pre)
cost = pfl_mpc.layers.square_error_cost(input=y_relu, label=y)
avg_loss = pfl_mpc.layers.mean(cost)

optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)

exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())

iters = 1
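A sketch of the training iterations that would follow; d_x and d_y are hypothetical share batches for this party (the leading dimension of 2 is the aby3 share dimension), stand-ins for real encrypted data:

# hypothetical share batches; real data would be loaded via aby3.load_aby3_shares
d_x = np.zeros((2, batch_size, 8), dtype='int64')
d_y = np.zeros((2, batch_size, 1), dtype='int64')

for i in range(iters):
    loss_share = exe.run(feed={'x': d_x, 'y': d_y}, fetch_list=[avg_loss])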