Example #1
def run_mpc_kmeans(epochs=5, input_path=None, k=2, skip_plaintext=False):
    crypten.init()
    torch.manual_seed(1)

    dataset = pd.read_csv(input_path)
    X = dataset.iloc[:, [3, 4]].values
    dataset.describe()

    #print_dataset(X)

    if k > len(X):
        print("K means not possible as cluster is greater than  dataset")
        sys.exit()

    enc_dataset = []
    for x in X:
        tensor = crypten.cryptensor(x)
        enc_dataset.append((tensor, -1))

    logging.info("==================")
    logging.info("CrypTen K Means  Training")
    logging.info("==================")
    clusters = train_kmeans(enc_dataset, epochs, k)
    # if crypten.communicator.get().get_rank() == 0:
    logging.info("==================")
    logging.info("Decrypting Clusters ")
    logging.info("==================")
    decrypted_clusters = decrypt_clusters(clusters, k)

    # if crypten.communicator.get().get_rank() == 0:
    logging.info("==================")
    logging.info("Printing  Clusters ")
    logging.info("==================")
    verify_clusters(decrypted_clusters)
Example #2
def main():
    """Runs benchmarks and saves if path is provided"""
    crypten.init()
    args = get_args()
    device = torch.device(args.device)

    benchmarks = [
        FuncBenchmarks(device=device),
        ModelBenchmarks(device=device, advanced_models=args.advanced_models),
    ]

    if args.only_functions:
        benchmarks = [FuncBenchmarks(device=device)]

    if args.world_size > 1:
        if args.ttp:
            crypten.mpc.set_default_provider("TTP")
        launcher = multiprocess_launcher.MultiProcessLauncher(
            args.world_size, multiprocess_caller, fn_args=args)
        launcher.start()
        launcher.join()
        launcher.terminate()

    else:
        pd.set_option("display.precision", 3)
        for benchmark in benchmarks:
            benchmark.run()
            print(benchmark)
            if args.path:
                benchmark.save(args.path)
Example #3
def main():
    # Initialize CrypTen and disable OpenMP threads (needed by @mpc.run_multiprocess)
    crypten.init()
    torch.set_num_threads(1)

    lr = LogisticRegression()
    lr.train(init_w, training_samples, alpha)
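
The comment at the top of this example refers to CrypTen's @mpc.run_multiprocess decorator. Below is a minimal, hedged sketch of how that decorator is commonly applied; the world size, function name, and body are illustrative and not taken from the example above.

import crypten.communicator as comm
from crypten import mpc

@mpc.run_multiprocess(world_size=2)
def report_rank():
    # Each forked party is initialized by the launcher before this body runs.
    return comm.get().get_rank()

if __name__ == "__main__":
    # Expected to return the per-party results ordered by rank, e.g. [0, 1].
    print(report_rank())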
Example #4
    def __init__(self):
        """Initializes a Trusted Third Party server that receives requests"""
        # Initialize connection
        crypten.init()
        self.group = comm.get().ttp_group
        self._setup_generators()

        logging.info("TTPServer Initialized")
        try:
            while True:
                # Wait for next request from client
                message = comm.get().recv_obj(0, self.group)
                logging.info("Message received: %s" % message)

                if message == "terminate":
                    logging.info("TTPServer shutting down.")
                    return

                function = message["function"]
                args = message["args"]
                kwargs = message["kwargs"]
                result = getattr(self, function)(*args, **kwargs)
                comm.get().send_obj(result, 0, self.group)
        except RuntimeError:
            logging.info("TTPServer shutting down.")
Example #5
def run_party(cid, func, rank, world_size, master_addr, master_port, func_args,
              func_kwargs):
    """Start crypten party localy and run computation.
    Args:
        cid (int): CrypTen computation id.
        func (function): computation to be done.
        rank (int): rank of the crypten party.
        world_size (int): number of crypten parties involved in the computation.
        master_addr (str): IP address of the master party (party with rank 0).
        master_port (int or str): port of the master party (party with rank 0).
        func_args (list): arguments to be passed to func.
        func_kwargs (dict): keyword arguments to be passed to func.
    Returns:
        The return value of func.
    """

    process, queue = _new_party(cid, func, rank, world_size, master_addr,
                                master_port, func_args, func_kwargs)
    was_initialized = DistributedCommunicator.is_initialized()
    if was_initialized:
        crypten.uninit()
    process.start()
    # wait for response
    res = queue.get()
    if was_initialized:
        crypten.init()
    return res
Example #6
    def __init__(self):
        """Initializes a Trusted Third Party server that receives requests"""
        # Initialize connection
        crypten.init()
        self.ttp_group = comm.get().ttp_group
        self.comm_group = comm.get().ttp_comm_group
        self.device = "cpu"
        self._setup_generators()
        ttp_rank = comm.get().get_ttp_rank()

        logging.info("TTPServer Initialized")
        try:
            while True:
                # Wait for next request from client
                message = comm.get().recv_obj(0, self.ttp_group)
                logging.info("Message received: %s" % message)

                if message == "terminate":
                    logging.info("TTPServer shutting down.")
                    return

                function = message["function"]
                device = message["device"]
                args = message["args"]
                kwargs = message["kwargs"]

                self.device = device

                result = getattr(self, function)(*args, **kwargs)

                comm.get().send_obj(result.size(), 0, self.ttp_group)
                comm.get().broadcast(result, ttp_rank, self.comm_group)
        except RuntimeError as err:
            logging.info("Encountered Runtime error. TTPServer shutting down:")
            logging.info(f"{err}")
Example #7
    def test_encode_decode(self):
        """Tests tensor encoding and decoding."""
        for float in [False, True]:
            if float:
                fpe = FixedPointEncoder(precision_bits=16)
            else:
                fpe = FixedPointEncoder(precision_bits=0)
            tensor = get_test_tensor(float=float)
            decoded = fpe.decode(fpe.encode(tensor))
            self._check(
                decoded,
                tensor,
                "Encoding/decoding a %s failed." % "float" if float else "long",
            )

        # Make sure encoding a subclass of CrypTensor is a no-op
        crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)
        crypten.init()

        tensor = get_test_tensor(float=True)
        encrypted_tensor = crypten.cryptensor(tensor)
        encrypted_tensor = fpe.encode(encrypted_tensor)
        self._check(
            encrypted_tensor.get_plain_text(),
            tensor,
            "Encoding an EncryptedTensor failed.",
        )

        # Try a few other types.
        fpe = FixedPointEncoder(precision_bits=0)
        for dtype in [torch.uint8, torch.int8, torch.int16]:
            tensor = torch.zeros(5, dtype=dtype).random_()
            decoded = fpe.decode(fpe.encode(tensor)).type(dtype)
            self._check(decoded, tensor, "Encoding/decoding a %s failed." % dtype)
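
For reference, a small standalone sketch of the encode/decode round trip this test exercises; the precision and input values below are chosen arbitrarily and are not part of the test.

import torch
from crypten.encoder import FixedPointEncoder

fpe = FixedPointEncoder(precision_bits=16)
x = torch.tensor([0.25, -1.5, 3.0])
encoded = fpe.encode(x)        # integer representation, scaled by 2 ** 16
decoded = fpe.decode(encoded)  # back to (approximately) the original floats
print(encoded, decoded)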
Example #8
    def setUp(self):
        super().setUp()
        # We don't want the main process (rank -1) to initialize the communicator
        if self.rank >= 0:
            crypten.init()

        # Testing debug mode
        set_debug_mode()
Example #9
def resnet18_enc():
    model = torch.hub.load("pytorch/vision:v0.5.0",
                           "resnet18",
                           pretrained=True)
    dummy_input = torch.rand([1, 3, 224, 224])
    crypten.init()
    model_enc = crypten.nn.from_pytorch(model, dummy_input)
    return model_enc
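
A hedged sketch of how the converted model returned by resnet18_enc might then be used for private inference; the random input and the surrounding calls are illustrative, not part of the example.

import torch
import crypten

model_enc = resnet18_enc()   # also calls crypten.init()
model_enc.encrypt()          # encrypt the model parameters
model_enc.eval()

x_enc = crypten.cryptensor(torch.rand([1, 3, 224, 224]))  # encrypt the input
logits = model_enc(x_enc).get_plain_text()                # decrypt the output
print(logits.argmax(dim=1))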
Example #10
    def _run_process(cls, rank, env, run_process_fn, fn_args):
        for env_key, env_value in env.items():
            os.environ[env_key] = env_value
        os.environ["RANK"] = str(rank)
        orig_logging_level = logging.getLogger().level
        logging.getLogger().setLevel(logging.INFO)
        crypten.init()
        logging.getLogger().setLevel(orig_logging_level)
        run_process_fn(fn_args)
Example #11
    def test_in_first(self):
        # This will cause the parent process to init with world-size 1
        crypten.init()
        self.assertEqual(comm.get().get_world_size(), 1)

        # This will fork 2 children which will have to init with world-size 2
        self.assertEqual(test_rank_func(), [0, 1])

        # Make sure everything is the same in the parent
        self.assertEqual(comm.get().get_world_size(), 1)
Example #12
def _run_experiment(args):
    if args.plaintext:
        import plain_contextual_bandits as bandits
    else:
        import private_contextual_bandits as bandits

    learner_func = build_learner(args, bandits, download_mnist)
    import crypten

    crypten.init()
    learner_func()
Example #13
    def __init__(self, point_array, n_dust=1, radius=0.1, if_plot=True):
        crypten.init()
        torch.set_num_threads(1)
        self.upper_x = 1
        self.lower_x = 0
        self.upper_y = 1
        self.lower_y = 0
        self.n_point = point_array.shape[0]
        self.n_dust = n_dust
        self.radius = radius
        self.point_array = point_array
        # point_gen([self.lower_x, self.upper_x], [self.lower_y, self.upper_y], self.n_center, self.n_point, radius=self.radius, if_plot=if_plot)
        self.dust_array = sample_dust(self.point_array, self.n_dust)
Example #14
    def __init__(self, n_samples=5000, n_features=20):
        self.n_samples = n_samples
        self.n_features = n_features
        x, x_test, y, y_test = GaussianClusters.generate_data(
            n_samples, n_features)
        self.x, self.y = x, y
        self.x_test, self.y_test = x_test, y_test

        crypten.init()
        self.x_enc = crypten.cryptensor(self.x)
        self.y_enc = crypten.cryptensor(self.y)
        self.x_test_enc = crypten.cryptensor(self.x_test)
        self.y_test_enc = crypten.cryptensor(self.y_test)
Example #15
def run_mpc_autograd_cnn(
    context_manager=None,
    num_epochs=3,
    learning_rate=0.001,
    batch_size=5,
    print_freq=5,
    num_samples=100,
):
    """
    Args:
        context_manager: used for setting proxy settings during download.
    """
    crypten.init()

    data_alice, data_bob, train_labels = preprocess_mnist(context_manager)
    rank = comm.get().get_rank()

    # assumes at least two parties exist
    # broadcast dummy data with same shape to remaining parties
    if rank == 0:
        x_alice = data_alice
    else:
        x_alice = torch.empty(data_alice.size())

    if rank == 1:
        x_bob = data_bob
    else:
        x_bob = torch.empty(data_bob.size())

    # encrypt
    x_alice_enc = crypten.cryptensor(x_alice, src=0)
    x_bob_enc = crypten.cryptensor(x_bob, src=1)

    # combine feature sets
    x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2)
    x_combined_enc = x_combined_enc.unsqueeze(1)

    # reduce training set to num_samples
    x_reduced = x_combined_enc[:num_samples]
    y_reduced = train_labels[:num_samples]

    # encrypt plaintext model
    model_plaintext = CNN()
    dummy_input = torch.empty((1, 1, 28, 28))
    model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
    model.train()
    model.encrypt()

    # encrypted training
    train_encrypted(x_reduced, y_reduced, model, num_epochs, learning_rate,
                    batch_size, print_freq)
Example #16
    def test_in_first(self):
        # TODO: Make this work with TTP provider
        crypten.mpc.set_default_provider(
            crypten.mpc.provider.TrustedFirstParty)

        # This will cause the parent process to init with world-size 1
        crypten.init()
        self.assertEqual(comm.get().get_world_size(), 1)

        # This will fork 2 children which will have to init with world-size 2
        self.assertEqual(test_rank_func(), [0, 1])

        # Make sure everything is the same in the parent
        self.assertEqual(comm.get().get_world_size(), 1)
Example #17
    def test_is_initialized(self):
        """Tests that the is_initialized flag is set properly"""
        comm = crypten.communicator

        self.assertTrue(crypten.is_initialized())
        self.assertTrue(comm.is_initialized())

        crypten.uninit()
        self.assertFalse(crypten.is_initialized())
        self.assertFalse(comm.is_initialized())

        crypten.init()
        self.assertTrue(crypten.is_initialized())
        self.assertTrue(comm.is_initialized())
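
Outside a test harness, the same init/uninit lifecycle looks roughly like this (a minimal sketch; the tensor values are arbitrary).

import torch
import crypten

crypten.init()
assert crypten.is_initialized()

x_enc = crypten.cryptensor(torch.tensor([1.0, 2.0, 3.0]))
print(x_enc.get_plain_text())

crypten.uninit()
assert not crypten.is_initialized()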
Example #18
    def __init__(self):
        self.x = self.preprocess_image()
        # image net 1k classes
        class_id = 463
        self.y = torch.tensor([class_id]).long()
        self.x_test, self.y_test = self.x, self.y

        crypten.init()
        self.x_enc = crypten.cryptensor(self.x)
        y_one_hot = torch.zeros(1, 1000)
        y_one_hot[0][class_id] = 1
        self.y_enc = crypten.cryptensor(y_one_hot)
        self.x_test_enc = crypten.cryptensor(self.x_test)
        self.y_test_enc = crypten.cryptensor(self.y_test)
Example #19
    def __init__(self, n_samples=5000, n_features=20, epochs=50, lr_rate=0.1):
        self.n_samples = n_samples
        self.n_features = n_features
        self.epochs = epochs
        self.lr_rate = lr_rate

        self.df = None

        data = ModelBenchmarks.generate_data(n_samples, n_features)
        self.x, self.x_test, self.y, self.y_test = data

        crypten.init()
        self.x_enc = crypten.cryptensor(self.x)
        self.y_enc = crypten.cryptensor(self.y)
        self.x_test_enc = crypten.cryptensor(self.x_test)
Example #20
    def _run_process(cls, rank, world_size, env, run_process_fn, fn_args):
        for env_key, env_value in env.items():
            os.environ[env_key] = env_value
        os.environ["RANK"] = str(rank)
        orig_logging_level = logging.getLogger().level
        logging.getLogger().setLevel(logging.INFO)
        if hasattr(fn_args, "device"):
            crypten.init(device=fn_args.device)
        else:
            crypten.init()
        logging.getLogger().setLevel(orig_logging_level)
        if fn_args is None:
            run_process_fn()
        else:
            run_process_fn(fn_args)
Example #21
def _launch(func, rank, world_size, rendezvous_file, queue, func_args, func_kwargs):

    communicator_args = {
        "WORLD_SIZE": world_size,
        "RANK": rank,
        "RENDEZVOUS": "file://%s" % rendezvous_file,
        "BACKEND": "gloo",
    }
    for key, val in communicator_args.items():
        os.environ[key] = str(val)

    crypten.init()

    return_value = func(*func_args, **func_kwargs)
    queue.put((rank, return_value))
Example #22
    def test_name(self):
        # Test default name is correct
        self.assertEqual(comm.get().get_name(), f"rank{comm.get().get_rank()}")

        # Test name set / get
        comm.get().set_name(f"{comm.get().get_rank()}")
        self.assertEqual(comm.get().get_name(), f"{comm.get().get_rank()}")

        # Test initialization using crypten.init()
        name = f"init_{comm.get().get_rank()}"
        crypten.uninit()
        crypten.init(party_name=name)
        self.assertEqual(comm.get().get_name(), f"init_{comm.get().get_rank()}")

        # Test failure on bad input
        for improper_input in [0, None, ["name"], ("name",)]:
            with self.assertRaises(AssertionError):
                comm.get().set_name(improper_input)
Example #23
    def test_is_initialized(self):
        """Tests that the is_initialized flag is set properly"""
        comm = crypten.communicator

        self.assertTrue(crypten.is_initialized())
        self.assertTrue(comm.is_initialized())

        crypten.uninit()
        self.assertFalse(crypten.is_initialized())
        self.assertFalse(comm.is_initialized())

        # note that uninit() kills the TTP process, so we need to restart it:
        if self.rank == self.MAIN_PROCESS_RANK and crypten.mpc.ttp_required():
            self.processes += [self._spawn_ttp()]

        crypten.init()
        self.assertTrue(crypten.is_initialized())
        self.assertTrue(comm.is_initialized())
Example #24
def run_mpc_linear_svm(epochs=50,
                       examples=50,
                       features=100,
                       lr=0.5,
                       skip_plaintext=False):
    crypten.init()

    # Set random seed for reproducibility
    torch.manual_seed(1)

    # Initialize x, y, w, b
    x = torch.randn(features, examples)
    w_true = torch.randn(1, features)
    b_true = torch.randn(1)
    y = w_true.matmul(x) + b_true
    y = y.sign()

    if not skip_plaintext:
        logging.info("==================")
        logging.info("PyTorch Training")
        logging.info("==================")
        w_torch, b_torch = train_linear_svm(x, y, lr=lr, print_time=True)

    # Encrypt features / labels
    x = crypten.cryptensor(x)
    y = crypten.cryptensor(y)

    logging.info("==================")
    logging.info("CrypTen Training")
    logging.info("==================")
    w, b = train_linear_svm(x, y, lr=lr, print_time=True)

    if not skip_plaintext:
        logging.info("PyTorch Weights  :")
        logging.info(w_torch)
    logging.info("CrypTen Weights:")
    logging.info(w.get_plain_text())

    if not skip_plaintext:
        logging.info("PyTorch Bias  :")
        logging.info(b_torch)
    logging.info("CrypTen Bias:")
    logging.info(b.get_plain_text())
Example #25
        def wrapper(*args, **kwargs):
            rendezvous_file = tempfile.NamedTemporaryFile(delete=True).name
            queue = multiprocessing.Queue()

            processes = [
                multiprocessing.Process(
                    target=_launch,
                    args=(func, rank, world_size, rendezvous_file, queue, args, kwargs),
                )
                for rank in range(world_size)
            ]

            # This process will be forked and we need to re-initialize the
            # communicator in the children. If the parent process happened to
            # call crypten.init(), which might be valid in a Jupyter notebook
            # for instance, then the crypten.init() call on the children
            # process will not do anything. The call to uninit here makes sure
            # we actually get to initialize the communicator on the child
            # process.  An alternative fix for this issue would be to use spawn
            # instead of fork, but we run into issues serializing the function
            # in that case.
            was_initialized = DistributedCommunicator.is_initialized()
            if was_initialized:
                crypten.uninit()

            for process in processes:
                process.start()

            for process in processes:
                process.join()

            if was_initialized:
                crypten.init()

            successful = [process.exitcode == 0 for process in processes]
            if not all(successful):
                logging.error("One of the parties failed. Check past logs")
                return None

            return_values = []
            while not queue.empty():
                return_values.append(queue.get())

            return [value for _, value in sorted(return_values, key=itemgetter(0))]
Example #26
    def test_config(self):
        """Checks setting configuartion with config manager works"""
        # Set the config directly
        crypten.init()

        cfgs = [
            "functions.exp_iterations",
            "functions.max_method",
        ]

        for _cfg in cfgs:
            cfg[_cfg] = 10
            self.assertTrue(cfg[_cfg] == 10, "cfg.set failed")

            # Set with a context manager
            with cfg.temp_override({_cfg: 3}):
                self.assertTrue(cfg[_cfg] == 3,
                                "temp_override failed to set values")
            self.assertTrue(cfg[_cfg] == 10, "temp_override values persist")
Example #27
def _launch(func, rank, world_size, master_addr, master_port, queue, func_args,
            func_kwargs):
    communicator_args = {
        "RANK": rank,
        "WORLD_SIZE": world_size,
        "RENDEZVOUS": "env://",
        "MASTER_ADDR": master_addr,
        "MASTER_PORT": master_port,
        "DISTRIBUTED_BACKEND": "gloo",
    }
    for key, val in communicator_args.items():
        os.environ[key] = str(val)

    crypten.init()
    return_value = func(*func_args, **func_kwargs)
    crypten.uninit()

    return_value = utils.pack_values(return_value)
    queue.put(return_value)
Example #28
def main():
    """Runs benchmarks and saves if path is provided"""
    crypten.init()
    args = get_args()
    device = torch.device(args.device)

    if not hasattr(crypten.nn.Module, "to") or not hasattr(
            crypten.mpc.MPCTensor, "to"):
        if device.type == "cuda":
            print(
                "GPU computation is not supported for this version of CrypTen, benchmark will be skipped"
            )
            return

    benchmarks = [
        FuncBenchmarks(device=device),
        ModelBenchmarks(device=device, advanced_models=args.advanced_models),
    ]

    if args.only_functions:
        benchmarks = [FuncBenchmarks(device=device)]

    if args.world_size > 1:
        if args.ttp:
            crypten.mpc.set_default_provider(
                crypten.mpc.provider.TrustedThirdParty)
        launcher = multiprocess_launcher.MultiProcessLauncher(
            args.world_size, multiprocess_caller, fn_args=args)
        launcher.start()
        launcher.join()
        launcher.terminate()

    else:
        pd.set_option("display.precision", 3)
        for benchmark in benchmarks:
            benchmark.run()
            print(benchmark)
            if args.path:
                benchmark.save(args.path)
Example #29
    def run(self):
        """Runs and stores benchmarks in self.df"""
        crypten.init()
        runtimes_plain_text, runtimes_crypten = self.get_runtimes()

        abs_errors, abs_errors_iqr, relative_errors, relative_errors_iqr = (
            self.get_errors())
        self.df = pd.DataFrame.from_dict({
            "function": FuncBenchmarks.UNARY + FuncBenchmarks.BINARY,
            "runtime plain text": [r.avg for r in runtimes_plain_text],
            "runtime plain text IQR": [r.iqr for r in runtimes_plain_text],
            "runtime crypten": [r.avg for r in runtimes_crypten],
            "runtime crypten IQR": [r.iqr for r in runtimes_crypten],
            "abs error": abs_errors,
            "abs error IQR": abs_errors_iqr,
            "relative error": relative_errors,
            "relative error IQR": relative_errors_iqr,
        })
Example #30
def test():
    crypten.init()
    rank = comm.get().get_rank()

    name = names[rank]
    filename = f"dataset/{name}/train.npz"

    mpc_tensor = load_encrypt_tensor(filename)
    feature, label = mpc_tensor[:32, :-1], mpc_tensor[:32, -1]
    print(feature.shape, feature.ptype)

    model = MLP()
    mpc_model = make_mpc_model(model)
    loss = crypten.nn.BCELoss()

    mpc_model.train()
    out = mpc_model(feature)
    prob = out.sigmoid()
    loss_val = loss(prob, label)

    mpc_model.zero_grad()
    loss_val.backward()
    mpc_model.update_parameters(1e-3)
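
After the parameter update, predictions and parameters can be decrypted for inspection. A hedged sketch that reuses mpc_model and feature as defined in test() above; this evaluation step is not part of the original example.

mpc_model.eval()
probs = mpc_model(feature).sigmoid().get_plain_text()
print(probs[:5])

# Decrypted parameters, e.g. for comparison with a plaintext baseline.
for param in mpc_model.parameters():
    print(param.get_plain_text().norm())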