Example #1
0
def save(obj, f, save_closure=torch.save, **kwargs):
    """
    Saves the shares of CrypTensor or an encrypted model to a file.

    Args:
        obj: The CrypTensor or PyTorch tensor to be saved
        f: a file-like object (has to implement `read()`, `readline()`,
              `tell()`, and `seek()`), or a string containing a file name
        save_closure: Custom save function that matches the interface of `torch.save`,
        to be used when the tensor is saved with a custom load function in
        `crypten.load`. Additional kwargs are passed on to the closure.
    """
    # TODO: Add support for saving to correct device (kwarg: map_location=device)
    save_closure(obj, f, **kwargs)
    # Synchronize all parties so none proceeds before the save completes.
    comm.get().barrier()
Example #2
0
    def verify_compare(self):
        """Verify secret-shared distance comparisons against recomputed values.

        Loads this rank's comparison-result shares and encrypted distances
        from the pickle files written by earlier pipeline steps, then for
        every (dust, point) pair recomputes `dist <= radius` and decrypts the
        stored comparison result so the two can be checked side by side.
        Only rank 0 prints, to avoid interleaved output across parties.

        NOTE(review): the print statements are placeholders ("not
        implemented"); presumably they should display `gt_calculated` and
        `decrypted` — confirm intended output format.
        """
        rank = comm.get().get_rank()

        # Shares of the comparison results produced by the distance step.
        with open('compare_results_{}.pickle'.format(rank), 'rb') as handle:
            dist_dict = pickle.load(handle)
        results_share_list = dist_dict["distance_results_rank{}".format(rank)]

        # Encrypted distances saved by the distance-calculation step.
        with open('dist_rank_{}.pickle'.format(rank), 'rb') as handle:
            dist_dict = pickle.load(handle)
        dist_enc = dist_dict["distance_share_list_rank{}".format(rank)]

        n_dust = len(dist_enc)
        # Verify each distance comparison.
        if rank == 0:
            print("=========Start of Verification========")
        for i in range(n_dust):
            for j in range(self.n_point):
                gt_dist = dist_enc[i][j]
                # Recompute the comparison and reveal it as ground truth.
                radius_tensor = torch.ones(gt_dist.shape) * self.radius
                gt_calculated = (gt_dist <= radius_tensor).get_plain_text()

                if rank == 0:
                    print("Ground-Truth is: not implemented")
                # Reveal the stored comparison share for this pair.
                decrypted = results_share_list[i][j].get_plain_text()
                if rank == 0:
                    print("Decrypted is: not implemented")
        if rank == 0:
            print("=========End of Verification========")
Example #3
0
    def __init__(self, tensor=None, size=None, src=0, device=None):
        """Create a binary (XOR) secret sharing of `tensor`.

        Args:
            tensor: plaintext supplied only by the party with rank `src`;
                other parties pass None
            size: tensor size, required on parties that do not supply `tensor`
            src (int): rank of the party providing the plaintext
            device: device for the share; defaults to `tensor.device` when
                the tensor has one

        NOTE(review): all parties must call this together — PRZS draws from
        pairwise generators, so call order across parties matters.
        """
        # SENTINEL src skips initialization (used by internal constructors).
        if src == SENTINEL:
            return
        assert (isinstance(src, int) and src >= 0
                and src < comm.get().get_world_size()), "invalid tensor source"

        if device is None and hasattr(tensor, "device"):
            device = tensor.device

        #  Assume 0 bits of precision unless encoder is set outside of init
        self.encoder = FixedPointEncoder(precision_bits=0)
        if tensor is not None:
            tensor = self.encoder.encode(tensor)
            tensor = tensor.to(device=device)
            # Derive size from the encoded tensor so all shares match it.
            size = tensor.size()

        # Generate Pseudo-random Sharing of Zero and XOR in source's tensor
        self.share = BinarySharedTensor.PRZS(size, device=device).share
        if self.rank == src:
            assert tensor is not None, "Source must provide a data tensor"
            if hasattr(tensor, "src"):
                assert (
                    tensor.src == src
                ), "Source of data tensor must match source of encryption"
            self.share ^= tensor
Example #4
0
def load(tag: str, src: int, **kwargs):
    """TODO: Think of a method to keep the serialized models at the workers that are part of the
    computation in such a way that the worker that started the computation do not know what
    model architecture is used

    if tag.startswith("crypten_model"):
        worker = get_worker_from_rank(src)
        results = worker.search(tag)
        assert len(results) == 1

        model = results[0]
        assert isinstance(model, OnnxModel)

        return utils.onnx_to_crypten(model.serialized_model)
    """

    # Parties other than the source pass a dummy preloaded value.
    if src != comm.get().get_rank():
        return crypten.load_from_party(preloaded=-1, src=src, **kwargs)

    # We are the source party: a computation id must have been established.
    if CID is None:
        raise RuntimeError("CrypTen computation id is not set.")

    # Fetch the tagged object from our worker; the tag must be unique.
    worker = get_worker_from_rank(src)
    matches = worker.search(tag)
    assert len(matches) == 1

    return crypten.load_from_party(preloaded=matches[0], src=src, **kwargs)
Example #5
0
def wraps(x):
    """Privately computes the number of wraparounds for a set of shares

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where [theta_i] is the wraps for a variable i
          [beta_ij] is the differential wraps for variables i and j
          [eta_ij]  is the plaintext wraps for variables i and j

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.
    """
    provider = crypten.mpc.get_default_provider()
    # r: random shared mask; theta_r: sharing of r's wrap count.
    r, theta_r = provider.wrap_rng(x.size())
    # beta_xr: differential wraps between x's and r's local shares.
    beta_xr = theta_r.clone()
    beta_xr._tensor = count_wraps([x._tensor, r._tensor])

    # Mask x with r; rank 0 gathers the shares of z = x + r so it can
    # count z's wraps in the clear below.
    z = x + r
    theta_z = comm.get().gather(z._tensor, 0)
    theta_x = beta_xr - theta_r

    # TODO: Incorporate eta_xr
    if x.rank == 0:
        # Only rank 0 holds the gathered shares; fold theta_z into its share.
        theta_z = count_wraps(theta_z)
        theta_x._tensor += theta_z
    return theta_x
Example #6
0
    def generate_binary_triple(size0, size1, device=None):
        """Generate a triple (a, b, c) of binary shares with the given sizes."""
        gen = TTPClient.get().get_generator(device=device)

        a = generate_kbit_random_tensor(size0, generator=gen, device=device)
        b = generate_kbit_random_tensor(size1, generator=gen, device=device)

        if comm.get().get_rank() != 0:
            # Non-zero ranks sample their share of c locally; its size is the
            # broadcast of the sizes of a and b.
            c_size = torch.broadcast_tensors(a, b)[0].size()
            c = generate_kbit_random_tensor(c_size, generator=gen,
                                            device=device)
        else:
            # Rank 0 requests its share of c from the TTP.
            c = TTPClient.get().ttp_request("binary", device, size0, size1)

        return (
            BinarySharedTensor.from_shares(a),
            BinarySharedTensor.from_shares(b),
            BinarySharedTensor.from_shares(c),
        )
Example #7
0
    def generate_additive_triple(size0,
                                 size1,
                                 op,
                                 device=None,
                                 *args,
                                 **kwargs):
        """Generate a multiplicative triple (a, b, c) of additive shares,
        where c = op(a, b); extra args/kwargs are forwarded to `op`."""
        gen = TTPClient.get().get_generator(device=device)

        a = generate_random_ring_element(size0, generator=gen, device=device)
        b = generate_random_ring_element(size1, generator=gen, device=device)

        if comm.get().get_rank() != 0:
            # TODO: Compute size without executing computation
            c_size = getattr(torch, op)(a, b, *args, **kwargs).size()
            c = generate_random_ring_element(c_size,
                                             generator=gen,
                                             device=device)
        else:
            # Rank 0 obtains its share of c from the TTP.
            c = TTPClient.get().ttp_request("additive", device, size0, size1,
                                            op, *args, **kwargs)

        # Wrap raw ring elements as shared tensors with no fixed-point scaling.
        a = ArithmeticSharedTensor.from_shares(a, precision=0)
        b = ArithmeticSharedTensor.from_shares(b, precision=0)
        c = ArithmeticSharedTensor.from_shares(c, precision=0)

        return a, b, c
def get_random_test_tensor(
    max_value=6, min_value=None, size=(1, 5), is_float=False, ex_zero=False
):
    """Generates a random tensor for testing, identical on every party.

    Args:
        max_value (int): maximum value for an int tensor
        min_value (int): minimum value for an int tensor; defaults to -max_value
        size (tuple): size of the tensor
        is_float (bool): produce a float tensor instead of an int tensor
        ex_zero (bool): replace zero entries with ones

    Returns: torch.tensor
    """
    if min_value is None:
        min_value = -max_value

    shape = torch.Size(size)
    if is_float:
        tensor = torch.rand(shape) * (max_value - min_value) + min_value
    else:
        tensor = torch.randint(min_value, max_value, shape, dtype=torch.int64)

    if ex_zero:
        # Replace zeros with ones so no entry of the tensor is zero.
        tensor[tensor == 0] = 1

    # Broadcast from rank 0 so the generated random tensor is in sync in all
    # distributed processes. See T45688819 for more information.
    return comm.get().broadcast(tensor, 0)
Example #9
0
 def reveal(self):
     """Get plaintext without any downscaling.

     Gathers every party's binary share and folds them together with XOR.
     """
     gathered = comm.get().all_gather(self.share)
     plaintext = gathered[0]
     for idx in range(1, len(gathered)):
         plaintext = plaintext ^ gathered[idx]
     return plaintext
Example #10
0
    def __init__(self,
                 tensor=None,
                 size=None,
                 precision=None,
                 src=0,
                 device=None):
        """Create an additive secret sharing of `tensor`.

        Args:
            tensor: plaintext supplied only by the party with rank `src`;
                other parties pass None
            size: tensor size, required on parties that do not supply `tensor`
            precision: fixed-point precision bits for the encoder
            src (int): rank of the party providing the plaintext
            device: device for the share; defaults to `tensor.device` when
                the tensor has one

        NOTE(review): all parties must call this together — PRZS draws from
        pairwise generators, so call order across parties matters.
        """
        # SENTINEL src skips initialization (used by internal constructors).
        if src == SENTINEL:
            return
        assert (isinstance(src, int) and src >= 0
                and src < comm.get().get_world_size()), "invalid tensor source"

        if device is None and hasattr(tensor, "device"):
            device = tensor.device

        self.encoder = FixedPointEncoder(precision_bits=precision)
        if tensor is not None:
            # Int tensors are encoded as floats unless precision is exactly 0.
            if is_int_tensor(tensor) and precision != 0:
                tensor = tensor.float()
            tensor = self.encoder.encode(tensor)
            tensor = tensor.to(device=device)
            # Derive size from the encoded tensor so all shares match it.
            size = tensor.size()

        # Generate pseudo-random sharing of zero (PRZS) and add source's tensor
        self.share = ArithmeticSharedTensor.PRZS(size, device=device).share
        if self.rank == src:
            assert tensor is not None, "Source must provide a data tensor"
            if hasattr(tensor, "src"):
                assert (
                    tensor.src == src
                ), "Source of data tensor must match source of encryption"
            self.share += tensor
Example #11
0
    def div_(self, y):
        """Divide two tensors element-wise, in place.

        `y` may be a public int, float, or tensor. Integer (or effectively
        integer) divisors use the share-truncation path; remaining floats
        are handled by multiplying with the reciprocal.

        Returns:
            self, divided in place.
        """
        # TODO: Add test coverage for this code path (next 4 lines)
        # Normalize exactly-integral floats to ints so they take the cheaper
        # truncation path below.
        if isinstance(y, float) and int(y) == y:
            y = int(y)
        if is_float_tensor(y) and y.frac().eq(0).all():
            y = y.long()

        if isinstance(y, int) or is_int_tensor(y):
            # Truncate protocol for dividing by public integers:
            if comm.get().get_world_size() > 2:
                # With more than 2 parties, dividing shares directly is wrong
                # whenever shares wrapped the ring; correct via wrap counts.
                wraps = self.wraps()
                self.share /= y
                # NOTE: The multiplication here must be split into two parts
                # to avoid long out-of-bounds when y <= 2 since (2 ** 63) is
                # larger than the largest long integer.
                self -= wraps * 4 * (int(2 ** 62) // y)
            else:
                self.share /= y
            return self

        # Otherwise multiply by reciprocal
        if isinstance(y, float):
            y = torch.FloatTensor([y])

        assert is_float_tensor(y), "Unsupported type for div_: %s" % type(y)
        return self.mul_(y.reciprocal())
Example #12
0
    def PRZS(*size):
        """
        Generate a Pseudo-random Sharing of Zero (using binary/XOR shares)

        This function does so by generating `n` numbers across `n` parties with
        each number being held by exactly 2 parties. Therefore, each party holds
        two numbers. A zero sharing is found by having each party xor their two
        numbers together.
        """
        # SENTINEL skips normal __init__; the share is assigned directly below.
        tensor = BinarySharedTensor(src=SENTINEL)
        # g0/g1 generators are presumably seeded pairwise with the adjacent
        # parties so each draw is shared by exactly two parties — see the
        # communicator's generator setup.
        current_share = generate_kbit_random_tensor(*size,
                                                    generator=comm.get().g0)
        next_share = generate_kbit_random_tensor(*size,
                                                 generator=comm.get().g1)
        tensor.share = current_share ^ next_share
        return tensor
Example #13
0
def init_app(relay_predictor_host, img_size):
    """Build the web application and its image queue.

    The predictor party gets the doctor-side queue plus decision endpoints;
    every other party gets a patient-side queue that relays images to the
    predictor host.
    """
    app = web.Application()

    is_predictor = comm.get().get_rank() == PREDICTOR
    if is_predictor:
        q = ImageQueueDoctor(img_size=img_size)
    else:
        q = ImageQueuePatient(
            img_size=img_size, relay_to=relay_predictor_host + "/image"
        )

    # Routes common to all parties.
    routes = [web.get("/", index), web.post("/image", q.image)]
    if is_predictor:
        # Decision endpoints exist only on the predictor party.
        routes += [
            web.get("/decision", q.decision),
            web.post("/make_decision", q.make_decision),
        ]
    app.add_routes(routes)

    return app, q
Example #14
0
 def test_global_generator(self):
     """Tests that the global generator seed agrees across all parties"""
     # Every party's seed must equal rank 0's seed for each device.
     for device, generator in crypten.generators["global"].items():
         local_seed = generator.initial_seed()
         seed_from_rank0 = comm.get().broadcast_obj(local_seed, 0)
         self.assertEqual(local_seed, seed_from_rank0)
Example #15
0
    def ne(self, y, _scale=True):
        """Returns self != y"""
        # Two-party case: not-equal is the complement of equality.
        if comm.get().get_world_size() == 2:
            return 1 - self.eq(y, _scale=_scale)

        # Otherwise test (self - y) for nonzero: stack the difference with
        # its negation, test each for < 0, and sum the two indicator results.
        diff = self - y
        stacked = torch_stack([diff.share, -diff.share])
        diff.share = stacked
        return diff._ltz(_scale=_scale).sum(0)
Example #16
0
    def PRZS(*size, device=None):
        """
        Generate a Pseudo-random Sharing of Zero (using arithmetic shares)

        Each of the `n` parties samples two random ring elements, each drawn
        from a generator shared with one other party; one element is added
        and the other subtracted, so the shares sum to zero across parties.
        """
        tensor = ArithmeticSharedTensor(src=SENTINEL)
        gen0 = comm.get().get_generator(0, device=device)
        gen1 = comm.get().get_generator(1, device=device)
        share0 = generate_random_ring_element(*size, generator=gen0,
                                              device=device)
        share1 = generate_random_ring_element(*size, generator=gen1,
                                              device=device)
        tensor.share = share0 - share1
        return tensor
Example #17
0
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
    """Saves checkpoint of plaintext model.

    Only the rank-0 process writes, to avoid a race condition between
    parties; the best checkpoint is also copied to "model_best.pth.tar".
    """
    if comm.get().get_rank() != 0:
        return
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, "model_best.pth.tar")
Example #18
0
def _A2B(arithmetic_tensor):
    """Convert an arithmetic shared tensor into a binary shared tensor.

    Each party binary-secret-shares its own arithmetic share (acting as the
    src for one sharing); summing the stacked binary sharings then yields a
    binary sharing of the same underlying value.
    """
    binary_tensor = BinarySharedTensor.stack([
        BinarySharedTensor(arithmetic_tensor.share, src=i)
        for i in range(comm.get().get_world_size())
    ])
    # Sum over the stacked party dimension to combine the sharings.
    binary_tensor = binary_tensor.sum(dim=0)
    # Preserve the fixed-point encoding of the input.
    binary_tensor.encoder = arithmetic_tensor.encoder
    return binary_tensor
Example #19
0
 def test_all_gather_random(self):
     """all_gather should hand every party a list of identical tensors."""
     for shape in [(), (1, ), (5, ), (5, 5), (5, 5, 5)]:
         tensor = get_random_test_tensor(size=shape)
         gathered = comm.get().all_gather(tensor)
         self.assertTrue(isinstance(gathered, list))
         # Every gathered entry must equal the (synchronized) input tensor.
         for item in gathered:
             self.assertTrue((item == tensor).all())
Example #20
0
    def test_batched_all_reduce(self):
        """Batched all_reduce should sum each tensor across all parties."""
        shapes = [(), (1, ), (5, ), (5, 5), (5, 5, 5)]
        inputs = [get_random_test_tensor(size=shape) for shape in shapes]

        outputs = comm.get().all_reduce(inputs, batched=True)
        self.assertTrue(isinstance(outputs, list))
        # Inputs are identical across parties, so the reduction is a scaling.
        for inp, out in zip(inputs, outputs):
            self.assertTrue((out == (inp * self.world_size)).all())
Example #21
0
 def test_gather(self):
     """Gather should collect each party's rank tensor at the target rank.

     The destination rank receives ``[tensor([0]), ...,
     tensor([world_size - 1])]``; on every other rank the result entries
     are ``None``.
     """
     tensor = torch.tensor([self.rank])
     for rank in range(self.world_size):
         result = comm.get().gather(tensor, rank)
         if rank == self.rank:
             # Build the expected list from the actual world size rather
             # than hard-coding the two-party case.
             expected = [torch.tensor([i]) for i in range(self.world_size)]
             self.assertEqual(result, expected)
         else:
             self.assertIsNone(result[0])
Example #22
0
    def test_broadcast_obj(self):
        """broadcast_obj should replicate picklable objects (dicts, tensors,
        registered modules) from each source rank to all parties, and the
        restricted unpickler must reject unsafe serialized payloads."""
        TEST_OBJECTS = [
            {
                "a": 1,
                "b": 2,
                "c": 3
            },
            torch.tensor(1),
            torch.nn.Linear(10, 5),
            CNN(),
        ]
        # Fill parameters deterministically so all ranks agree on values.
        for param in TEST_OBJECTS[2].parameters():
            param.data.fill_(1.0)
        for param in TEST_OBJECTS[3].parameters():
            param.data.fill_(1.0)
        # CNN is a custom class and must be whitelisted for deserialization.
        serial.register_safe_class(CNN)

        for reference in TEST_OBJECTS:
            for src in range(self.world_size):
                # Only the source rank supplies the object; others pass None.
                test_obj = reference if self.rank == src else None
                test_obj = comm.get().broadcast_obj(test_obj, src)
                if isinstance(reference, torch.nn.Module):
                    # Modules are compared parameter-by-parameter.
                    test_obj_params = list(test_obj.parameters())
                    reference_params = list(reference.parameters())
                    for i, param in enumerate(reference_params):
                        self.assertTrue(test_obj_params[i].eq(param).all(),
                                        "broadcast_obj failed")
                else:
                    self.assertEqual(test_obj, reference,
                                     "broadcast_obj failed")

        # Test that the restricted loader will raise an error for code injection
        for invalid_obj in INVALID_SERIALIZED_OBJECTS:
            for src in range(self.world_size):
                if self.rank == src:
                    # Mimic broadcast_obj without pickling invalid bytestream
                    size = torch.tensor(len(invalid_obj), dtype=torch.int32)
                    arr = torch.from_numpy(
                        numpy.frombuffer(invalid_obj, dtype=numpy.int8))

                    dist.broadcast(size, src, group=comm.get().main_group)
                    dist.broadcast(arr, src, group=comm.get().main_group)
                else:
                    # Receivers must refuse to unpickle the malicious payload.
                    with self.assertRaises(ValueError):
                        test_obj = None
                        comm.get().broadcast_obj(test_obj, src)
Example #23
0
    def forward(self, input):
        """Run the feature party's model on `input`.

        Only the feature-source party holds the model; it computes logits and
        sigmoid predictions. Other parties learn only the prediction size
        (via cache or broadcast) and allocate matching empty placeholders.

        Returns:
            The logits tensor on the feature source (empty placeholder on
            other parties); in eval mode, None on parties without the model.
        """
        # During eval mode, just conduct forward pass.
        if not self.training:
            if self.is_feature_src():
                return self.model(input)
            # Parties without model should return None
            return None

        if self.is_feature_src():
            self.logits = self.model(input)
            self.preds = self.logits.sigmoid()

            # Extract saved input to last layer from autograd tape if we need it
            if cfg.nn.dpsmpc.protocol == "layer_estimation":
                self.last_input = self.logits.grad_fn._saved_mat1

            # Check that prediction size matches cached size
            preds_size = self.preds.size()
            if "preds_size" in self.cache:
                cache_size = self.cache["preds_size"]
                if preds_size != cache_size:
                    raise ValueError(
                        f"Logit size does not match cached size: {preds_size} vs. {cache_size}"
                    )

            # Cache predictions size - Note batch size must match here
            # TODO: Handle batch dimension here
            if self.cache_pred_size:
                preds_size = self._communicate_and_cache(
                    "preds_size", preds_size)
            else:
                preds_size = comm.get().broadcast_obj(preds_size,
                                                      src=self.feature_src)
        else:
            # Cache predictions size - Note batch size must match here
            # TODO: Handle batch dimension here
            if self.cache_pred_size:
                preds_size = self._communicate_and_cache("preds_size", None)
            else:
                preds_size = comm.get().broadcast_obj(None,
                                                      src=self.feature_src)
            # Allocate placeholders of the communicated size so downstream
            # code sees tensors of the right shape on non-feature parties.
            self.logits = torch.empty(preds_size)
            self.preds = torch.empty(preds_size)

        return self.logits
Example #24
0
    def __init__(self):
        """Initializes a Trusted Third Party server that receives requests.

        Blocks in a serve loop: each message received from client rank 0
        names a method of this server plus its device/args/kwargs. The
        result's size is sent back to rank 0 and the result tensor is then
        broadcast over the TTP comm group. A "terminate" message — or a
        RuntimeError — shuts the server down.
        """
        # Initialize connection
        crypten.init()
        self.ttp_group = comm.get().ttp_group
        self.comm_group = comm.get().ttp_comm_group
        self.device = "cpu"
        self._setup_generators()

        logging.info("TTPServer Initialized")
        try:
            while True:
                # Wait for next request from client
                message = comm.get().recv_obj(0, self.ttp_group)
                logging.info("Message received: %s" % message)

                if message == "terminate":
                    logging.info("TTPServer shutting down.")
                    return

                # Dispatch: the message names one of this server's methods.
                function = message["function"]
                device = message["device"]
                args = message["args"]
                kwargs = message["kwargs"]

                # Remember the requested device for use by helper methods.
                self.device = device

                result = getattr(self, function)(*args, **kwargs)

                # Tell rank 0 the result size, then broadcast the data itself.
                comm.get().send_obj(result.size(), 0, self.ttp_group)
                comm.get().broadcast(result, 2, self.comm_group)
        except RuntimeError:
            logging.info("Encountered Runtime error. TTPServer shutting down.")
Example #25
0
    def test_scatter(self):
        """Scatter should deliver tensor(i) to the party with rank i."""
        for src in range(self.world_size):
            to_scatter = []
            if src == self.rank:
                # Only the source rank supplies the full list of tensors.
                to_scatter = [torch.tensor(i) for i in range(self.world_size)]

            received = comm.get().scatter(to_scatter, src, size=())
            self.assertTrue(torch.is_tensor(received))
            self.assertEqual(received.item(), self.rank)
Example #26
0
    def test_reduce(self):
        """Reduce should sum tensors from all parties at the target rank."""
        shapes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
        for dst in range(self.world_size):
            for shape in shapes:
                tensor = get_random_test_tensor(size=shape)
                reduced = comm.get().reduce(tensor, dst)

                # Only the destination rank holds the meaningful result.
                if dst == self.rank:
                    self.assertTrue(
                        (reduced == (tensor * self.world_size)).all())
Example #27
0
    def test_broadcast(self):
        """Broadcast should copy the source party's tensor to all parties."""
        for src in range(self.world_size):
            tensor = torch.LongTensor([0])
            if src == self.rank:
                # Only the source rank holds the value to be broadcast.
                tensor += 1

            tensor = comm.get().broadcast(tensor, src=src)
            self.assertTrue(torch.is_tensor(tensor))
            self.assertEqual(tensor.item(), 1)
Example #28
0
        def _setup_generators(self):
            """Receive the RNG seed from the TTP and seed a local generator."""
            seed = torch.empty(size=(), dtype=torch.long)
            # Block until the TTP has sent this party's seed, then sync.
            recv_req = dist.irecv(tensor=seed,
                                  src=comm.get().get_ttp_rank(),
                                  group=self.group)
            recv_req.wait()
            dist.barrier(group=self.group)

            self.generator = torch.Generator()
            self.generator.manual_seed(seed.item())
Example #29
0
def multiprocess_caller(args):
    """Run every requested benchmark on all parties; only rank 0 reports.

    Each benchmark is executed by every process, after which the rank-0
    process prints results (and saves them when `args.path` is set).
    """
    for bench in args.benchmarks:
        bench.run()
        if comm.get().get_rank() == 0:
            pd.set_option("display.precision", 3)
            print(bench)
            if args.path:
                bench.save(args.path)
Example #30
0
    def wrap_rng(size, num_parties):
        """Generate random shared tensor of given size and sharing of its wraps.

        Samples `num_parties` random ring elements and counts how many times
        their sum wraps around the ring; the elements are then scattered from
        src 0 so each party keeps one as its share of `r`, and the wrap count
        is secret-shared from party 0.

        Returns:
            (r, theta_r): shared tensors (precision 0) holding the random
            value and its wrap count.
        """
        r = [generate_random_ring_element(size) for _ in range(num_parties)]
        theta_r = count_wraps(r)

        # Each party keeps exactly one of the sampled ring elements.
        shares = comm.get().scatter(r, src=0)
        r = ArithmeticSharedTensor.from_shares(shares, precision=0)
        # Share the plaintext wrap count from party 0.
        theta_r = ArithmeticSharedTensor(theta_r, precision=0, src=0)

        return r, theta_r