Example #1
                        # If the object is not a wrapper but it's also not a
                        # framework object, then it needs to be wrapped or else
                        # it won't be usable by other interfaces.
                        obj = obj.wrap()

            return obj
        # Else we keep the same Pointer
        else:

            location = syft.hook.local_worker.get_worker(worker_id)

            ptr = ObjectPointer(
                location=location,
                id_at_location=id_at_location,
                owner=worker,
                id=obj_id,
                garbage_collect_data=garbage_collect_data,
            )

            return ptr


### Register the object with hook_args.py ###
register_type_rule({ObjectPointer: one})
register_forward_func(
    {ObjectPointer: lambda p: (_ for _ in ()).throw(RemoteObjectFoundError(p))}
)
register_backward_func({ObjectPointer: lambda i: i})
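A note on the forward function above: raise is a statement, so it cannot appear inside a lambda. Calling .throw() on an empty generator is the standard workaround, raising the exception at the call site. A minimal, self-contained sketch of the idiom (the exception class here is a stand-in for RemoteObjectFoundError):

class RemoteObjectFound(Exception):  # stand-in for RemoteObjectFoundError
    pass

# (_ for _ in ()) builds an empty generator; .throw(exc) raises exc
# immediately, so the lambda can "raise" despite being an expression.
fail = lambda p: (_ for _ in ()).throw(RemoteObjectFound(p))

try:
    fail("some pointer")
except RemoteObjectFound as e:
    print("caught:", e)  # caught: some pointer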
Example #2
                        tensor = tensor.wrap()

            return tensor
        # Else we keep the same Pointer
        else:
            location = syft.hook.local_worker.get_worker(worker_id)

            ptr = PointerTensor(
                location=location,
                id_at_location=obj_id_at_location,
                owner=worker,
                id=obj_id,
                shape=shape,
                garbage_collect_data=garbage_collect_data,
            )

            return ptr

    @staticmethod
    def get_protobuf_schema() -> PointerTensorPB:
        return PointerTensorPB


### Register the tensor with hook_args.py ###
register_type_rule({PointerTensor: one})
register_forward_func(
    {PointerTensor: lambda p: (_ for _ in ()).throw(RemoteObjectFoundError(p))}
)
register_backward_func({PointerTensor: lambda i: i})
Example #3
            ), "When sharing a LargePrecisionTensor, the field of the resulting AdditiveSharingTensor \
                    must be the same as the one of the original tensor"

        self.child = self.child.share(*owners,
                                      field=field,
                                      crypto_provider=crypto_provider,
                                      no_wrap=True)
        return self


# The bit width of each integer dtype
type_precision = {
    torch.uint8: 8,
    torch.int8: 8,
    torch.int16: 16,
    torch.short: 16,
    torch.int32: 32,
    torch.int: 32,
    torch.int64: 64,
    torch.long: 64,
}
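Note that torch.short, torch.int, and torch.long are aliases of torch.int16, torch.int32, and torch.int64, so the literal above collapses to four distinct keys; listing both spellings just lets either name be used for lookups. A quick check against the type_precision dict defined above:

import torch

assert torch.short is torch.int16 and torch.long is torch.int64
print(type_precision[torch.long])  # 64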

### Register the tensor with hook_args.py ###
register_type_rule({LargePrecisionTensor: one})
register_forward_func(
    {LargePrecisionTensor: lambda i: LargePrecisionTensor._forward_func(i)})
register_backward_func(
    {LargePrecisionTensor: lambda i, **kwargs: LargePrecisionTensor._backward_func(i, **kwargs)}
)
Example #4
            requires_grad,
            preinitialize_grad,
            grad_fn,
            tags,
            description,
        ) = tensor_tuple

        if chain is not None:
            chain = syft.serde.msgpack.serde._detail(worker, chain)

        tensor = AutogradTensor(
            owner=worker,
            id=syft.serde.msgpack.serde._detail(worker, tensor_id),
            requires_grad=requires_grad,
            preinitialize_grad=preinitialize_grad,
            # local_autograd=local_autograd,
            grad_fn=syft.serde.msgpack.serde._detail(worker, grad_fn),
            data=chain,  # pass the de-serialized data
            tags=syft.serde.msgpack.serde._detail(worker, tags),
            description=syft.serde.msgpack.serde._detail(worker, description),
        )

        return tensor


register_type_rule({AutogradTensor: one})
register_forward_func({AutogradTensor: get_child})
register_backward_func(
    {AutogradTensor: lambda i, **kwargs: AutogradTensor(data=i).on(i, wrap=False)}
)
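The registration above shows the usual unwrap/rewrap contract: the forward function (get_child) strips a wrapper down to its underlying .child before an operation runs, and the backward function rebuilds a wrapper of the same type around the raw result. A toy illustration of that contract in plain Python (no syft; Wrapper is a hypothetical stand-in):

class Wrapper:  # hypothetical stand-in for a syft tensor type
    def __init__(self, child=None):
        self.child = child

    def on(self, child, wrap=True):
        # with wrap=False, syft attaches child and returns the tensor itself
        self.child = child
        return self


get_child = lambda w: w.child  # forward: unwrap
rewrap = lambda i, **kwargs: Wrapper().on(i, wrap=False)  # backward: rewrap

w = Wrapper(child=[1, 2, 3])
raw = get_child(w)  # the operation runs on this raw value
out = rewrap(raw)   # the result is presented as a Wrapper again
print(type(out).__name__, out.child)  # Wrapper [1, 2, 3]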
Example #5
        """
        obj_id, chain = tensor_tuple
        struct = sy.serde.msgpack.serde._detail(worker, chain)

        if isinstance(struct, dict) and chain is not None:
            tensor = PaillierTensor(owner=worker, id=obj_id)
            public_key = struct["public_key"]
            pub = PaillierPublicKey(n=int(public_key["n"]))
            if isinstance(struct["values"][0], list):
                values = [
                    [EncryptedNumber(pub, int(x[0]), int(x[1])) for x in y]
                    for y in struct["values"]
                ]
            else:
                values = [
                    EncryptedNumber(pub, int(x[0]), int(x[1]))
                    for x in struct["values"]
                ]
            tensor.child = np.array(values)
            syft_tensor = tensor.wrap()
            return syft_tensor
        else:
            raise TypeError(f"Expected a dict of PaillierTensor attributes, got {type(struct)}")


register_type_rule({PaillierTensor: one})
register_forward_func({PaillierTensor: get_child})
register_backward_func(
    {PaillierTensor: lambda i, **kwargs: PaillierTensor().on(i, wrap=False)}
)
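The detail method above rebuilds each ciphertext from a (ciphertext, exponent) pair plus the public key's modulus n. The same round trip can be sketched directly with the python-paillier (phe) library, assuming it is installed:

from phe import paillier

pub, priv = paillier.generate_paillier_keypair()
enc = pub.encrypt(42)

# serialize the way tensor_tuple stores values: a (ciphertext, exponent) pair
pair = (str(enc.ciphertext()), str(enc.exponent))

# rebuild, as in the dict branch of detail() above
pub2 = paillier.PaillierPublicKey(n=int(str(pub.n)))
restored = paillier.EncryptedNumber(pub2, int(pair[0]), int(pair[1]))
print(priv.decrypt(restored))  # 42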
Example #6
    def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "LoggingTensor":
        """
        This function reconstructs a LogTensor given it's attributes in form of a tuple.
        Args:
            worker: the worker doing the deserialization
            tensor_tuple: a tuple holding the attributes of the LogTensor
        Returns:
            LoggingTensor: a LogTensor
        Examples:
            logtensor = detail(data)
        """
        obj_id, chain = tensor_tuple

        tensor = LoggingTensor(
            owner=worker,
            id=sy.serde.msgpack.serde._detail(worker, obj_id),
        )

        if chain is not None:
            chain = sy.serde.msgpack.serde._detail(worker, chain)
            tensor.child = chain

        return tensor


register_type_rule({LoggingTensor: one})
register_forward_func({LoggingTensor: get_child})
register_backward_func(
    {LoggingTensor: lambda i, **kwargs: LoggingTensor().on(i, wrap=False)}
)
Example #7
    "permute",
    "reshape",
    "split",
    "stack",
    "sub_",
    "view",
}

ambiguous_functions = {
    "torch.unbind",
    "unbind",
    "torch.stack",
    "stack",
    "torch.cat",
    "cat",
    "torch.mean",
    "torch.sum",
    "torch.chunk",
    "chunk",
    "torch.functional.split",
    "torch.split",
    "split",
    "backward",
}

register_ambiguous_method(*ambiguous_methods)
register_ambiguous_function(*ambiguous_functions)
register_type_rule(type_rule)
register_forward_func(forward_func)
register_backward_func(backward_func)
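Across all seven examples the registration boilerplate is the same three calls: a type rule (one counts the type as a single syft tensor), a forward function that either unwraps to .child or raises RemoteObjectFoundError for pointers, and a backward function that rewraps the raw result. A condensed sketch of the pattern for a hypothetical new tensor type (import paths follow PySyft 0.2.x and may differ in other versions):

from syft.generic.frameworks.hook.hook_args import (
    register_backward_func,
    register_forward_func,
    register_type_rule,
    get_child,
    one,
)
from syft.generic.tensor import AbstractTensor


class MyTensor(AbstractTensor):  # hypothetical custom tensor type
    pass


register_type_rule({MyTensor: one})           # counts as a single syft tensor
register_forward_func({MyTensor: get_child})  # unwrap to .child before the op
register_backward_func(
    {MyTensor: lambda i, **kwargs: MyTensor().on(i, wrap=False)}  # rewrap the result
)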