Example #1
import logging
import time

import crypten
import torch


# AverageMeter and accuracy are assumed to be project helper utilities.
def validate(val_loader, model, criterion, print_freq=10, flatten=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            # compute output
            if flatten:
                input = input.view(input.size(0), -1)
            if isinstance(
                model, crypten.nn.Module
            ) and not crypten.is_encrypted_tensor(input):
                input = crypten.cryptensor(input)

            output = model(input)

            if crypten.is_encrypted_tensor(output):
                output = output.get_plain_text()
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.add(loss.item(), input.size(0))
            top1.add(prec1[0], input.size(0))
            top5.add(prec5[0], input.size(0))

            # measure elapsed time
            current_batch_time = time.time() - end
            batch_time.add(current_batch_time)
            end = time.time()

            if (i + 1) % print_freq == 0:
                logging.info("\nTest: [{}/{}]\t"
                             "Time {:.3f} ({:.3f})\t"
                             "Loss {:.4f} ({:.4f})\t"
                             "Prec@1 {:.3f} ({:.3f})   \t"
                             "Prec@5 {:.3f} ({:.3f})".format(
                                 i + 1,
                                 len(val_loader),
                                 current_batch_time,
                                 batch_time.value(),
                                 loss.item(),
                                 losses.value(),
                                 prec1[0],
                                 top1.value(),
                                 prec5[0],
                                 top5.value(),
                             ))
            # limit evaluation to the first ~100 batches
            if i > 100:
                break

        logging.info(" * Prec@1 {:.3f} Prec@5 {:.3f}".format(
            top1.value(), top5.value()))

    return top1.value()
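The validator above accepts either a plain PyTorch model or an encrypted crypten.nn.Module. A minimal sketch of producing the latter (a toy linear model is assumed; from_pytorch traces the model with a dummy input):

import torch
import crypten

crypten.init()

pytorch_model = torch.nn.Linear(10, 5)
dummy_input = torch.empty(1, 10)

# Convert to a crypten.nn.Module, then encrypt its parameters
encrypted_model = crypten.nn.from_pytorch(pytorch_model, dummy_input)
encrypted_model.encrypt()
encrypted_model.eval()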
Example #2
import numpy as np
import torch
import torch.nn as nn

import crypten


# accuracy and encrypt_data_tensor_with_src are assumed project helpers.
def validate(val_loader, model, criterion):
    # switch to evaluate mode
    model.eval()

    scores = []

    softmax = nn.Softmax(dim=1)
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if isinstance(
                model, crypten.nn.Module
            ) and not crypten.is_encrypted_tensor(input):
                input = encrypt_data_tensor_with_src(input)
            # compute output
            output = model(input)
            if crypten.is_encrypted_tensor(output):
                output = output.get_plain_text()

            p = softmax(output)
            p, predicted = p.data.max(1)

            score = accuracy(predicted, target)
            scores.append(score)
            loss = criterion(output, target)

            print(f"validate {i}, loss {loss.data}, score {np.mean(scores)}")
    return np.mean(scores)
Example #3
    def polynomial(self, coeffs, func="mul"):
        """Computes a polynomial function on a tensor with given coefficients,
        `coeffs`, that can be a list of values or a 1-D tensor.

        Coefficients should be ordered from the first-order (linear) term to
        the highest-order term; the constant term is not included.
        """
        # Coefficient input type-checking
        if isinstance(coeffs, list):
            coeffs = torch.tensor(coeffs)
        assert torch.is_tensor(coeffs) or crypten.is_encrypted_tensor(
            coeffs
        ), "Polynomial coefficients must be a list or tensor"
        assert coeffs.dim() == 1, "Polynomial coefficients must be a 1-D tensor"

        # Handle linear case
        if coeffs.size(0) == 1:
            return self.mul(coeffs)

        # Build terms [x, x^2, x^3, ...] with an exponentially growing tree:
        # each pass combines all current terms with the highest term via
        # `func`, doubling the number of computed powers
        terms = crypten.mpc.stack([self, self.square()])
        while terms.size(0) < coeffs.size(0):
            highest_term = terms[-1:].expand(terms.size())
            new_terms = getattr(terms, func)(highest_term)
            terms = crypten.cat([terms, new_terms])

        # Resize the coefficients for broadcast
        terms = terms[:coeffs.size(0)]
        for _ in range(terms.dim() - 1):
            coeffs = coeffs.unsqueeze(1)

        # Multiply terms by coefficients and sum
        return terms.mul(coeffs).sum(0)
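A quick usage sketch of the method above (assuming an encrypted MPC tensor; fixed-point arithmetic makes the results approximate):

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.tensor([1.0, 2.0, 3.0]))
# Evaluate 2x + 3x^2: linear coefficient first, no constant term
y = x.polynomial([2.0, 3.0])
print(y.get_plain_text())  # ~ tensor([ 5., 16., 33.])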
Example #4
    def validation_function(*args, **kwargs):
        with cfg.temp_override({"debug.validation_mode": False}):
            # Compute crypten result
            result_enc = func(*args, **kwargs)
            result = (
                result_enc.get_plain_text()
                if crypten.is_encrypted_tensor(result_enc)
                else result_enc
            )

            args = list(args)

            # Compute torch result for corresponding function
            for i, arg in enumerate(args):
                if crypten.is_encrypted_tensor(arg):
                    args[i] = args[i].get_plain_text()

            kwargs.pop("input_in_01", None)
            for key, value in kwargs.items():
                if crypten.is_encrypted_tensor(value):
                    kwargs[key] = value.get_plain_text()
            reference = getattr(self.get_plain_text(), func_name)(*args, **kwargs)

            # TODO: Validate properties - Issue is tuples can contain encrypted tensors
            if not torch.is_tensor(reference):
                return result_enc

            # Check sizes match
            if result.size() != reference.size():
                crypten_log(
                    f"Size mismatch: Expected {reference.size()} but got {result.size()}"
                )
                raise ValueError(f"Function {func_name} returned incorrect size")

            # Check that results match: an entry passes if either its relative
            # error or its (scaled) absolute error is within tolerance
            diff = (result - reference).abs_()
            norm_diff = diff.div(result.abs() + reference.abs()).abs_()
            test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
            test_passed = test_passed.gt(0).all().item() == 1
            if not test_passed:
                crypten_log(f"Function {func_name} returned incorrect values")
                crypten_log("Result %s" % result)
                crypten_log("Result - Reference = %s" % (result - reference))
                raise ValueError(f"Function {func_name} returned incorrect values")

        return result_enc
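The wrapper disables re-entrant validation via cfg.temp_override. A minimal sketch of that context manager, assuming CrypTen's global config object:

from crypten.config import cfg

# Temporarily override a config key; the previous value is restored on exit
with cfg.temp_override({"debug.validation_mode": False}):
    pass  # operations here do not re-enter the validator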
Example #5
    def forward(self, input):
        assert isinstance(input, (list, tuple)), "input must be list or tuple"
        tensor, shape = input

        # shape is not data so we can get plain text
        if crypten.is_encrypted_tensor(shape):
            shape = shape.get_plain_text()
        return tensor.reshape(shape.long().tolist())
Example #6
    def forward(self, input):
        assert isinstance(input, (list, tuple)), "input must be list or tuple"
        tensor, indices = input

        # indices are not data so we can get plain text:
        if crypten.is_encrypted_tensor(indices):
            indices = indices.get_plain_text().long()
        result = tensor.take(indices, self.dimension)
        return result
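Both modules above follow the same convention: shapes and indices are treated as public metadata, so decrypting them reveals nothing about the input values. A small sketch of the pattern:

import torch
import crypten

crypten.init()

t = crypten.cryptensor(torch.arange(6.0))
out = t.reshape([2, 3])  # the target shape itself is public
print(out.get_plain_text().size())  # torch.Size([2, 3])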
Example #7
    def forward(ctx, input):
        input, other = input
        if crypten.is_encrypted_tensor(other):
            # Private divisor: multiply by its (encrypted) reciprocal
            other_reciprocal = other.reciprocal()
            ctx.save_multiple_for_backward([input, other_reciprocal])
            return input.mul(other_reciprocal)
        else:
            # Public divisor: divide directly
            ctx.save_multiple_for_backward([input.size(), other])
            return input.div(other)
Example #8
    def validation_function(*args, **kwargs):
        crypten.debug.set_validation_mode(False)

        # skip if no reference to validate
        if not hasattr(torch.tensor([]), func_name):
            crypten_log(f"Skipping validation for {func_name}()")
            return func(*args, **kwargs)

        # Compute crypten result
        result_enc = func(*args, **kwargs)
        result = result_enc.get_plain_text()

        # Compute torch result for corresponding function
        for i, arg in enumerate(args):
            if crypten.is_encrypted_tensor(arg):
                args[i] = args[i].get_plain_text()
        for key, value in kwargs.items():
            if crypten.is_encrypted_tensor(value):
                kwargs[key] = value.get_plain_text()
        reference = getattr(self.get_plain_text(), func_name)(*args, **kwargs)

        # Check sizes match
        if result.size() != reference.size():
            crypten_log(
                f"Size mismatch: Expected {reference.size()} but got {result.size()}"
            )
            crypten.debug.set_validation_mode(True)
            raise ValueError(f"Function {func_name} returned incorrect size")

        # Check that results match
        diff = (result - reference).abs_()
        norm_diff = diff.div(result.abs() + reference.abs()).abs_()
        test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
        test_passed = test_passed.gt(0).all().item() == 1
        if not test_passed:
            crypten_log(f"Function {func_name} returned incorrect values")
            crypten_log("Result %s" % result)
            crypten_log("Result - Reference = %s" % (result - reference))
            crypten.debug.set_validation_mode(True)
            raise ValueError(f"Function {func_name} returned incorrect values")

        crypten.debug.set_validation_mode(True)

        return result_enc
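A sketch of switching the validator on, using the same debug helper the wrapper itself calls; ops with a torch counterpart are then cross-checked against their plaintext reference:

import torch
import crypten
import crypten.debug

crypten.init()
crypten.debug.set_validation_mode(True)

x = crypten.cryptensor(torch.tensor([1.0, -2.0, 3.0]))
y = x.abs()  # validated against the torch result behind the scenes
print(y.get_plain_text())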
Example #9
    def backward(ctx, grad_output):
        reciprocal, other = ctx.saved_tensors
        grad_input = reciprocal.square().mul(other).mul(grad_output).neg()
        grad_input = _inverse_broadcast(grad_input, reciprocal.size())

        if torch.is_tensor(other) or crypten.is_encrypted_tensor(other):
            grad_other = reciprocal.mul(grad_output)
            grad_other = _inverse_broadcast(grad_other, other.size())
            return (grad_input, grad_other)
        else:
            return grad_input
Example #10
    def _check_forward_backward(
        self, func_name, input_tensor, *args, torch_func_name=None, msg=None, **kwargs
    ):
        """Checks forward and backward against PyTorch

        Args:
            func_name (str): PyTorch/CrypTen function name
            input_tensor (torch.tensor): primary input
            args (list): contains arguments for function
            torch_func_name (str): optional name of the torch reference
                function when it differs from func_name
            msg (str): additional message for mismatch
            kwargs (dict): keyword arguments for function
        """

        if msg is None:
            msg = f"{func_name} grad_fn incorrect"

        input = input_tensor.clone()
        input.requires_grad = True
        input_encr = crypten.cryptensor(input, requires_grad=True)

        for private in [False, True]:
            input.grad = None
            input_encr.grad = None
            args = self._set_grad_to_zero(args)
            args_encr = self._set_grad_to_zero(list(args), make_private=private)

            # obtain torch function
            if torch_func_name is not None:
                torch_func = self._get_torch_func(torch_func_name)
            else:
                torch_func = self._get_torch_func(func_name)

            reference = torch_func(input, *args, **kwargs)
            encrypted_out = getattr(input_encr, func_name)(*args_encr, **kwargs)

            # extract argmax output for max / min with keepdim=False
            if isinstance(encrypted_out, (list, tuple)):
                reference = reference[0]
                encrypted_out = encrypted_out[0]

            self._check(encrypted_out, reference, msg + " in forward")

            # check backward pass
            grad_output = get_random_test_tensor(
                max_value=2, size=reference.size(), is_float=True
            )
            grad_output_encr = crypten.cryptensor(grad_output)
            reference.backward(grad_output)
            encrypted_out.backward(grad_output_encr)

            self._check(input_encr.grad, input.grad, msg + " in backward")
            for i, arg_encr in enumerate(args_encr):
                if crypten.is_encrypted_tensor(arg_encr):
                    self._check(arg_encr.grad, args[i].grad, msg + " in backward args")
Example #11
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors

        # saved is a list of [input, other_reciprocal]
        if crypten.is_encrypted_tensor(saved[1]):
            input, other_reciprocal = saved
            grad_input = other_reciprocal.mul(grad_output)
            grad_other = other_reciprocal.square().mul(input).mul(grad_output).neg()
            return (
                _inverse_broadcast(grad_input, input.size()),
                _inverse_broadcast(grad_other, other_reciprocal.size()),
            )
        # other is a public tensor or scalar; saved is [input_size, other]
        else:
            input_size, other = saved
            grad_input = grad_output.div(other)
            if torch.is_tensor(other):
                return _inverse_broadcast(grad_input, input_size)
            else:
                return grad_input
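The branches above implement the division gradients d(a/b)/da = 1/b and d(a/b)/db = -a/b^2. A quick check through CrypTen autograd (values are approximate under fixed-point encoding):

import torch
import crypten

crypten.init()

a = crypten.cryptensor(torch.tensor([4.0]), requires_grad=True)
b = crypten.cryptensor(torch.tensor([2.0]), requires_grad=True)
out = a.div(b)
out.backward(crypten.cryptensor(torch.tensor([1.0])))
print(a.grad.get_plain_text())  # ~ 0.5  (= 1/b)
print(b.grad.get_plain_text())  # ~ -1.0 (= -a/b^2)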
Example #12
    def test_where(self):
        """Test that crypten.where properly conditions"""
        sizes = [(10,), (5, 10), (1, 5, 10)]
        y_types = [lambda x: x, crypten.cryptensor]

        for size, y_type in itertools.product(sizes, y_types):
            tensor1 = get_random_test_tensor(size=size, is_float=True)
            encrypted_tensor1 = crypten.cryptensor(tensor1)
            tensor2 = get_random_test_tensor(size=size, is_float=True)
            encrypted_tensor2 = y_type(tensor2)

            condition_tensor = (
                get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
            )
            condition_encrypted = crypten.cryptensor(condition_tensor)
            condition_bool = condition_tensor.bool()

            reference_out = torch.where(condition_bool, tensor1, tensor2)

            encrypted_out = crypten.where(
                condition_bool, encrypted_tensor1, encrypted_tensor2
            )

            y_is_private = crypten.is_encrypted_tensor(encrypted_tensor2)
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with public condition",
            )

            encrypted_out = encrypted_tensor1.where(
                condition_encrypted, encrypted_tensor2
            )
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with private condition",
            )
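A direct usage sketch of crypten.where with a public condition (a private, encrypted condition works the same way):

import torch
import crypten

crypten.init()

cond = torch.tensor([True, False, True])
x = crypten.cryptensor(torch.tensor([1.0, 2.0, 3.0]))
y = crypten.cryptensor(torch.tensor([10.0, 20.0, 30.0]))
out = crypten.where(cond, x, y)
print(out.get_plain_text())  # ~ tensor([ 1., 20., 3.])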
Example #13
import logging
import time

import crypten
import crypten.communicator
import torch


# AverageMeter is assumed to be a project helper utility.
def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
    # Initialize random weights
    w = features.new(torch.randn(1, features.size(0)))
    b = features.new(torch.randn(1))

    if print_time:
        pt_time = AverageMeter()
        end = time.time()

    for epoch in range(epochs):
        # Forward
        label_predictions = w.matmul(features).add(b).sign()

        # Compute accuracy
        correct = label_predictions.mul(labels)
        accuracy = correct.add(1).div(2).mean()
        if crypten.is_encrypted_tensor(accuracy):
            accuracy = accuracy.get_plain_text()

        # Print accuracy only once (on rank 0)
        if crypten.communicator.get().get_rank() == 0:
            logging.info(f"Epoch {epoch} --- Training Accuracy %.2f%%" %
                         (accuracy.item() * 100))

        # Backward
        loss_grad = -labels * (1 - correct) * 0.5  # Hinge loss
        b_grad = loss_grad.mean()
        w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))

        # Update
        w -= w_grad * lr
        b -= b_grad * lr

        if print_time:
            iter_time = time.time() - end
            pt_time.add(iter_time)
            logging.info("    Time %.6f (%.6f)" % (iter_time, pt_time.value()))
            end = time.time()

    return w, b
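A toy end-to-end sketch with random data (in a real multi-party run, each party would encrypt its own data with an appropriate src rank):

import torch
import crypten

crypten.init()

# 3 features x 100 samples; labels in {-1, +1}
features = torch.randn(3, 100)
labels = torch.randn(1, 100).sign()

w, b = train_linear_svm(
    crypten.cryptensor(features), crypten.cryptensor(labels), epochs=5
)
print(w.get_plain_text(), b.get_plain_text())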
Example #14
    def _process_targets(self, targets):
        """Encrypts targets and applies randomized response (RR) to them if necessary"""
        # Compute flip probabilities for the RR protocol, if enabled
        if self.rr_prob is not None:
            flip_probs = torch.tensor(self.rr_prob).expand(targets.size())

        if crypten.is_encrypted_tensor(targets):
            if self.rr_prob is not None:
                flip_mask = crypten.bernoulli(flip_probs)
                # flip: t XOR m == t + m - 2tm for binary t, m
                targets = targets + flip_mask - 2 * flip_mask * targets
            targets_enc = targets
        else:
            # Label provider adds RR label flips if they are plaintext
            if self.rr_prob is not None and self.is_label_src():
                flip_mask = flip_probs.bernoulli()
                targets += flip_mask - 2 * targets * flip_mask

            # Encrypt targets:
            targets_enc = crypten.cryptensor(targets, src=self.label_src)

        return targets_enc
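Both branches flip labels with the arithmetic XOR identity t XOR m = t + m - 2tm, which holds for {0, 1} values and therefore also works on encrypted tensors. A plaintext check:

import torch

t = torch.tensor([0.0, 1.0, 0.0, 1.0])
m = torch.tensor([0.0, 0.0, 1.0, 1.0])
print(t + m - 2 * t * m)  # tensor([0., 1., 1., 0.]) == t XOR m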
Example #15
# Variant of Example #13 (same assumed imports and helpers), using a
# hard-sigmoid activation instead of sign().
def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
    # Initialize random weights
    w = features.new(torch.randn(1, features.size(0)))
    b = features.new(torch.randn(1))

    if print_time:
        pt_time = AverageMeter()
        end = time.time()

    for epoch in range(epochs):
        # Forward: raw scores w.matmul(x), squashed with a piecewise
        # hard-sigmoid approximation, then shifted by the bias
        label_predictions = w.matmul(features)
        for i in range(len(label_predictions[0])):
            if label_predictions[0][i] < -0.5:
                label_predictions[0][i] = 0.0
            elif label_predictions[0][i] > 0.5:
                label_predictions[0][i] = 1.0
            else:
                label_predictions[0][i] = label_predictions[0][i] + 0.5
            label_predictions[0][i] = label_predictions[0][i].add(b)

        # Compute accuracy
        correct = label_predictions.mul(labels)
        accuracy = correct.add(1).div(2).mean()
        if crypten.is_encrypted_tensor(accuracy):
            accuracy = accuracy.get_plain_text()

        # Print accuracy only once (on rank 0)
        if crypten.communicator.get().get_rank() == 0:
            print(f"Epoch {epoch} --- Training Accuracy %.2f%%" %
                  (accuracy.item() * 100))

        # Backward
        loss_grad = -labels * (1 - correct) * 0.5  # Hinge loss
        b_grad = loss_grad.mean()
        w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))

        # Update
        w -= w_grad * lr
        b -= b_grad * lr

        if print_time:
            iter_time = time.time() - end
            pt_time.add(iter_time)
            logging.info("    Time %.6f (%.6f)" % (iter_time, pt_time.value()))
            end = time.time()

    return w, b
Example #16
    def _check_forward_backward(self,
                                fn_name,
                                input_tensor,
                                *args,
                                msg=None,
                                **kwargs):
        if msg is None:
            msg = f"{fn_name} grad_fn incorrect"

        for requires_grad in [True]:
            # Setup input
            input = input_tensor.clone()
            input.requires_grad = requires_grad
            input_encr = AutogradCrypTensor(crypten.cryptensor(input),
                                            requires_grad=requires_grad)

            for private in [False, True]:
                input.grad = None
                input_encr.grad = None

                # Setup args
                args_encr = list(args)
                for i, arg in enumerate(args):
                    if private and is_float_tensor(arg):
                        args_encr[i] = AutogradCrypTensor(
                            crypten.cryptensor(arg),
                            requires_grad=requires_grad)
                        args_encr[i].grad = None  # zero grad
                    if is_float_tensor(arg):
                        args[i].requires_grad = requires_grad
                        args[i].grad = None  # zero grad

                # Check forward pass
                if hasattr(input, fn_name):
                    reference = getattr(input, fn_name)(*args, **kwargs)
                elif hasattr(F, fn_name):
                    reference = getattr(F, fn_name)(input, *args, **kwargs)
                elif fn_name == "square":
                    reference = input.pow(2)
                else:
                    raise ValueError("unknown PyTorch function: %s" % fn_name)

                encrypted_out = getattr(input_encr, fn_name)(*args_encr,
                                                             **kwargs)

                # Remove argmax output from max / min
                if isinstance(encrypted_out, (list, tuple)):
                    reference = reference[0]
                    encrypted_out = encrypted_out[0]

                self._check(encrypted_out, reference, msg + " in forward")

                # Check backward pass
                grad_output = get_random_test_tensor(max_value=2,
                                                     size=reference.size(),
                                                     is_float=True)
                grad_output_encr = crypten.cryptensor(grad_output)

                # Do not check backward if pytorch backward fails
                try:
                    reference.backward(grad_output)
                except RuntimeError:
                    logging.info("skipped")
                    continue
                encrypted_out.backward(grad_output_encr)

                self._check(input_encr.grad, input.grad, msg + " in backward")
                for i, arg_encr in enumerate(args_encr):
                    if crypten.is_encrypted_tensor(arg_encr):
                        self._check(arg_encr.grad, args[i].grad,
                                    msg + " in backward args")
Example #17
    def forward(self, x):
        size = torch.tensor(x.size())
        # return the size as an encrypted tensor when the input is encrypted
        if crypten.is_encrypted_tensor(x):
            size = crypten.cryptensor(size.float())
        return size
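A usage sketch of the behavior above: sizes are public metadata, but re-encrypting them lets downstream graph nodes handle a uniform tensor type:

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.zeros(2, 3))
size = crypten.cryptensor(torch.tensor(x.size()).float())
print(size.get_plain_text())  # tensor([2., 3.])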