Example #1
    def test_get_loss_explicitly_provided(self):
        attack_input = AttackInputData(loss_train=np.array([1.0, 3.0, 6.0]),
                                       loss_test=np.array([1.0, 4.0, 6.0]))

        np.testing.assert_equal(attack_input.get_loss_train().tolist(),
                                [1.0, 3.0, 6.0])
        np.testing.assert_equal(attack_input.get_loss_test().tolist(),
                                [1.0, 4.0, 6.0])
def _compute_membership_probability(
        attack_input: AttackInputData,
        num_bins: int = 15) -> SingleMembershipProbabilityResult:
    """Computes each individual point's likelihood of being a member (denoted as privacy risk score in https://arxiv.org/abs/2003.10595).

  For an individual sample, its privacy risk score is computed as the posterior
  probability of being in the training set
  after observing its prediction output by the target machine learning model.

  Args:
    attack_input: input data for compute membership probability
    num_bins: the number of bins used to compute the training/test histogram

  Returns:
    membership probability results
  """

    # Uses the provided loss or entropy. Otherwise computes the loss.
    if attack_input.loss_train is not None and attack_input.loss_test is not None:
        train_values = attack_input.loss_train
        test_values = attack_input.loss_test
    elif attack_input.entropy_train is not None and attack_input.entropy_test is not None:
        train_values = attack_input.entropy_train
        test_values = attack_input.entropy_test
    else:
        train_values = attack_input.get_loss_train()
        test_values = attack_input.get_loss_test()

    # Compute the histograms on a log scale; losses are clamped away from zero
    # so that the log-spaced bin edges are well defined.
    small_value = 1e-10
    train_values = np.maximum(train_values, small_value)
    test_values = np.maximum(test_values, small_value)

    min_value = min(train_values.min(), test_values.min())
    max_value = max(train_values.max(), test_values.max())
    bins_hist = np.logspace(np.log10(min_value), np.log10(max_value),
                            num_bins + 1)

    train_hist, _ = np.histogram(train_values, bins=bins_hist)
    train_hist = train_hist / len(train_values)
    train_hist_indices = np.fmin(np.digitize(train_values, bins=bins_hist),
                                 num_bins) - 1

    test_hist, _ = np.histogram(test_values, bins=bins_hist)
    test_hist = test_hist / len(test_values)
    test_hist_indices = np.fmin(np.digitize(test_values, bins=bins_hist),
                                num_bins) - 1

    combined_hist = train_hist + test_hist
    combined_hist[combined_hist == 0] = small_value
    membership_prob_list = train_hist / combined_hist
    train_membership_probs = membership_prob_list[train_hist_indices]
    test_membership_probs = membership_prob_list[test_hist_indices]

    return SingleMembershipProbabilityResult(
        slice_spec=_get_slice_spec(attack_input),
        train_membership_probs=train_membership_probs,
        test_membership_probs=test_membership_probs)
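A minimal usage sketch (made-up losses, assuming the loss-only AttackInputData constructor shown in Example #1 and the definitions above are in scope): bins dominated by training losses yield membership probabilities near 1, bins dominated by test losses yield values well below 0.5.

result = _compute_membership_probability(
    AttackInputData(loss_train=np.array([0.1, 0.2, 0.3, 2.0]),
                    loss_test=np.array([1.5, 2.0, 2.5, 3.0])),
    num_bins=5)
# Low-loss training points fall into train-dominated bins (probability ~1);
# high-loss test points fall into test-dominated bins (probability <= 0.25 here).
print(result.train_membership_probs)
print(result.test_membership_probs)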
  def test_get_loss_from_probs(self):
    attack_input = AttackInputData(
        probs_train=np.array([[0.1, 0.1, 0.8], [0.8, 0.2, 0]]),
        probs_test=np.array([[0, 0.0001, 0.9999], [0.07, 0.18, 0.75]]),
        labels_train=np.array([1, 0]),
        labels_test=np.array([0, 2]))

    np.testing.assert_allclose(
        attack_input.get_loss_train(), [2.30258509, 0.2231436], atol=1e-7)
    np.testing.assert_allclose(
        attack_input.get_loss_test(), [18.42068074, 0.28768207], atol=1e-7)
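The expected numbers above can be reproduced as the negative log of the probability assigned to the true class; the 1e-8 floor below is an assumption inferred from -log(1e-8) ≈ 18.42068074, not a documented constant.

import numpy as np

probs_test = np.array([[0., 0.0001, 0.9999], [0.07, 0.18, 0.75]])
labels_test = np.array([0, 2])
true_class_probs = probs_test[np.arange(len(labels_test)), labels_test]
# Clip so that the zero probability does not produce an infinite loss.
print(-np.log(np.maximum(true_class_probs, 1e-8)))  # ~[18.42068074, 0.28768207]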
  def test_get_loss_from_logits(self):
    attack_input = AttackInputData(
        logits_train=np.array([[-0.3, 1.5, 0.2], [2, 3, 0.5]]),
        logits_test=np.array([[2, 0.3, 0.2], [0.3, -0.5, 0.2]]),
        labels_train=np.array([1, 0]),
        labels_test=np.array([0, 2]))

    np.testing.assert_allclose(
        attack_input.get_loss_train(), [0.36313551, 1.37153903], atol=1e-7)
    np.testing.assert_allclose(
        attack_input.get_loss_test(), [0.29860897, 0.95618669], atol=1e-7)
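These expected values are the softmax cross-entropy of the true class; a quick recomputation with a numerically stable log-softmax:

import numpy as np

logits_train = np.array([[-0.3, 1.5, 0.2], [2.0, 3.0, 0.5]])
labels_train = np.array([1, 0])
shifted = logits_train - logits_train.max(axis=1, keepdims=True)
log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
print(-log_probs[np.arange(len(labels_train)), labels_train])
# ~[0.36313551, 1.37153903]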
Example #5
def _run_threshold_attack(attack_input: AttackInputData):
    fpr, tpr, thresholds = metrics.roc_curve(
        np.concatenate((np.zeros(attack_input.get_train_size()),
                        np.ones(attack_input.get_test_size()))),
        np.concatenate(
            (attack_input.get_loss_train(), attack_input.get_loss_test())))

    roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)

    return SingleAttackResult(slice_spec=_get_slice_spec(attack_input),
                              attack_type=AttackType.THRESHOLD_ATTACK,
                              roc_curve=roc_curve)
Example #6
def _run_threshold_attack(attack_input: AttackInputData):
    """Runs a threshold attack on loss."""
    ntrain, ntest = attack_input.get_train_size(), attack_input.get_test_size()
    loss_train = attack_input.get_loss_train()
    loss_test = attack_input.get_loss_test()
    if loss_train is None or loss_test is None:
        raise ValueError(
            'Not possible to run threshold attack without losses.')
    fpr, tpr, thresholds = metrics.roc_curve(
        np.concatenate((np.zeros(ntrain), np.ones(ntest))),
        np.concatenate((loss_train, loss_test)))

    roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)

    return SingleAttackResult(
        slice_spec=_get_slice_spec(attack_input),
        data_size=DataSize(ntrain=ntrain, ntest=ntest),
        attack_type=AttackType.THRESHOLD_ATTACK,
        membership_scores_train=-loss_train,
        membership_scores_test=-loss_test,
        roc_curve=roc_curve)
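A toy illustration (made-up losses, not from the library): members tend to have lower loss, so sweeping a threshold over the loss separates the two groups, and the ROC AUC summarizes the attacker's advantage.

import numpy as np
from sklearn import metrics

loss_train = np.array([0.1, 0.2, 0.4, 0.3])  # hypothetical member losses
loss_test = np.array([0.8, 1.2, 0.5, 0.9])   # hypothetical non-member losses
fpr, tpr, _ = metrics.roc_curve(
    np.concatenate((np.zeros(len(loss_train)), np.ones(len(loss_test)))),
    np.concatenate((loss_train, loss_test)))
print(metrics.auc(fpr, tpr))  # 1.0 here, since the toy losses are fully separable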
Example #7
  def test_get_loss(self):
    attack_input = AttackInputData(
        logits_train=np.array([[0.3, 0.5, 0.2], [0.2, 0.3, 0.5]]),
        logits_test=np.array([[0.2, 0.3, 0.5], [0.3, 0.5, 0.2]]),
        labels_train=np.array([1, 0]),
        labels_test=np.array([0, 1])
    )

    np.testing.assert_equal(
        attack_input.get_loss_train().tolist(), [0.5, 0.2])
    np.testing.assert_equal(
        attack_input.get_loss_test().tolist(), [0.2, 0.5])
Example #8
def _slice_by_percentiles(data: AttackInputData, from_percentile: float,
                          to_percentile: float):
    """Slices samples by loss percentiles."""

    # Find from_percentile and to_percentile percentiles in losses.
    loss_train = data.get_loss_train()
    loss_test = data.get_loss_test()
    losses = np.concatenate((loss_train, loss_test))
    from_loss = np.percentile(losses, from_percentile)
    to_loss = np.percentile(losses, to_percentile)

    idx_train = (from_loss <= loss_train) & (loss_train <= to_loss)
    idx_test = (from_loss <= loss_test) & (loss_test <= to_loss)

    return _slice_data_by_indices(data, idx_train, idx_test)
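A small check of the masks built above (plain NumPy, not calling _slice_data_by_indices): with from_percentile=0 and to_percentile=50, roughly the lower half of the combined losses is kept.

import numpy as np

loss_train = np.array([0.1, 0.4, 0.9])
loss_test = np.array([0.2, 0.7, 1.5])
losses = np.concatenate((loss_train, loss_test))
from_loss, to_loss = np.percentile(losses, 0), np.percentile(losses, 50)
print((from_loss <= loss_train) & (loss_train <= to_loss))  # [ True  True False]
print((from_loss <= loss_test) & (loss_test <= to_loss))    # [ True False False]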
def _compute_missing_privacy_report_metadata(
        metadata: PrivacyReportMetadata,
        attack_input: AttackInputData) -> PrivacyReportMetadata:
    """Populates metadata fields if they are missing."""
    if metadata is None:
        metadata = PrivacyReportMetadata()
    if metadata.accuracy_train is None:
        metadata.accuracy_train = _get_accuracy(attack_input.logits_train,
                                                attack_input.labels_train)
    if metadata.accuracy_test is None:
        metadata.accuracy_test = _get_accuracy(attack_input.logits_test,
                                               attack_input.labels_test)
    if metadata.loss_train is None:
        metadata.loss_train = np.average(attack_input.get_loss_train())
    if metadata.loss_test is None:
        metadata.loss_test = np.average(attack_input.get_loss_test())
    return metadata
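_get_accuracy is not shown in this example; a minimal stand-in consistent with how it is called above (an assumption, not the library's implementation) would be:

import numpy as np

def _get_accuracy(logits, labels):
    # Hypothetical helper: argmax accuracy of the predictions against the
    # labels, returning None when either input is missing.
    if logits is None or labels is None:
        return None
    return float(np.mean(np.argmax(logits, axis=1) == labels))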
Example #10
def create_attacker_data(attack_input_data: AttackInputData,
                         test_fraction: float = 0.25,
                         balance: bool = True) -> AttackerData:
    """Prepare AttackInputData to train ML attackers.

  Combines logits and losses and performs a random train-test split.

  Args:
    attack_input_data: Original AttackInputData
    test_fraction: Fraction of the dataset to include in the test split.
    balance: Whether the training and test sets for the membership inference
              attacker should have a balanced (roughly equal) number of samples
              from the training and test sets used to develop the model
              under attack.

  Returns:
    AttackerData.
  """
    attack_input_train = _column_stack(attack_input_data.logits_or_probs_train,
                                       attack_input_data.get_loss_train())
    attack_input_test = _column_stack(attack_input_data.logits_or_probs_test,
                                      attack_input_data.get_loss_test())

    if balance:
        min_size = min(attack_input_data.get_train_size(),
                       attack_input_data.get_test_size())
        attack_input_train = _sample_multidimensional_array(
            attack_input_train, min_size)
        attack_input_test = _sample_multidimensional_array(
            attack_input_test, min_size)
    ntrain, ntest = attack_input_train.shape[0], attack_input_test.shape[0]

    features_all = np.concatenate((attack_input_train, attack_input_test))

    labels_all = np.concatenate((np.zeros(ntrain), np.ones(ntest)))

    # Perform a train-test split
    (features_train, features_test, is_training_labels_train,
     is_training_labels_test) = model_selection.train_test_split(
         features_all, labels_all, test_size=test_fraction, stratify=labels_all)
    return AttackerData(features_train, is_training_labels_train,
                        features_test, is_training_labels_test,
                        DataSize(ntrain=ntrain, ntest=ntest))
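_column_stack and _sample_multidimensional_array are helpers not shown here; a plausible _column_stack consistent with the usage above (an assumption, not the library's code) attaches the loss as one extra feature column and tolerates a missing part:

import numpy as np

def _column_stack(logits, loss):
    # Hypothetical helper: build the attacker's feature matrix from logits
    # (or probs) plus the per-example loss, handling loss-only or
    # logits-only attack inputs.
    if logits is None:
        return np.expand_dims(loss, axis=-1)
    if loss is None:
        return logits
    return np.column_stack((logits, loss))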