Example #1
def run_trained_attack(attack_input: AttackInputData, attack_type: AttackType):
    """Classification attack done by ML models."""
    attacker = None

    if attack_type == AttackType.LOGISTIC_REGRESSION:
        attacker = models.LogisticRegressionAttacker()
    elif attack_type == AttackType.MULTI_LAYERED_PERCEPTRON:
        attacker = models.MultilayerPerceptronAttacker()
    elif attack_type == AttackType.RANDOM_FOREST:
        attacker = models.RandomForestAttacker()
    elif attack_type == AttackType.K_NEAREST_NEIGHBORS:
        attacker = models.KNearestNeighborsAttacker()
    else:
        raise NotImplementedError('Attack type %s not implemented yet.' %
                                  attack_type)

    prepared_attacker_data = models.create_attacker_data(attack_input)

    attacker.train_model(prepared_attacker_data.features_train,
                         prepared_attacker_data.is_training_labels_train)

    # Run the attacker on (permuted) test examples.
    predictions_test = attacker.predict(prepared_attacker_data.features_test)

    # Generate ROC curves with predictions.
    fpr, tpr, thresholds = metrics.roc_curve(
        prepared_attacker_data.is_training_labels_test, predictions_test)

    roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)

    return SingleAttackResult(slice_spec=_get_slice_spec(attack_input),
                              attack_type=attack_type,
                              roc_curve=roc_curve)
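
A usage sketch, not taken from the source: the import path and AttackInputData field names are assumed to match TensorFlow Privacy's membership inference data_structures module and have moved between library versions, and RocCurve.get_auc() is likewise assumed.

import numpy as np
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import (
    AttackInputData, AttackType)

# Placeholder victim-model outputs: 10-class logits for train and test examples.
rng = np.random.default_rng(0)
attack_input = AttackInputData(
    logits_train=rng.normal(size=(1000, 10)),
    logits_test=rng.normal(size=(1000, 10)),
    labels_train=rng.integers(0, 10, 1000),
    labels_test=rng.integers(0, 10, 1000))

result = run_trained_attack(attack_input, AttackType.LOGISTIC_REGRESSION)
print(result.roc_curve.get_auc())  # assumed helper on RocCurve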
Example #2
def run_seq2seq_attack(
        attack_input: Seq2SeqAttackInputData,
        privacy_report_metadata: PrivacyReportMetadata = None,
        balance_attacker_training: bool = True) -> AttackResults:
    """Runs membership inference attacks on a seq2seq model.

    Args:
      attack_input: input data for running an attack.
      privacy_report_metadata: the metadata of the model under attack.
      balance_attacker_training: whether the training and test sets for the
        membership inference attacker should have a balanced (roughly equal)
        number of samples from the training and test sets used to develop the
        model under attack.

    Returns:
      The attack result.
    """
    attack_input.validate()

    # The attacker uses the average rank (a single number) of a seq2seq dataset
    # record to determine membership. So only Logistic Regression is supported,
    # as it makes the most sense for single-number features.
    attacker = models.LogisticRegressionAttacker()

    # Create attacker data and populate fields of privacy_report_metadata
    privacy_report_metadata = privacy_report_metadata or PrivacyReportMetadata()
    prepared_attacker_data = create_seq2seq_attacker_data(
        attack_input_data=attack_input,
        balance=balance_attacker_training,
        privacy_report_metadata=privacy_report_metadata)

    attacker.train_model(prepared_attacker_data.features_train,
                         prepared_attacker_data.is_training_labels_train)

    # Run the attacker on (permuted) test examples.
    predictions_test = attacker.predict(prepared_attacker_data.features_test)

    # Generate ROC curves with predictions.
    fpr, tpr, thresholds = metrics.roc_curve(
        prepared_attacker_data.is_training_labels_test, predictions_test)

    roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)

    attack_results = [
        SingleAttackResult(slice_spec=SingleSliceSpec(),
                           attack_type=AttackType.LOGISTIC_REGRESSION,
                           roc_curve=roc_curve,
                           data_size=prepared_attacker_data.data_size)
    ]

    return AttackResults(single_attack_results=attack_results,
                         privacy_report_metadata=privacy_report_metadata)
Example #3
def _run_trained_attack(attack_input: AttackInputData,
                        attack_type: AttackType,
                        balance_attacker_training: bool = True):
    """Classification attack done by ML models."""
    attacker = None

    if attack_type == AttackType.LOGISTIC_REGRESSION:
        attacker = models.LogisticRegressionAttacker()
    elif attack_type == AttackType.MULTI_LAYERED_PERCEPTRON:
        attacker = models.MultilayerPerceptronAttacker()
    elif attack_type == AttackType.RANDOM_FOREST:
        attacker = models.RandomForestAttacker()
    elif attack_type == AttackType.K_NEAREST_NEIGHBORS:
        attacker = models.KNearestNeighborsAttacker()
    else:
        raise NotImplementedError('Attack type %s not implemented yet.' %
                                  attack_type)

    prepared_attacker_data = models.create_attacker_data(
        attack_input, balance=balance_attacker_training)

    attacker.train_model(prepared_attacker_data.features_train,
                         prepared_attacker_data.is_training_labels_train)

    # Run the attacker on (permuted) test examples.
    predictions_test = attacker.predict(prepared_attacker_data.features_test)

    # Generate ROC curves with predictions.
    fpr, tpr, thresholds = metrics.roc_curve(
        prepared_attacker_data.is_training_labels_test, predictions_test)

    roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)

    # NOTE: In the current setup we can't obtain membership scores for all
    # samples, since some of them were used to train the attacker. This can be
    # fixed by training several attackers to ensure each sample was left out
    # in exactly one attacker (basically, this means performing cross-validation).
    # TODO(b/175870479): Implement membership scores for predicted attackers.

    return SingleAttackResult(slice_spec=_get_slice_spec(attack_input),
                              data_size=prepared_attacker_data.data_size,
                              attack_type=attack_type,
                              roc_curve=roc_curve)
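
The NOTE above points at cross-validation as the fix: train several attackers so that every sample is scored by an attacker that never saw it during training. A minimal sketch of that idea using scikit-learn (a hypothetical helper, not part of the library):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

def membership_scores_via_cv(features, is_training_labels, n_folds=5):
    """Hypothetical: out-of-fold membership scores for every sample."""
    # cross_val_predict trains n_folds models; each sample is scored only by
    # the model whose training folds excluded it.
    scores = cross_val_predict(
        LogisticRegression(),
        features,
        is_training_labels,
        cv=n_folds,
        method='predict_proba')
    return scores[:, 1]  # probability of the "was in training set" class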
Example #4
def test_predict_before_training(self):
    lr_attacker = models.LogisticRegressionAttacker()
    self.assertRaises(AssertionError, lr_attacker.predict, [])
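
The test implies that each attacker guards predict() behind an assertion that a model has been trained. A minimal sketch of such a base class (assumed; the library's actual implementation may differ):

class TrainedAttacker:
    """Sketch of a base class for the attackers used above."""
    model = None

    def train_model(self, input_features, is_training_labels):
        raise NotImplementedError()

    def predict(self, input_features):
        # The assertion exercised by test_predict_before_training.
        assert self.model is not None, 'Model not trained yet. Call train_model first.'
        return self.model.predict_proba(input_features)[:, 1]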