Example No. 1
import pytest

from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.pytorch import ImagenetTransferLearning
from privacyraven.models.victim import train_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target


def test_extraction():
    try:
        print("Creating victim model")
        model = train_mnist_victim(gpus=0)

        def query_mnist(input_data):
            return get_target(model, input_data)

        print("Downloading EMNIST data")
        emnist_train, emnist_test = get_emnist_data()

        print("Launching model extraction attack")
        # gpus=0 runs the attack on the CPU
        attack = ModelExtractionAttack(
            query=query_mnist,
            query_limit=100,
            victim_input_shape=(1, 28, 28, 1),
            victim_output_targets=10,
            substitute_input_shape=(1, 3, 28, 28),
            synthesizer="copycat",
            substitute_model_arch=ImagenetTransferLearning,
            substitute_input_size=1000,
            seed_data_train=emnist_train,
            seed_data_test=emnist_test,
            gpus=0,
        )
        print(attack)
    except Exception as e:
        pytest.fail(f"Unexpected error: {e}")
Example No. 2
    def __attrs_post_init__(self):
        self.query = establish_query(self.query, self.victim_input_shape)

        # We use the dict of the attack to unpack all the extraction arguments
        # This will need to be changed as ModelExtractionAttack is changed

        config = attr.asdict(self)
        extract_args = copy.deepcopy(config)
        # print(extract_args)
        extract_args.pop("data_point")
        extract_args.pop("threshold")
        extract_args = extract_args.values()

        self.extraction_attack = ModelExtractionAttack(*extract_args)
        self.substitute_model = self.extraction_attack.substitute_model

        self.query_substitute = lambda x: query_model(
            self.substitute_model, x, self.substitute_input_shape
        )
        pred, target = self.query_substitute(self.data_point)

        # target = target.unsqueeze(0)
        # output = torch.nn.functional.cross_entropy(pred, target)

        # t_pred, t_target = query_substitute(self.seed_data_train)

        # TODO: support multiple threshold formats: a number, a function, or a string

        # threshold = torch.nn.functional.cross_entropy()
        # print("Cross Entropy Loss is: " + output)
        # print("AUROC is: " + auroc)

        # We need to support both a binary classifier and a threshold;
        # these map to attackNN-based and metric-based attacks, respectively
        if self.threshold is None:
            self.binary_classifier = True
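
# The commented-out scratch work above gestures at the metric-based branch. A minimal,
# illustrative sketch of that idea, assuming a plain cross-entropy threshold (this helper
# is not part of PrivacyRaven's API):
import torch.nn.functional as F

def metric_based_membership(pred, target, threshold):
    # Metric-based attack: treat the point as a training member when the
    # substitute's loss on it falls below the user-supplied threshold
    loss = F.cross_entropy(pred, target.unsqueeze(0))
    return loss.item() < threshold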
Example No. 3
import torch

from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.four_layer import FourLayerClassifier
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target


def test_extraction():
    """End-to-end test of a model extraction attack"""

    # Create a query function for a target PyTorch Lightning model
    model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())

    def query_mnist(input_data):
        # PrivacyRaven provides built-in query functions
        return get_target(model, input_data, (1, 28, 28, 1))

    # Obtain seed (or public) data to be used in extraction
    emnist_train, emnist_test = get_emnist_data()

    # Run a model extraction attack
    attack = ModelExtractionAttack(
        query=query_mnist,
        query_limit=100,
        victim_input_shape=(1, 28, 28, 1),  # EMNIST data point shape
        victim_output_targets=10,
        substitute_input_shape=(3, 1, 28, 28),
        synthesizer="copycat",
        substitute_model_arch=FourLayerClassifier,
        substitute_input_size=784,  # 28 * 28, the flattened image size
        seed_data_train=emnist_train,
        seed_data_test=emnist_test,
        gpus=0,
    )
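
# Once the attack has run, the trained substitute is available on the attack object;
# Example No. 5 below reads the same attribute.
substitute_model = attack.substitute_model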
Example No. 4
def cloudleak(
    query,
    query_limit,
    victim_input_shape,
    victim_output_targets,
    substitute_input_shape,
    substitute_model,
    substitute_input_size,
    seed_data_train,
    seed_data_test,
    transform,
    batch_size,
    num_workers,
    gpus,
    max_epochs,
    learning_rate,
):
    """Run CloudLeak model extraction attacks

    Returns an array of attacks that use synthesis functions
    based on adversarial/evasion attacks

    Based upon: https://bit.ly/31Npbgj

    Unlike the paper, this function does not include subset
    sampling strategies and relies upon different evasion
    attacks in order to comply with the threat model"""

    adv_synths = ["HopSkipJump"]

    cloudleak_attacks = []

    for s in adv_synths:
        attack = ModelExtractionAttack(
            query,
            query_limit,
            victim_input_shape,
            victim_output_targets,
            substitute_input_shape,
            s,
            substitute_model,
            substitute_input_size,
            seed_data_train,
            seed_data_test,
            transform,
            batch_size,
            num_workers,
            gpus,
            max_epochs,
            learning_rate,
        )
        cloudleak_attacks.append(attack)  # append mutates in place and returns None

    return cloudleak_attacks
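
# A hypothetical invocation of cloudleak, reusing the MNIST query function and EMNIST
# seed data from the other examples; the remaining values mirror the defaults of
# run_all_extraction (Example No. 8) and are illustrative only.
attacks = cloudleak(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    substitute_model=FourLayerClassifier,
    substitute_input_size=784,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    transform=None,
    batch_size=100,
    num_workers=4,
    gpus=1,
    max_epochs=10,
    learning_rate=1e-3,
)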
Example No. 5
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.four_layer import FourLayerClassifier
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# Note: train_mnist_inversion is assumed to be importable from the surrounding
# project; its module path is not shown in these examples.


def joint_train_inversion_model(dataset_train=None,
                                dataset_test=None,
                                data_dimensions=(1, 28, 28, 1),
                                max_epochs=None,
                                gpus=1,
                                t=1,
                                c=50):

    # The following is a proof of concept of Figure 4 from the paper
    # "Neural Network Inversion in Adversarial Setting via Background Knowledge Alignment"

    temp_model = train_four_layer_mnist_victim(gpus=gpus)

    def query_mnist(input_data):
        # PrivacyRaven provides built-in query functions
        return get_target(temp_model, input_data, (1, 28, 28, 1))

    forward_model = ModelExtractionAttack(
        query_mnist,
        1000,  # Query limit; less than the number of MNIST data points (60,000)
        (1, 28, 28, 1),
        10,
        (3, 1, 28, 28),  # Substitute model input shape
        "copycat",  # Synthesizer name
        FourLayerClassifier,
        784,  # 28 * 28 or the size of a single image
        dataset_train,
        dataset_test,
        gpus=gpus).substitute_model

    # Due to PrivacyRaven's black-box threat model, we first run a model extraction attack on
    # the target classifier to train a substitute model to which the user has white-box access.
    # Ideally, if the extraction succeeds, the substitute approximates the target classifier to
    # a reasonable degree of fidelity and accuracy. We then train the inversion model by using
    # the substitute model to label the auxiliary dataset, with the objective of minimizing the
    # MSE loss between the reconstructed and auxiliary datapoints.

    inversion_model = train_mnist_inversion(gpus=gpus,
                                            forward_model=forward_model,
                                            inversion_params={
                                                "nz": 10,
                                                "ngf": 128,
                                                "affine_shift": c,
                                                "truncate": t
                                            },
                                            max_epochs=max_epochs,
                                            batch_size=100)

    return forward_model, inversion_model
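
# A hypothetical call, assuming the EMNIST seed data used throughout these examples.
emnist_train, emnist_test = get_emnist_data()
forward_model, inversion_model = joint_train_inversion_model(
    dataset_train=emnist_train,
    dataset_test=emnist_test,
    max_epochs=10,  # Illustrative value
    gpus=1,
)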
Example No. 6
def copycats(
    query,
    query_limit,
    victim_input_shape,
    victim_output_targets,
    substitute_input_shape,
    substitute_model,
    substitute_input_size,
    seed_data_train,
    seed_data_test,
    transform,
    batch_size,
    num_workers,
    gpus,
    max_epochs,
    learning_rate,
):
    """Runs the CopyCat model extraction attack

    arXiv paper: https://arxiv.org/abs/1806.05476

    Presently, this function excludes subset sampling strategies"""

    synthesizer = "copycat"

    attack = ModelExtractionAttack(
        query,
        query_limit,
        victim_input_shape,
        victim_output_targets,
        substitute_input_shape,
        synthesizer,
        substitute_model,
        substitute_input_size,
        seed_data_train,
        seed_data_test,
        transform,
        batch_size,
        num_workers,
        gpus,
        max_epochs,
        learning_rate,
    )

    return attack
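
# Hypothetical usage; the arguments mirror the cloudleak example above.
attack = copycats(
    query_mnist, 100, (1, 28, 28, 1), 10, (3, 1, 28, 28),
    FourLayerClassifier, 784, emnist_train, emnist_test,
    None, 100, 4, 1, 10, 1e-3,
)
substitute = attack.substitute_model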
Example No. 7
    def extract_substitute(self):
        """Runs a model extraction attack and returns the resulting attack object"""
        extract = ModelExtractionAttack(
            self.query,
            self.query_limit,
            self.victim_input_shape,
            self.victim_output_targets,
            self.substitute_input_shape,
            self.synthesizer,
            self.substitute_model,
            self.substitute_input_size,
            self.seed_data_train,
            self.seed_data_test,
            self.transform,
            self.batch_size,
            self.num_workers,
            self.gpus,
            self.max_epochs,
            self.learning_rate,
        )
        return extract
Example No. 8
def run_all_extraction(
    query,
    query_limit=100,
    victim_input_shape=None,
    victim_output_targets=None,  # (targets)
    substitute_input_shape=None,
    substitute_model=None,
    substitute_input_size=1000,
    seed_data_train=None,
    seed_data_test=None,
    transform=None,
    batch_size=100,
    num_workers=4,
    gpus=1,
    max_epochs=10,
    learning_rate=1e-3,
):
    """Run all extraction attacks.

    This needs to be updated with the class signature."""

    # "copycat" and "hopskipjump" are the synthesizers used elsewhere in these examples
    synths = ["copycat", "hopskipjump"]

    attacks = []
    for s in synths:
        attacks.append(
            ModelExtractionAttack(
                query,
                query_limit,
                victim_input_shape,
                victim_output_targets,
                substitute_input_shape,
                s,
                substitute_model,
                substitute_input_size,
                seed_data_train,
                seed_data_test,
                transform,
                batch_size,
                num_workers,
                gpus,
                max_epochs,
                learning_rate,
            )
        )
    return attacks
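
# A hypothetical sweep over both synthesizers, relying on the function's defaults
# for the remaining arguments.
attacks = run_all_extraction(
    query_mnist,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    substitute_model=FourLayerClassifier,
    substitute_input_size=784,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
)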
Example No. 9
from pytorch_lightning.callbacks import Callback

# PrintTableMetricsCallback ships with PyTorch Lightning Bolts
from pl_bolts.callbacks import PrintTableMetricsCallback


class CustomCallback(Callback):
    def on_epoch_end(self, trainer, pl_module):
        print('End of epoch')


# Runs a model extraction attack with the user-defined CustomCallback passed as an argument.
# Note that parentheses are needed when passing in the callback, since PyTorch Lightning
# callbacks are classes that must be instantiated.

attack = ModelExtractionAttack(query=query_mnist,
                               query_limit=100,
                               victim_input_shape=(1, 28, 28, 1),
                               victim_output_targets=10,
                               substitute_input_shape=(3, 1, 28, 28),
                               synthesizer="copycat",
                               substitute_model_arch=FourLayerClassifier,
                               substitute_input_size=784,
                               seed_data_train=emnist_train,
                               seed_data_test=emnist_test,
                               gpus=1,
                               callback=CustomCallback())

# Many built-in PyTorch Lightning Bolts callbacks are already very useful. Consider the
# following example, which runs the same model extraction attack with the Bolts
# PrintTableMetricsCallback passed as an argument. After every epoch, a table should be
# displayed with all of the training metrics (e.g. training loss).
attack = ModelExtractionAttack(query=query_mnist,
                               query_limit=100,
                               victim_input_shape=(1, 28, 28, 1),
                               victim_output_targets=10,
                               substitute_input_shape=(3, 1, 28, 28),
                               synthesizer="copycat",
                               substitute_model_arch=FourLayerClassifier,
                               substitute_input_size=784,
                               seed_data_train=emnist_train,
                               seed_data_test=emnist_test,
                               gpus=1,
                               callback=PrintTableMetricsCallback())
Example No. 10
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.four_layer import FourLayerClassifier
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# Trains a 4-layer fully connected neural network on MNIST data using the user's GPUs.  See
# src/privacyraven/models/victims.py for a full set of supported parameters.

model = train_four_layer_mnist_victim(gpus=1)


# Create a query function for a target PyTorch Lightning model
def query_mnist(input_data):
    # PrivacyRaven provides built-in query functions
    return get_target(model, input_data, (1, 28, 28, 1))


# Obtain seed (or public) data to be used in extraction
emnist_train, emnist_test = get_emnist_data()

# Run a model extraction attack
attack = ModelExtractionAttack(
    query_mnist,
    200,  # Query limit; less than the number of MNIST data points (60,000)
    (1, 28, 28, 1),
    10,
    (3, 1, 28, 28),  # Substitute model input shape
    "hopskipjump",  # Synthesizer name; "copycat" also works
    FourLayerClassifier,
    784,  # 28 * 28 or the size of a single image
    emnist_train,
    emnist_test,
)
Example No. 11
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.extraction.synthesis import register_synth
from privacyraven.utils.query import reshape_input


@register_synth
def custom_synthesizer(data,
                       query,
                       query_limit,
                       victim_input_shape,
                       substitute_input_shape,
                       reshape=True):
    """Creates a synthetic dataset by labeling seed data"""
    x_data, _ = data  # Discard the seed labels; the victim's predictions replace them
    y_data = query(x_data)
    if reshape:
        x_data = reshape_input(x_data, substitute_input_shape)
    return x_data, y_data


# The synthesizer is referenced by the registered function's name.
attack = ModelExtractionAttack(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    synthesizer="custom_synthesizer",
    substitute_model_arch=FourLayerClassifier,
    substitute_input_size=784,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    gpus=1,
)
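
# register_synth is presumably a registry decorator: it records a synthesizer under its
# function name so that the string "custom_synthesizer" above resolves to the function.
# A rough, illustrative equivalent (not PrivacyRaven's actual implementation):
synths = {}

def register_synth(func):
    # Store the synthesizer keyed by its name for later lookup by string
    synths[func.__name__] = func
    return func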
Example No. 12
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.pytorch import ImagenetTransferLearning
from privacyraven.models.victim import train_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# Create a query function for a target PyTorch Lightning model
model = train_mnist_victim(gpus=0)


def query_mnist(input_data):
    # PrivacyRaven provides built-in query functions
    return get_target(model, input_data)


# Obtain seed (or public) data to be used in extraction
emnist_train, emnist_test = get_emnist_data()

# Run a Model Extraction Attack
attack = ModelExtractionAttack(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(1, 3, 28, 28),
    synthesizer="copycat",
    substitute_model_arch=ImagenetTransferLearning,
    substitute_input_size=1000,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    gpus=0,
)
Example No. 13
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.pytorch import ImagenetTransferLearning
from privacyraven.models.victim import train_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# Create a query function for a target PyTorch Lightning model
model = train_mnist_victim()


def query_mnist(input_data):
    # PrivacyRaven provides built-in query functions
    return get_target(model, input_data)


# Obtain seed (or public) data to be used in extraction
emnist_train, emnist_test = get_emnist_data()

# Run a Model Extraction Attack
attack = ModelExtractionAttack(
    query_mnist,
    100,
    (1, 28, 28, 1),
    10,
    (1, 3, 28, 28),
    "copycat",
    ImagenetTransferLearning,
    1000,
    emnist_train,
    emnist_test,
)
Example No. 14
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.models.four_layer import FourLayerClassifier
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# Trains a 4-layer fully connected neural network on MNIST data with the user's CPU.  See
# src/privacyraven/models/victims.py for a full set of supported parameters.

model = train_four_layer_mnist_victim(gpus=0)


# Create a query function for a target PyTorch Lightning model
def query_mnist(input_data):
    # PrivacyRaven provides built-in query functions
    return get_target(model, input_data, (1, 28, 28, 1))


# Obtain seed (or public) data to be used in extraction
emnist_train, emnist_test = get_emnist_data()

# Run a model extraction attack
attack = ModelExtractionAttack(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),  # EMNIST data point shape
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    synthesizer="copycat",
    substitute_model_arch=FourLayerClassifier,
    substitute_input_size=784,  # 28 * 28, the flattened image size
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    gpus=0,
)