# Example #1
def foolbox_generate_adversarial_example(
        label: int,
        create_model,
        input_fn: Callable[[], tf.Tensor],
        attack_fn: Callable[..., Attack],
        model_dir=None,
        checkpoint_path=None,
        preprocessing=(0, 1),
        channel_axis=1,
        bounds=(0, 1),
        attack_params=None,
        **kwargs,
) -> Optional[np.ndarray]:
    """Generate an adversarial example for one image with a foolbox attack.

    Args:
        label: ground-truth class of the image yielded by ``input_fn``.
        create_model: zero-argument factory returning the model callable.
        input_fn: builds the input tensor inside the fresh graph.
        attack_fn: foolbox attack constructor; when ``None`` the clean image
            is returned unchanged (with a leading batch axis).
        model_dir: directory searched for the latest checkpoint when
            ``checkpoint_path`` is not given.
        checkpoint_path: explicit checkpoint to restore.
        preprocessing, channel_axis, bounds: forwarded to ``TensorFlowModel``.
        attack_params: extra keyword arguments passed to the attack call.
        **kwargs: accepted for signature compatibility; currently unused.

    Returns:
        The adversarial example with a leading batch axis, or ``None`` when
        the attack fails to find one.

    Raises:
        ValueError: if no trained checkpoint can be located.
    """
    # Fresh dict per call: a mutable `{}` default would be shared across calls.
    if attack_params is None:
        attack_params = {}
    # Check that model has been trained.
    if not checkpoint_path:
        checkpoint_path = saver.latest_checkpoint(model_dir)
    if not checkpoint_path:
        raise ValueError(
            "Could not find trained model in model_dir: {}.".format(model_dir))

    with tf.Graph().as_default():
        features = input_fn()
        model = create_model()
        image_tensor = tf.placeholder(features.dtype, features.shape)
        logits = model(image_tensor)
        sm = SessionManager()
        with sm.prepare_session(
                master="",
                saver=tf.train.Saver(),
                checkpoint_filename_with_path=checkpoint_path,
                config=new_session_config(),
        ) as sess:
            # Use only the first image of the evaluated batch.
            image = sess.run(features)[0]

            attack_model = TensorFlowModel(
                image_tensor,
                logits,
                bounds=bounds,
                channel_axis=channel_axis,
                preprocessing=preprocessing,
            )
            if attack_fn is None:
                return image[np.newaxis]

            attack = attack_fn(attack_model)
            adversarial_example = attack(image, label=label, **attack_params)

            if adversarial_example is None:
                return None
            return adversarial_example[np.newaxis]
def forward_propagate_batch_feature(
    create_model,
    input_fn,
    model_dir: str,
    forward_fn: Callable[[tf.Tensor], tf.Tensor] = lambda logits: tf.argmax(logits, axis=1),
    data_format: str = "channels_first",
    parallel: int = 1,
    prediction_hooks = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """Run prediction over a batch and return both classes and features.

    Args:
        create_model: zero-argument factory; the returned callable must yield
            a ``(logits, feature)`` pair when applied to an image tensor.
        input_fn: estimator input function producing the prediction batch.
        model_dir: directory holding the trained checkpoint.
        forward_fn: maps logits to the "classes" prediction (argmax by default).
        data_format: "channels_first"/"channels_last"; auto-detected from CUDA
            availability when ``None``.
        parallel: thread parallelism forwarded to ``new_session_config``.
        prediction_hooks: optional ``SessionRunHook`` list for prediction.

    Returns:
        A ``(prediction, feature)`` pair of numpy arrays stacked over the
        batch.  (The original annotation ``Union[int, float]`` was wrong:
        the function has always returned this tuple.)

    Raises:
        RuntimeError: if ``model_dir`` does not exist.
    """

    def model_fn(features, labels, mode, params):
        image = features
        if isinstance(image, dict):
            image = features["image"]

        if mode == tf.estimator.ModeKeys.PREDICT:
            # create_model() is expected to return (logits, feature).
            logits, feature = create_model()(image, training=False)
            predictions = {
                "classes": forward_fn(logits),
                "feature": feature,
                }
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=prediction_hooks,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    model_function = model_fn
    if data_format is None:
        data_format = (
            "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
        )
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=parallel)
    )
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )

    result = list(classifier.predict(input_fn=input_fn))
    prediction = np.array([v["classes"] for v in result])
    feature = np.array([v["feature"] for v in result])
    return prediction, feature
# Example #3
def load_tf_model_resnet10(
    pytorch_model,
    model_dir=resnet10_ckpt_dir,
):
    """Copy ResNet-10 weights from a TF estimator checkpoint into a PyTorch model.

    Builds a predict-only estimator over the checkpoint in ``model_dir`` and
    hands it, together with ``pytorch_model``, to
    ``tf_to_pytorch_model_resnet10`` which performs the weight transfer.

    Raises:
        RuntimeError: if ``model_dir`` does not exist.
    """
    create_model = lambda: partial(
        ResNet10Cifar10(),
        training=False,
    )

    def model_fn(features, labels, mode, params):
        # Inputs may arrive either as a bare tensor or wrapped in a dict.
        image = features["image"] if isinstance(features, dict) else features
        if mode != tf.estimator.ModeKeys.PREDICT:
            return None
        logits = create_model()(image, training=False)
        predictions = {"classes": tf.argmax(logits, axis=1)}
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            prediction_hooks=None,
            export_outputs={
                "classify": tf.estimator.export.PredictOutput(predictions)
            },
        )

    resolved_dir = abspath(model_dir)
    if not os.path.exists(resolved_dir):
        raise RuntimeError(f"model directory {resolved_dir} is not existed")

    chosen_format = (
        "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
    )
    classifier = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=resolved_dir,
        params={"data_format": chosen_format},
        config=tf.estimator.RunConfig(
            session_config=new_session_config(parallel=1)),
    )

    return tf_to_pytorch_model_resnet10(pytorch_model, classifier)
# Example #4
def load_tf_model_resnet18(
    pytorch_model,
    testset,
    model_dir=resnet18_ckpt_dir,
):
    """Copy ResNet-18 weights from a TF estimator checkpoint into a PyTorch model.

    Builds a predict-only estimator over the checkpoint in ``model_dir`` and
    delegates the actual weight transfer to ``tf_to_pytorch_model_resnet18``.
    ``testset`` is accepted for interface compatibility but not used here.

    Raises:
        RuntimeError: if ``model_dir`` does not exist.
    """
    create_model = lambda: partial(
        ResNet18Cifar10(),
        training=False,
    )

    def model_fn(features, labels, mode, params):
        # Inputs may arrive either as a bare tensor or wrapped in a dict.
        image = features["image"] if isinstance(features, dict) else features
        if mode != tf.estimator.ModeKeys.PREDICT:
            return None
        logits = create_model()(image, training=False)
        predictions = {"classes": tf.argmax(logits, axis=1)}
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            prediction_hooks=None,
            export_outputs={
                "classify": tf.estimator.export.PredictOutput(predictions)
            },
        )

    resolved_dir = abspath(model_dir)
    if not os.path.exists(resolved_dir):
        raise RuntimeError(f"model directory {resolved_dir} is not existed")

    chosen_format = (
        "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
    )
    classifier = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=resolved_dir,
        params={"data_format": chosen_format},
        config=tf.estimator.RunConfig(
            session_config=new_session_config(parallel=1)),
    )

    return tf_to_pytorch_model_resnet18(pytorch_model, classifier)
def train(
    batch_size: int = 128,
    train_epochs: int = 100,
    epochs_between_evals: int = 10,
    multi_gpu: bool = False,
    label: str = label,
):
    """Train the CIFAR-10 model, evaluating every ``epochs_between_evals`` epochs.

    Args:
        batch_size: minibatch size for training and evaluation.
        train_epochs: total number of epochs to train.
        epochs_between_evals: epochs per train/evaluate cycle.
        multi_gpu: wrap the model_fn for multi-GPU replication when True.
        label: optional tag appended to the model directory name; defaults to
            the module-level ``label``.
    """
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
    tf.logging.set_verbosity(tf.logging.INFO)
    if label is None:
        model_dir = abspath(f"{root_dir}/model")
    else:
        model_dir = abspath(f"{root_dir}/model_{label}")
    data_dir = abspath(CIFAR10_PATH)
    ckpt_dir = abspath(f"{model_dir}/ckpts")

    model_function = cifar10_model_fn
    if multi_gpu:
        # There are two steps required if using multi-GPU: (1) wrap the model_fn,
        # and (2) wrap the optimizer. The first happens here, and (2) happens
        # in the model_fn itself when the optimizer is defined.
        model_function = tf.contrib.estimator.replicate_model_fn(
            model_function, loss_reduction=tf.losses.Reduction.MEAN)

    estimator_config = tf.estimator.RunConfig(
        save_checkpoints_secs=60 * 60,
        keep_checkpoint_max=None,
        session_config=new_session_config(parallel=0),
    )
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=ckpt_dir,
        config=estimator_config,
        params={
            "batch_size": batch_size,
            "multi_gpu": multi_gpu,
            "loss_scale": 1
        },
    )

    # Initialize so the final print cannot raise NameError when the loop
    # body never runs (train_epochs < epochs_between_evals).
    eval_results = None
    for epoch in range(train_epochs // epochs_between_evals):
        # Train the model
        def train_input_fn():
            return input_fn(
                is_training=True,
                data_dir=data_dir,
                batch_size=batch_size,
                num_epochs=epochs_between_evals,
            )

        # Set up training hook that logs the training accuracy every 100 steps.
        tensors_to_log = {"train_accuracy": "train_accuracy"}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        classifier.train(input_fn=train_input_fn, hooks=[logging_hook])

        # Evaluate the model and print results
        def eval_input_fn():
            return input_fn(
                is_training=False,
                data_dir=data_dir,
                batch_size=batch_size,
                num_epochs=epochs_between_evals,
            )

        eval_results = classifier.evaluate(input_fn=eval_input_fn)
    print(label)
    print("Evaluation results:\n\t%s" % eval_results)
    print()
def forward_propagate(
    create_model,
    input_fn,
    forward_fn: Callable[[tf.Tensor], tf.Tensor],
    model_dir: str,
    data_format: str = "channels_first",
    parallel: int = 1,
    prediction_hooks: List[SessionRunHook] = None,
    tag: str = "noop",
) -> Union[int, float]:
    """Predict over one batch and return the "classes" output of the first item.

    Also attaches a ``SummarySaverHook`` that writes the input images to
    tensorboard under ``input_{tag}``.

    Args:
        create_model: zero-argument factory returning the model callable.
        input_fn: estimator input function producing the prediction batch.
        forward_fn: maps logits to the "classes" prediction.
        model_dir: directory holding the trained checkpoint.
        data_format: "channels_first"/"channels_last"; auto-detected when None.
        parallel: thread parallelism forwarded to ``new_session_config``.
        prediction_hooks: optional extra hooks; the image-summary hook is
            added alongside them (the caller's list is not mutated).
        tag: name suffix for the tensorboard image summary.

    Raises:
        RuntimeError: if ``model_dir`` does not exist.
    """

    def model_fn(features, labels, mode, params):
        image = features
        if isinstance(image, dict):
            image = features["image"]

        # Save inputs for visualization in tensorboard.
        # NOTE(review): the transpose assumes NCHW input — confirm against callers.
        feature_trans = tf.transpose(image, perm=[0, 2, 3, 1])
        image_summary = tf.summary.image(f"input_{tag}", feature_trans, max_outputs=100)
        eval_summary_hook = tf.train.SummarySaverHook(
                            summary_op=image_summary,
                            save_steps=2,
                            output_dir=result_dir)
        # Build a new list instead of appending: appending mutated the
        # caller-supplied list and accumulated hooks across invocations.
        hooks = list(prediction_hooks or []) + [eval_summary_hook]

        if mode == tf.estimator.ModeKeys.PREDICT:
            logits = create_model()(image, training=False)
            predictions = {"classes": forward_fn(logits)}
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=hooks,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    model_function = model_fn
    if data_format is None:
        data_format = (
            "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
        )
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=parallel)
    )
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )

    result = list(classifier.predict(input_fn=input_fn))
    return result[0]["classes"]
# Example #7
def cw_generate_adversarial_example(
    label: int,
    create_model,
    input_fn: Callable[[], tf.Tensor],
    attack_fn: Callable[..., CWAttack],
    model_dir=None,
    checkpoint_path=None,
    norm_fn=None,
    channel_axis=1,
    bounds=(0, 1),
    image_size=28,
    class_num=10,
    data_format="channels_first",
    targeted_class: int = -1,
    **kwargs,
) -> Optional[np.ndarray]:
    """Generate an adversarial example with a Carlini-Wagner style attack.

    First runs the clean image through the model: if it is already
    misclassified, the clean image is returned as-is.  Otherwise a second
    graph is built and the CW attack is run (targeted when
    ``targeted_class != -1``, untargeted otherwise).

    Args:
        label: ground-truth class of the image yielded by ``input_fn``.
        create_model: zero-argument factory returning the model callable.
        input_fn: builds the input tensor inside each fresh graph.
        attack_fn: CW attack constructor.
        model_dir / checkpoint_path: checkpoint location (latest in
            ``model_dir`` when no explicit path is given).
        norm_fn: optional input-normalization applied before the model.
        channel_axis, bounds, image_size, class_num: attack/model geometry.
        data_format: "channels_first" inputs are transposed to NHWC for the
            attack and back for the returned example.
        targeted_class: target class, or -1 for an untargeted attack.
        **kwargs: forwarded to ``attack_fn``.

    Returns:
        The adversarial example batch (layout matching ``data_format``).

    Raises:
        ValueError: if no trained checkpoint can be located.
    """
    # Check that model has been trained.
    if not checkpoint_path:
        checkpoint_path = saver.latest_checkpoint(model_dir)
    if not checkpoint_path:
        raise ValueError(
            "Could not find trained model in model_dir: {}.".format(model_dir))

    with tf.Graph().as_default():
        features = input_fn()
        model = create_model()
        # Guard norm_fn like the second graph does: it defaults to None and
        # calling it unconditionally would raise TypeError.
        logit_tensor = model(
            norm_fn(features) if norm_fn is not None else features)
        sm = tf.train.SessionManager()
        with sm.prepare_session(
                master="",
                saver=tf.train.Saver(),
                checkpoint_filename_with_path=checkpoint_path,
                config=new_session_config(),
        ) as sess:
            image, logits = sess.run([features, logit_tensor])
            logits = logits[0]
            predict = np.argmax(logits)
            # Already misclassified: no attack needed.
            if predict != label:
                return image

    with tf.Graph().as_default():
        features = input_fn()
        model = create_model()

        with tf.Session(config=new_session_config()) as sess:

            def attack_model(x):
                with restore_scope(sess, checkpoint_path):
                    if norm_fn is not None:
                        x = norm_fn(x)
                    if data_format == "channels_first":
                        # Attack works in NHWC; model expects NCHW.
                        logits = model(tf.transpose(x, [0, 3, 1, 2]))
                    else:
                        logits = model(x)
                    return logits

            attack = attack_fn(
                model=LogitsFnWrapper(
                    num_channels=channel_axis,
                    image_size=image_size,
                    num_labels=class_num,
                    logits_fn=attack_model,
                ),
                sess=sess,
                targeted=(targeted_class != -1),
                boxmin=bounds[0],
                boxmax=bounds[1],
                **kwargs,
            )
            if data_format == "channels_first":
                image = sess.run(tf.transpose(features, [0, 2, 3, 1]))
            else:
                image = sess.run(features)
            # One-hot of the true label (untargeted) or target class (targeted).
            adversarial_example = attack.attack(
                image,
                np.expand_dims(
                    (np.arange(class_num) == label).astype(np.float32), axis=0)
                if targeted_class == -1 else np.expand_dims(
                    (np.arange(class_num) == targeted_class).astype(
                        np.float32),
                    axis=0),
            )
            if data_format == "channels_first":
                return np.transpose(adversarial_example, (0, 3, 1, 2))
            else:
                return adversarial_example
def train(
    batch_size: int = 64,
    train_epochs: int = 10,
    data_format: str = "channels_first",
    multi_gpu: bool = False,
    label: str = None,
):
    """Train the MNIST model, evaluating once per epoch.

    Args:
        batch_size: minibatch size for training and evaluation.
        train_epochs: number of train/evaluate cycles to run.
        data_format: "channels_first"/"channels_last"; auto-detected when None.
        multi_gpu: wrap the model_fn for multi-GPU replication when True.
        label: optional tag appended to the model directory name.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if label is None:
        model_dir = abspath(f"{root_dir}/model")
    else:
        model_dir = abspath(f"{root_dir}/model_{label}")
    ckpt_dir = os.path.join(model_dir, "ckpts")
    data_dir = abspath(MNIST_PATH)

    model_function = model_fn

    if multi_gpu:
        validate_batch_size_for_multi_gpu(batch_size)

        # There are two steps required if using multi-GPU: (1) wrap the model_fn,
        # and (2) wrap the optimizer. The first happens here, and (2) happens
        # in the model_fn itself when the optimizer is defined.
        model_function = tf.contrib.estimator.replicate_model_fn(
            model_fn, loss_reduction=tf.losses.Reduction.MEAN)

    if data_format is None:
        data_format = ("channels_first"
                       if tf.test.is_built_with_cuda() else "channels_last")
    estimator_config = tf.estimator.RunConfig(
        keep_checkpoint_max=None,
        session_config=new_session_config(parallel=0))
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=ckpt_dir,
        params={
            "data_format": data_format,
            "multi_gpu": multi_gpu,
            "batch_size": batch_size,
        },
        config=estimator_config,
    )

    # Initialize so the final print cannot raise NameError when
    # train_epochs == 0 and the loop body never runs.
    eval_results = None
    for epoch in range(train_epochs):
        # Train the model
        def train_input_fn():
            # When choosing shuffle buffer sizes, larger sizes result in better
            # randomness, while smaller sizes use less memory. MNIST is a small
            # enough dataset that we can easily shuffle the full epoch.
            ds = mnist.train(
                data_dir,
                transforms=transforms,
            )
            ds = ds.cache().shuffle(buffer_size=60000).batch(batch_size)
            return ds

        # Set up training hook that logs the training accuracy every 100 steps.
        tensors_to_log = {"train_accuracy": "train_accuracy"}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        classifier.train(input_fn=train_input_fn, hooks=[logging_hook])

        # Evaluate the model and print results
        def eval_input_fn():
            return (mnist.test(data_dir, ).batch(
                batch_size).make_one_shot_iterator().get_next())

        eval_results = classifier.evaluate(input_fn=eval_input_fn)
    print(label)
    print("Evaluation results:\n\t%s" % eval_results)
    print()