예제 #1
0
    def forward_one_image(class_id: int, image_id: int) -> Dict[str, Any]:
        """Predict the label of one MNIST test image of `class_id`, once per
        (transform, name) pair in the enclosing `transforms` iterable.

        NOTE(review): despite the `Dict[str, Any]` annotation this function
        returns None — `predicted_label` is computed but never used or
        returned. Confirm whether the return was lost in truncation.
        """
        nonlocal model_dir
        mode.check(False)  # project-level mode guard (asserts not in check mode)
        data_dir = abspath(MNIST_PATH)
        model_dir = abspath(model_dir)
        ckpt_dir = f"{model_dir}/ckpts"
        create_model = lambda: LeNet(data_format="channels_first")
        graph = LeNet.graph().load()
        # model_fn = partial(
        #     model_fn_with_fetch_hook,
        #     create_model=create_model, graph=graph
        # )

        for transform, name in transforms:
            # Pipeline: keep only images labelled class_id, skip image_id of
            # them, take a single example, batch it as a size-1 batch.
            # The lambda is invoked inside this loop iteration, so the
            # late-binding capture of `transform` is safe here.
            predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: mnist.test(
                                            data_dir,
                                            transforms = transform
                                            )
                .filter(
                    lambda image, label: tf.equal(
                        tf.convert_to_tensor(class_id, dtype=tf.int32), label
                    )
                )
                .skip(image_id)
                .take(1)
                .batch(1),
                model_dir=ckpt_dir,
                tag = name,
            )
def save_adversarial_feature_batch(
    attack_name,
    image_id_index,
    batch_size,
    class_id,
    model_dir=model_dir,
    graph_dir="result/test",
    dataset_mode=dataset_mode,
    images_per_class=1,
    adversarial_dir="result/test",
    **kwargs,
):
    """Forward a batch of previously saved adversarial examples through the
    feature-extraction ResNet10 and return (feature, label, prediction).

    Examples are loaded per image id via `resnet10_cifar10_example`; ids
    whose example failed to generate (load() is None) are dropped.
    """
    data_dir = abspath(CIFAR10_PATH)  # kept from original flow; not read below
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"

    def create_model():
        return partial(ResNet10Cifar10_Feature(), training=False)

    graph = ResNet10Cifar10_Feature.graph().load()

    # Never read past the end of this class's image budget.
    batch_size = min(batch_size, images_per_class - image_id_index)

    loaded = []
    for image_id in range(image_id_index, image_id_index + batch_size):
        example = resnet10_cifar10_example(
            attack_name=attack_name,
            attack_fn=None,
            generate_adversarial_fn=None,
            class_id=class_id,
            image_id=image_id,
            # model_dir not ckpt_dir
            model_dir=model_dir,
            transforms=None,
            transform_name="noop",
            dataset_mode=dataset_mode,
        ).load()
        if example is not None:
            loaded.append(example)
    adversarial_examples = np.concatenate(loaded)

    adversarial_prediction, feature = forward_propagate_batch_feature(
        create_model=create_model,
        input_fn=lambda: tf.data.Dataset.from_tensors(adversarial_examples),
        model_dir=ckpt_dir,
    )

    label = np.repeat([class_id], adversarial_examples.shape[0])
    return feature, label, adversarial_prediction
def save_training_feature_batch(
    image_id_index,
    batch_size,
    class_id,
    model_dir=model_dir,
    transforms=None,
    transform_name="noop",
    graph_dir="result/test",
    dataset_mode=dataset_mode,
    images_per_class=1,
    **kwargs,
):
    """Forward one batch of CIFAR-10 images of `class_id` through the
    feature-extraction ResNet10 and return (feature, label, prediction)."""
    data_dir = abspath(CIFAR10_PATH)
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"

    def create_model():
        return partial(ResNet10Cifar10_Feature(), training=False)

    graph = ResNet10Cifar10_Feature.graph().load()

    # Clamp so the batch never runs past this class's image budget.
    batch_size = min(batch_size, images_per_class - image_id_index)

    def batch_input_fn():
        # Pipeline: keep only images labelled class_id, skip the first
        # image_id_index of them, take batch_size, batch, return images only.
        dataset = input_fn_for_adversarial_examples(
            is_training=(dataset_mode == "train"),
            data_dir=data_dir,
            num_parallel_batches=1,
            is_shuffle=False,
            transform_fn=None,
        ).filter(
            lambda image, label: tf.equal(
                tf.convert_to_tensor(class_id, dtype=tf.int32), label
            )
        )
        batch = (
            dataset.skip(image_id_index)
            .take(batch_size)
            .batch(batch_size)
            .make_one_shot_iterator()
            .get_next()
        )
        return batch[0]

    prediction, feature = forward_propagate_batch_feature(
        create_model=create_model,
        input_fn=batch_input_fn,
        model_dir=ckpt_dir,
    )

    label = np.repeat([class_id], batch_size)
    return feature, label, prediction
예제 #4
0
def rename_checkpoint(
    checkpoint_dir, replace_from, replace_to, add_prefix=None, dry_run=False
):
    """Rename variables inside a TF checkpoint.

    Each variable name has `replace_from` substituted by `replace_to`
    (when both are given) and `add_prefix` prepended (when given). With
    dry_run=True the mapping is only printed; otherwise the renamed
    variables are re-saved over the existing checkpoint.
    """
    checkpoint_dir = abspath(checkpoint_dir)
    checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)
    with tf.Session() as sess:
        for var_name, _ in tf.contrib.framework.list_variables(checkpoint_dir):
            # Pull the variable's value out of the checkpoint.
            var = tf.contrib.framework.load_variable(checkpoint_dir, var_name)

            # Compute the new name.
            new_name = var_name
            if replace_from is not None and replace_to is not None:
                new_name = new_name.replace(replace_from, replace_to)
            if add_prefix:
                new_name = add_prefix + new_name

            if dry_run:
                print("%s would be renamed to %s." % (var_name, new_name))
            else:
                print("Renaming %s to %s." % (var_name, new_name))
                # Materialize the value under the new name in the default
                # graph so the saver below writes it out.
                tf.Variable(var, name=new_name)

        if not dry_run:
            # Persist the renamed variables back into the checkpoint path.
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.save(sess, checkpoint.model_checkpoint_path)
 def get_trace() -> AttrMap:
     """Reconstruct the static LeNet trace from the latest TF checkpoint.

     NOTE(review): reads `threshold` / `threshold_to_density` from the
     enclosing scope — confirm both are bound before this is called.
     """
     return reconstruct_static_trace_from_tf(
         model_fn=lambda: LeNet(),
         input_fn=lambda: tf.placeholder(tf.float32, shape=(1, 1, 28, 28)),
         model_dir=tf.train.latest_checkpoint(abspath("tf/lenet/model/")),
         density=threshold_to_density[threshold],
     )
def forward_propagate_batch_feature(
    create_model,
    input_fn,
    model_dir: str,
    forward_fn: Callable[[tf.Tensor], tf.Tensor] = lambda logits: tf.argmax(logits, axis=1),
    data_format: str = "channels_first",
    parallel: int = 1,
    prediction_hooks = None,
) -> "tuple[np.ndarray, np.ndarray]":
    """Run a prediction-only pass over `input_fn` and collect features.

    The model built by `create_model` must return `(logits, feature)` when
    called in inference mode. `forward_fn` maps logits to the reported
    class (argmax by default).

    Returns:
        (prediction, feature): two np.ndarray stacks, one entry per
        predicted example.

    Raises:
        RuntimeError: if `model_dir` does not exist.

    Fix: the original annotated the return type as ``Union[int, float]``,
    but the function has always returned a 2-tuple of arrays.
    """

    def model_fn(features, labels, mode, params):
        # Estimator.predict only ever invokes model_fn in PREDICT mode, so
        # the other modes are deliberately unhandled (implicitly None).
        image = features
        if isinstance(image, dict):
            image = features["image"]

        if mode == tf.estimator.ModeKeys.PREDICT:
            logits, feature = create_model()(image, training=False)
            predictions = {
                "classes": forward_fn(logits),
                "feature": feature,
                }
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=prediction_hooks,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    model_function = model_fn
    if data_format is None:
        # Auto-pick the layout that matches the build (GPU prefers NCHW).
        data_format = (
            "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
        )
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=parallel)
    )
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )

    # Materialize the generator so both arrays come from one pass.
    result = list(classifier.predict(input_fn=input_fn))
    prediction = np.array([v["classes"] for v in result])
    feature = np.array([v["feature"] for v in result])
    return prediction, feature
예제 #7
0
def load_tf_model_resnet10(
    pytorch_model,
    model_dir=resnet10_ckpt_dir,
):
    """Copy the weights of the ResNet10 TF checkpoint into `pytorch_model`.

    Builds a prediction-only Estimator over the checkpoint directory and
    delegates the actual tensor transfer to `tf_to_pytorch_model_resnet10`.
    """

    def create_model():
        return partial(ResNet10Cifar10(), training=False)

    def model_fn(features, labels, mode, params):
        # Only PREDICT mode is supported; other modes fall through to None.
        image = features["image"] if isinstance(features, dict) else features
        if mode == tf.estimator.ModeKeys.PREDICT:
            logits = create_model()(image, training=False)
            predictions = {"classes": tf.argmax(logits, axis=1)}
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=None,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    if tf.test.is_built_with_cuda():
        data_format = "channels_first"
    else:
        data_format = "channels_last"
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=1)
    )
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )

    pytorch_model = tf_to_pytorch_model_resnet10(pytorch_model, classifier)
    return pytorch_model
예제 #8
0
def eval(batch_size: int = 64,
         data_format: str = "channels_first",
         label: "str | None" = None):
    """Evaluate the trained MNIST model on its test set and print results.

    NOTE(review): this function shadows the builtin ``eval`` at module
    scope — renaming would break callers, so it is only flagged here.

    Args:
        batch_size: evaluation batch size.
        data_format: "channels_first"/"channels_last"; when None, picked
            from whether TF was built with CUDA.
        label: optional model variant suffix selecting `model_{label}`.
    """
    # tf.logging.set_verbosity(tf.logging.INFO)
    if label is None:
        model_dir = abspath(f"{root_dir}/model")
    else:
        model_dir = abspath(f"{root_dir}/model_{label}")
    ckpt_dir = os.path.join(model_dir, "ckpts")
    data_dir = abspath(MNIST_PATH)

    # model_fn is a module-level model function (defined elsewhere in file).
    model_function = model_fn

    if data_format is None:
        data_format = ("channels_first"
                       if tf.test.is_built_with_cuda() else "channels_last")
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True  # don't grab all GPU memory
    estimator_config = tf.estimator.RunConfig(session_config=session_config)
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=ckpt_dir,
        params={
            "data_format": data_format,
            "batch_size": batch_size
        },
        config=estimator_config,
    )

    # Evaluate the model and print results
    def eval_input_fn():
        return (mnist.test(data_dir).batch(
            batch_size).make_one_shot_iterator().get_next())

    eval_results = classifier.evaluate(input_fn=eval_input_fn)
    print(label)
    print("Evaluation results:\n\t%s" % eval_results)
    print()
예제 #9
0
def save_graph(
    attack_name: str,
    attack_fn,
    generate_adversarial_fn,
    select_fn: Callable[[np.ndarray], np.ndarray],
    image_id_index,
    batch_size,
    class_id,
    # model_dir = "result/lenet/model_augmentation",
    model_dir=model_dir,
    transforms=None,
    transform_name="noop",
    graph_dir="result/test",
    dataset_mode=dataset_mode,
    images_per_class=1,
    compute_adversarial=True,
    **kwargs,
):
    """Save per-image computation graphs for a batch of CIFAR-10 images.

    For one batch of images of `class_id` (and, when compute_adversarial
    is True, their saved adversarial counterparts) this reconstructs the
    trace through ResNet10, writing graphs under `graph_dir` via the
    IOBatchAction saver hooked into model_fn_with_fetch_hook.

    NOTE(review): `trace` / `adversarial_trace` are computed but not
    returned — saving appears to happen as a side effect of the graph
    saver; confirm against `model_fn_with_fetch_hook`.
    """

    # mode.check(False)
    data_dir = abspath(CIFAR10_PATH)
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"
    create_model = lambda: partial(
        ResNet10Cifar10(),
        training=False,
    )
    graph = ResNet10Cifar10.graph().load()

    # Clamp so the batch never runs past this class's image budget.
    batch_size = min(batch_size, images_per_class - image_id_index)

    # Predict the clean batch: filter by label == class_id, skip to
    # image_id_index, take/batch batch_size, and feed images only ([0]).
    predicted_label = predict_batch(
        create_model=create_model,
        input_fn=lambda: (input_fn_for_adversarial_examples(
            is_training=(dataset_mode == "train"),
            data_dir=data_dir,
            num_parallel_batches=1,
            is_shuffle=False,
            transform_fn=None,
        ).filter(lambda image, label: tf.equal(
            tf.convert_to_tensor(class_id, dtype=tf.int32), label)).skip(
                image_id_index).take(batch_size).batch(batch_size).
                          make_one_shot_iterator().get_next()[0]),
        model_dir=ckpt_dir,
    )
    # An image only counts if the clean model classifies it correctly.
    prediction_valid = (predicted_label == class_id)
    if compute_adversarial:
        # The shape of each example is (1, 32, 32, 3)
        adversarial_examples = [
            resnet10_cifar10_example(
                attack_name=attack_name,
                attack_fn=attack_fn,
                generate_adversarial_fn=generate_adversarial_fn,
                class_id=class_id,
                image_id=image_id,
                # model_dir not ckpt_dir
                model_dir=model_dir,
                transforms=transforms,
                transform_name=transform_name,
                dataset_mode=dataset_mode,
            ).load()
            for image_id in range(image_id_index, image_id_index + batch_size)
        ]

        # Mask of which ids actually have a stored adversarial example;
        # missing ones are replaced by zero images to keep the batch dense.
        adversarial_valid = np.array(
            [example is not None for example in adversarial_examples])
        adversarial_examples = [
            example if example is not None else np.zeros((1, 32, 32, 3))
            for example in adversarial_examples
        ]
        adversarial_examples = np.squeeze(
            np.array(adversarial_examples).astype(np.float32), axis=1)

        # adversarial_example is [0, 1] of shape (1, 32, 32, 3)
        adversarial_predicted_label = predict_batch(
            create_model=create_model,
            input_fn=lambda: tf.data.Dataset.from_tensors(adversarial_examples
                                                          ),
            model_dir=ckpt_dir,
        )
        # A successful attack must flip the prediction away from class_id.
        adversarial_prediction_valid = adversarial_predicted_label != class_id

    if compute_adversarial:
        # Elementwise AND over boolean arrays via multiplication.
        batch_valid = (prediction_valid * adversarial_valid *
                       adversarial_prediction_valid)
    else:
        batch_valid = (prediction_valid)

    original_graph_dir = os.path.join(graph_dir, f"original_{transform_name}",
                                      f"{class_id}")
    original_graph_saver = IOBatchAction(
        dir=original_graph_dir,
        root_index=image_id_index,
    )
    original_model_fn = partial(
        model_fn_with_fetch_hook,
        create_model=create_model,
        graph=graph,
        graph_saver=original_graph_saver,
        batch_valid=batch_valid,
    )

    # Reconstruct (and thereby save) graphs for the clean batch; the input
    # pipeline is rebuilt identically to the prediction pass above.
    trace = reconstruct_trace_from_tf(
        class_id=class_id,
        model_fn=original_model_fn,
        input_fn=lambda: (input_fn_for_adversarial_examples(
            is_training=(dataset_mode == "train"),
            data_dir=data_dir,
            num_parallel_batches=1,
            is_shuffle=False,
            transform_fn=None,
        ).filter(lambda image, label: tf.equal(
            tf.convert_to_tensor(class_id, dtype=tf.int32), label)).skip(
                image_id_index).take(batch_size).batch(batch_size).
                          make_one_shot_iterator().get_next()[0]),
        select_fn=select_fn,
        model_dir=ckpt_dir,
    )
    if compute_adversarial:
        adversarial_graph_dir = os.path.join(
            graph_dir, f"{attack_name}_{transform_name}", f"{class_id}")
        adversarial_graph_saver = IOBatchAction(
            dir=adversarial_graph_dir,
            root_index=image_id_index,
        )
        adversarial_model_fn = partial(
            model_fn_with_fetch_hook,
            create_model=create_model,
            graph=graph,
            graph_saver=adversarial_graph_saver,
            batch_valid=batch_valid,
        )

        # Same reconstruction for the adversarial batch (no class filter:
        # predictions there are expected to differ from class_id).
        adversarial_trace = reconstruct_trace_from_tf(
            model_fn=adversarial_model_fn,
            input_fn=lambda: tf.data.Dataset.from_tensors(adversarial_examples
                                                          ),
            select_fn=select_fn,
            model_dir=ckpt_dir,
        )
예제 #10
0
def load_tf_model_resnet18(
    pytorch_model,
    testset,
    model_dir=resnet18_ckpt_dir,
):
    """Copy the weights of the ResNet18 TF checkpoint into `pytorch_model`.

    Builds a prediction-only Estimator over the checkpoint directory and
    delegates the transfer to `tf_to_pytorch_model_resnet18`.

    NOTE(review): `testset` is accepted but never used here — presumably a
    leftover from the commented-out accuracy check below; confirm before
    removing.
    """
    create_model = lambda: partial(
        ResNet18Cifar10(),
        training=False,
    )

    def model_fn(features, labels, mode, params):
        # Only PREDICT mode is handled; other modes fall through to None.
        image = features
        if isinstance(image, dict):
            image = features["image"]
        forward_fn = lambda logits: tf.argmax(logits, axis=1)
        if mode == tf.estimator.ModeKeys.PREDICT:
            # logits, input, feat = create_model()(image, training=False)
            logits = create_model()(image, training=False)
            predictions = {
                "classes": forward_fn(logits),
                # "input": input,
                # "feat": feat,
            }
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=None,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    model_function = model_fn
    data_format = ("channels_first"
                   if tf.test.is_built_with_cuda() else "channels_last")
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=1))
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )
    # train_var_names = classifier.get_variable_names()
    # for name in sorted(train_var_names):
    #     print(name)

    # pred = classifier.predict(input_fn=input_fn)
    # result = [v for v in pred]
    # result = result[0]

    # Test tf model accuracy
    # correct = 0
    # for class_id in range(10):
    #     for image_id in range(0, 100, 1000):
    #         input_fn=lambda: (
    #             input_fn_for_adversarial_examples(
    #                 is_training= False,
    #                 data_dir=abspath(CIFAR10_PATH),
    #                 num_parallel_batches=1,
    #                 is_shuffle=False,
    #                 transform_fn=None,
    #             )
    #             .filter(
    #                 lambda image, label: tf.equal(
    #                     tf.convert_to_tensor(class_id, dtype=tf.int32), label
    #                 )
    #             )
    #             .skip(image_id)
    #             .take(100)
    #             .batch(100)
    #             .make_one_shot_iterator()
    #             .get_next()[0]
    #         )
    #         pred = classifier.predict(input_fn=input_fn)
    #         result = np.array([v["classes"] for v in pred])
    #         correct += (result==class_id).sum()
    # print(correct/1e3)
    # st()

    # train_var_names = classifier.get_variable_names()
    # for name in sorted(train_var_names):
    #     print(name)
    # print(pytorch_model)

    pytorch_model = tf_to_pytorch_model_resnet18(pytorch_model, classifier)

    return pytorch_model
예제 #11
0
    assign_bn(tf_classifier, "batch_normalization_6", model.layer4[0].bn1)
    assign_conv(tf_classifier, "conv2d_10", model.layer4[0].downsample[0])
    assign_conv(tf_classifier, "conv2d_11", model.layer4[0].conv1)
    assign_bn(tf_classifier, "batch_normalization_7", model.layer4[0].bn2)
    assign_conv(tf_classifier, "conv2d_12", model.layer4[0].conv2)

    assign_bn(tf_classifier, "batch_normalization_8", model.bn_last)
    assign_fc(tf_classifier, "dense", model.fc)

    return model


# Module-level sample input: the first CIFAR-10 test image of class 0,
# served as a single-example batch (filter label == 0, skip 0, take 1,
# batch 1, return images only via [0]).
input_fn = lambda: (input_fn_for_adversarial_examples(
    is_training=False,
    data_dir=abspath(CIFAR10_PATH),
    num_parallel_batches=1,
    is_shuffle=False,
    transform_fn=None,
).filter(lambda image, label: tf.equal(tf.convert_to_tensor(0, dtype=tf.int32),
                                       label)).skip(0).take(1).batch(1).
                    make_one_shot_iterator().get_next()[0])


def load_tf_model_resnet10(
    pytorch_model,
    model_dir=resnet10_ckpt_dir,
):
    create_model = lambda: partial(
        ResNet10Cifar10(),
        training=False,
예제 #12
0
def save_trace(
    class_id,
    image_id,
    select_fn: Callable[[np.ndarray], np.ndarray],
    class_dir,
    graph_dir,
    create_model,
    graph,
    per_node: bool = False,
    images_per_class: int = 1,
    num_gpus: float = 1,
    model_dir=resnet18_dir,
    transforms=None,
    transform_name="noop",
    save_dir="result/test",
    dataset_mode=dataset_mode,
    **kwargs,
):
    """Save the trace graph for one CIFAR-10 image of `class_id`.

    Predicts the image first; if misclassified, returns an empty result
    ([{}] when per_node else {}). Otherwise reconstructs and saves the
    trace graph, then recomputes the single-image trace from the saved
    pickle (returning None if the pickle was not written).

    Fixes vs. original:
      * removed a dead module-shadowing `input_fn` local that was never
        called (and read `data_dir` before it was assigned);
      * `time.clock()` (removed in Python 3.8) -> `time.perf_counter()`;
      * the identical single-image input pipeline is now built once.

    NOTE(review): `use_class_trace` and `attack_name` are read from module
    scope — confirm they are defined wherever this is imported.
    """
    mode.check(False)  # project-level mode guard
    data_dir = abspath(CIFAR10_PATH)
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"

    def single_image_input_fn():
        # Pipeline: keep images labelled class_id, skip to image_id, take
        # one, batch it, and feed images only ([0] drops the labels).
        return (
            input_fn_for_adversarial_examples(
                is_training=(dataset_mode == "train"),
                data_dir=data_dir,
                num_parallel_batches=1,
                is_shuffle=False,
                transform_fn=None,
            )
            .filter(
                lambda image, label: tf.equal(
                    tf.convert_to_tensor(class_id, dtype=tf.int32), label
                )
            )
            .skip(image_id)
            .take(1)
            .batch(1)
            .make_one_shot_iterator()
            .get_next()[0]
        )

    start = time.perf_counter()
    predicted_label = predict_batch(
        create_model=create_model,
        input_fn=single_image_input_fn,
        model_dir=ckpt_dir,
    )
    predicted_label = predicted_label[0]
    if predicted_label != class_id:
        # Misclassified clean image: nothing to trace.
        return [{}] if per_node else {}
    print(f"prediction {time.perf_counter() - start}s")

    original_graph_dir = os.path.join(graph_dir, f"original_{transform_name}",
                                      f"{class_id}")

    original_graph_saver = IOBatchAction(
        dir=original_graph_dir,
        root_index=image_id,
    )
    original_model_fn = partial(
        model_fn_with_fetch_hook,
        create_model=create_model,
        graph=graph,
        graph_saver=original_graph_saver,
        batch_valid=[1],  # single image, always valid at this point
    )
    # Reconstruct (and thereby save, via the graph saver) the trace graph.
    trace = reconstruct_trace_from_tf(
        class_id=class_id,
        model_fn=original_model_fn,
        input_fn=single_image_input_fn,
        select_fn=select_fn,
        model_dir=ckpt_dir,
    )
    print(f"Saved graph")

    graph_path = os.path.join(graph_dir, f"original_{transform_name}",
                              f"{class_id}", f"{image_id}.pkl")
    if not os.path.exists(graph_path):
        # Graph was not written (e.g. filtered out by the saver).
        return

    single_graph = IOObjAction(graph_path).load()
    if use_class_trace:
        class_trace_avg = ClassTraceIOAction(class_dir, predicted_label).load()
        assert class_trace_avg is not None
    else:
        class_trace_avg = None

    start = time.perf_counter()
    single_trace = reconstruct_trace_from_tf_to_trace(
        single_graph,
        class_id=(class_id if attack_name == "original" else None),
        select_fn=select_fn,
        class_trace=class_trace_avg,
        debug=False,
    )
    print(f"compute original trace {time.perf_counter() - start}s")
예제 #13
0
def train(
    batch_size: int = 128,
    # train_epochs: int = 1,
    # epochs_between_evals: int = 1,
    train_epochs: int = 100,
    epochs_between_evals: int = 10,
    multi_gpu: bool = False,
    label: str = label,
):
    """Train the CIFAR-10 model, evaluating every `epochs_between_evals`.

    Checkpoints go under `{root_dir}/model[_{label}]/ckpts`. The final
    evaluation result is printed after the training loop.

    NOTE(review): if train_epochs // epochs_between_evals == 0 the loop
    never runs and the final `eval_results` reference raises NameError.
    """
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
    tf.logging.set_verbosity(tf.logging.INFO)
    if label is None:
        model_dir = abspath(f"{root_dir}/model")
    else:
        model_dir = abspath(f"{root_dir}/model_{label}")
    # data_dir = abspath("/home/yxqiu/data/cifar10-raw")
    data_dir = abspath(CIFAR10_PATH)
    ckpt_dir = abspath(f"{model_dir}/ckpts")

    model_function = cifar10_model_fn
    if multi_gpu:
        # There are two steps required if using multi-GPU: (1) wrap the model_fn,
        # and (2) wrap the optimizer. The first happens here, and (2) happens
        # in the model_fn itself when the optimizer is defined.
        model_function = tf.contrib.estimator.replicate_model_fn(
            model_function, loss_reduction=tf.losses.Reduction.MEAN)

    estimator_config = tf.estimator.RunConfig(
        save_checkpoints_secs=60 * 60,  # checkpoint at most hourly
        keep_checkpoint_max=None,  # keep every checkpoint
        session_config=new_session_config(parallel=0),
    )
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=ckpt_dir,
        config=estimator_config,
        params={
            "batch_size": batch_size,
            "multi_gpu": multi_gpu,
            "loss_scale": 1
        },
    )

    for epoch in range(train_epochs // epochs_between_evals):
        # Train the model
        def train_input_fn():
            input = input_fn(
                is_training=True,
                data_dir=data_dir,
                batch_size=batch_size,
                num_epochs=epochs_between_evals,
            )
            return input

        # Set up training hook that logs the training accuracy every 100 steps.
        tensors_to_log = {"train_accuracy": "train_accuracy"}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        classifier.train(input_fn=train_input_fn, hooks=[logging_hook])

        # Evaluate the model and print results
        def eval_input_fn():
            return input_fn(
                is_training=False,
                data_dir=data_dir,
                batch_size=batch_size,
                num_epochs=epochs_between_evals,
            )

        eval_results = classifier.evaluate(input_fn=eval_input_fn)
    # Only the last epoch's evaluation is printed.
    print(label)
    print("Evaluation results:\n\t%s" % eval_results)
    print()
예제 #14
0
        def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
            """Build one detection-result row for an (class_id, image_id) pair.

            Predicts the clean image, loads its adversarial example,
            reconstructs both traces, and runs the reduced-edge detectors
            on each. Returns an empty result ([{}] when per_node else {})
            at any early-exit: clean misclassification, missing adversarial
            example, unflipped adversarial prediction, or missing trace.

            NOTE(review): relies on many enclosing-scope names (attack_name,
            select_fn, per_node, per_channel, transforms, transform_name,
            class_trace_fn, reduce_mode) — confirm they are bound in the
            outer function.
            """
            nonlocal model_dir
            mode.check(False)  # project-level mode guard
            data_dir = abspath(MNIST_PATH)
            model_dir = abspath(model_dir)
            ckpt_dir = f"{model_dir}/ckpts"
            create_model = lambda: LeNet(data_format="channels_first")
            graph = LeNet.graph().load()
            model_fn = partial(model_fn_with_fetch_hook,
                               create_model=create_model,
                               graph=graph)

            # Pipeline: keep images labelled class_id, pick the image_id-th.
            predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: mnist.test(data_dir).filter(
                    lambda image, label: tf.equal(
                        tf.convert_to_tensor(class_id, dtype=tf.int32), label))
                .skip(image_id).take(1).batch(1),
                model_dir=ckpt_dir,
            )
            if predicted_label != class_id:
                return [{}] if per_node else {}

            adversarial_example = lenet_mnist_example(
                attack_name=attack_name,
                attack_fn=attack_fn,
                generate_adversarial_fn=generate_adversarial_fn,
                class_id=class_id,
                image_id=image_id,
                # model_dir not ckpt_dir
                model_dir=model_dir,
                transforms=transforms,
                transform_name=transform_name,
            ).load()

            if adversarial_example is None:
                return [{}] if per_node else {}

            adversarial_predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)),
                model_dir=ckpt_dir,
            )

            # Only keep examples where the attack actually flipped the label.
            if predicted_label == adversarial_predicted_label:
                return [{}] if per_node else {}

            trace = reconstruct_trace_from_tf(
                class_id=class_id,
                model_fn=model_fn,
                input_fn=lambda: mnist.test(data_dir, transforms=transforms).
                filter(lambda image, label: tf.equal(
                    tf.convert_to_tensor(class_id, dtype=tf.int32), label)
                       ).skip(image_id).take(1).batch(1),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
            )[0]

            if trace is None:
                return [{}] if per_node else {}

            # Brute-force reconstruction: no class filter on the adversarial
            # input, since its predicted class is unknown up front.
            adversarial_trace = reconstruct_trace_from_tf_brute_force(
                model_fn=model_fn,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
            )[0]

            adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]

            # adversarial_pred, violation = \
            #     detect_by_reduced_edge_count_violation(
            #         class_trace_fn(adversarial_label).load(),
            #                         adversarial_trace,
            #                         reduce_mode,
            #     )
            # row = {
            #     "image_id": image_id,
            #     "class_id": class_id,
            #     "original.prediction":
            #         detect_by_reduced_edge(class_trace_fn(class_id).load(),
            #                                 trace,
            #                                 reduce_mode,
            #                                 ),
            #     "adversarial.prediction":
            #         adversarial_pred,
            #     "violation": violation,
            # }

            # Violation counting runs on the clean trace; the adversarial
            # trace is scored against its own predicted class's trace.
            original_pred, violation = \
                detect_by_reduced_edge_count_violation(
                    class_trace_fn(class_id).load(),
                                    trace,
                                    reduce_mode,
                )
            row = {
                "image_id":
                image_id,
                "class_id":
                class_id,
                "original.prediction":
                original_pred,
                "adversarial.prediction":
                detect_by_reduced_edge(
                    class_trace_fn(adversarial_label).load(),
                    adversarial_trace,
                    reduce_mode,
                ),
                "violation":
                violation,
            }
            return row
예제 #15
0
def train(
    batch_size: int = 64,
    train_epochs: int = 10,
    data_format: str = "channels_first",
    multi_gpu: bool = False,
    label: str = None,
):
    """Train the MNIST estimator for `train_epochs` epochs, evaluating after each.

    Args:
        batch_size: per-step batch size (validated against GPU count when
            `multi_gpu` is set).
        train_epochs: number of train/evaluate rounds.
        data_format: "channels_first" or "channels_last"; auto-detected from
            the CUDA build when None.
        multi_gpu: replicate the model_fn across all visible GPUs.
        label: optional suffix selecting the model directory
            (`model` vs `model_<label>` under `root_dir`).
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if label is None:
        model_dir = abspath(f"{root_dir}/model")
    else:
        model_dir = abspath(f"{root_dir}/model_{label}")
    ckpt_dir = os.path.join(model_dir, "ckpts")
    data_dir = abspath(MNIST_PATH)

    model_function = model_fn

    if multi_gpu:
        validate_batch_size_for_multi_gpu(batch_size)

        # There are two steps required if using multi-GPU: (1) wrap the model_fn,
        # and (2) wrap the optimizer. The first happens here, and (2) happens
        # in the model_fn itself when the optimizer is defined.
        model_function = tf.contrib.estimator.replicate_model_fn(
            model_fn, loss_reduction=tf.losses.Reduction.MEAN)

    if data_format is None:
        data_format = ("channels_first"
                       if tf.test.is_built_with_cuda() else "channels_last")
    estimator_config = tf.estimator.RunConfig(
        # Keep every checkpoint (one per epoch) rather than the default last 5.
        keep_checkpoint_max=None,
        session_config=new_session_config(parallel=0))
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=ckpt_dir,
        params={
            "data_format": data_format,
            "multi_gpu": multi_gpu,
            "batch_size": batch_size,
        },
        config=estimator_config,
    )

    # Defined up front so the final print does not raise NameError when
    # train_epochs == 0 (previously eval_results only existed inside the loop).
    eval_results = None
    for epoch in range(train_epochs):
        # Train the model
        def train_input_fn():
            # When choosing shuffle buffer sizes, larger sizes result in better
            # randomness, while smaller sizes use less memory. MNIST is a small
            # enough dataset that we can easily shuffle the full epoch.
            ds = mnist.train(
                data_dir,
                transforms=transforms,
            )
            ds = ds.cache().shuffle(buffer_size=60000).batch(batch_size)
            return ds

        # Set up training hook that logs the training accuracy every 100 steps.
        tensors_to_log = {"train_accuracy": "train_accuracy"}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        classifier.train(input_fn=train_input_fn, hooks=[logging_hook])

        # Evaluate the model and print results
        def eval_input_fn():
            return (mnist.test(data_dir, ).batch(
                batch_size).make_one_shot_iterator().get_next())

        eval_results = classifier.evaluate(input_fn=eval_input_fn)
    print(label)
    print("Evaluation results:\n\t%s" % eval_results)
    print()
# Example #16
        def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
            """Build a trace row for one MNIST image and its adversarial twin.

            Predicts the `image_id`-th test/train image of `class_id`, loads
            the corresponding adversarial example, reconstructs both execution
            traces, pickles each under `save_dir`, and returns them in a dict.
            Returns an empty row (`{}`, or `[{}]` when `per_node`) whenever the
            sample is skipped: original misclassified, adversarial example
            missing, or the attack did not change the prediction.
            """
            nonlocal model_dir
            mode.check(False)
            data_dir = abspath(MNIST_PATH)
            model_dir = abspath(model_dir)
            ckpt_dir = f"{model_dir}/ckpts"
            create_model = lambda: LeNet(data_format="channels_first")
            graph = LeNet.graph().load()

            # model_fn fetches intermediate activations via a hook so the
            # trace reconstruction below can read them.
            model_fn = partial(model_fn_with_fetch_hook,
                               create_model=create_model,
                               graph=graph)
            if mnist_dataset_mode == "test":
                dataset = mnist.test
            elif mnist_dataset_mode == "train":
                dataset = mnist.train
            else:
                raise RuntimeError("Dataset invalid")

            predicted_label = predict(
                create_model=create_model,
                # dataset may be train or test, should be consistent with lenet_mnist_example
                input_fn=lambda: dataset(data_dir, ).filter(
                    lambda image, label: tf.equal(
                        tf.convert_to_tensor(class_id, dtype=tf.int32), label))
                .skip(image_id).take(1).batch(1),
                model_dir=ckpt_dir,
            )
            # Skip images the clean model already gets wrong.
            if predicted_label != class_id:
                return [{}] if per_node else {}

            adversarial_example = lenet_mnist_example(
                attack_name=attack_name,
                attack_fn=attack_fn,
                generate_adversarial_fn=generate_adversarial_fn,
                class_id=class_id,
                image_id=image_id,
                # model_dir not ckpt_dir
                model_dir=model_dir,
                transforms=transforms,
                transform_name=transform_name,
                mode=mnist_dataset_mode,
            ).load()

            # Attack may have failed to produce an example for this image.
            if adversarial_example is None:
                return [{}] if per_node else {}

            adversarial_predicted_label = predict(
                create_model=create_model,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)),
                model_dir=ckpt_dir,
            )

            # Only keep successful attacks (prediction actually flipped).
            if predicted_label == adversarial_predicted_label:
                return [{}] if per_node else {}

            # Optional per-class average trace used to guide reconstruction.
            if use_class_trace:
                class_trace_avg = ClassTraceIOAction(predicted_label).load()
            else:
                class_trace_avg = None

            trace = reconstruct_trace_from_tf(
                class_id=class_id,
                model_fn=model_fn,
                input_fn=lambda: dataset(
                    data_dir,
                    transforms=transforms,
                ).filter(lambda image, label: tf.equal(
                    tf.convert_to_tensor(class_id, dtype=tf.int32), label)).
                skip(image_id).take(1).batch(1),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
                class_trace=class_trace_avg,
            )[0]

            if trace is None:
                return [{}] if per_node else {}
            # Persist the clean trace: save_dir/original_<transform>/<class>/<image>.pkl
            path = os.path.join(save_dir, f"original_{transform_name}",
                                f"{class_id}", f"{image_id}.pkl")
            ensure_dir(path)
            with open(path, "wb") as f:
                pickle.dump(trace, f)

            adversarial_trace = reconstruct_trace_from_tf(
                model_fn=model_fn,
                input_fn=lambda: tf.data.Dataset.from_tensors(
                    mnist.normalize(adversarial_example)),
                select_fn=select_fn,
                model_dir=ckpt_dir,
                per_channel=per_channel,
                class_trace=class_trace_avg,
            )[0]

            # Persist the adversarial trace under the attack's own directory.
            path = os.path.join(save_dir, f"{attack_name}_{transform_name}",
                                f"{class_id}", f"{image_id}.pkl")
            ensure_dir(path)
            with open(path, "wb") as f:
                pickle.dump(adversarial_trace, f)

            row = {
                "class_id": class_id,
                "image_id": image_id,
                "trace": trace,
                "adversarial_trace": adversarial_trace,
            }
            # row = calc_all_overlap(
            #     class_trace_fn(class_id).load(), adversarial_trace, overlap_fn
            # )
            return row
# Example #17
def forward_propagate(
    create_model,
    input_fn,
    forward_fn: Callable[[tf.Tensor], tf.Tensor],
    model_dir: str,
    data_format: str = "channels_first",
    parallel: int = 1,
    prediction_hooks: List[SessionRunHook] = None,
    tag: str = "noop",
) -> Union[int, float]:
    """Run one prediction pass and return the `forward_fn(logits)` result.

    Builds a prediction-only Estimator around `create_model`, logs the input
    images to TensorBoard (tagged `input_<tag>`, written to the module-level
    `result_dir`), and returns `"classes"` from the first predicted element.

    Args:
        create_model: zero-arg factory returning a callable model.
        input_fn: estimator input_fn yielding image batches (NCHW assumed:
            inputs are transposed [0, 2, 3, 1] for the image summary).
        forward_fn: maps the model logits tensor to the output tensor.
        model_dir: checkpoint directory; must already exist.
        data_format: "channels_first"/"channels_last"; auto-detected if None.
        parallel: session parallelism passed to new_session_config.
        prediction_hooks: optional extra SessionRunHooks; the caller's list is
            not modified.
        tag: suffix for the TensorBoard image summary name.

    Raises:
        RuntimeError: if `model_dir` does not exist.
    """

    def model_fn(features, labels, mode, params):
        image = features
        if isinstance(image, dict):
            image = features["image"]

        # Save inputs for visualization in tensorboard (NCHW -> NHWC).
        feature_trans = tf.transpose(image, perm=[0, 2, 3, 1])
        image_summary = tf.summary.image(f"input_{tag}", feature_trans, max_outputs=100)
        eval_summary_hook = tf.train.SummarySaverHook(
                            summary_op=image_summary,
                            save_steps=2,
                            output_dir=result_dir)
        # Build a fresh hook list per call: the previous implementation
        # appended to the caller-supplied `prediction_hooks` list via
        # `nonlocal`, so repeated calls accumulated duplicate hooks.
        if prediction_hooks is None:
            hooks = [eval_summary_hook]
        else:
            hooks = list(prediction_hooks) + [eval_summary_hook]

        if mode == tf.estimator.ModeKeys.PREDICT:
            logits = create_model()(image, training=False)
            predictions = {"classes": forward_fn(logits)}
            return tf.estimator.EstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                prediction_hooks=hooks,
                export_outputs={
                    "classify": tf.estimator.export.PredictOutput(predictions)
                },
            )

    model_dir = abspath(model_dir)
    model_function = model_fn
    if data_format is None:
        data_format = (
            "channels_first" if tf.test.is_built_with_cuda() else "channels_last"
        )
    estimator_config = tf.estimator.RunConfig(
        session_config=new_session_config(parallel=parallel)
    )
    if not os.path.exists(model_dir):
        raise RuntimeError(f"model directory {model_dir} is not existed")
    classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=model_dir,
        params={"data_format": data_format},
        config=estimator_config,
    )

    result = list(classifier.predict(input_fn=input_fn))
    return result[0]["classes"]
# Example #18
def save_graph(
    attack_name: str,
    attack_fn,
    generate_adversarial_fn,
    select_fn: Callable[[np.ndarray], np.ndarray],
    image_id_index,
    batch_size,
    class_id,
    # model_dir = "result/lenet/model_augmentation",
    model_dir=model_dir,
    transforms=None,
    transform_name="noop",
    graph_dir="result/test",
    dataset_mode=dataset_mode,
    images_per_class=1,
    compute_adversarial=True,
    **kwargs,
):
    """Reconstruct and save traces for a batch of MNIST images of one class.

    Processes up to `batch_size` images of `class_id` starting at
    `image_id_index`, plus (optionally) their precomputed adversarial
    examples. Traces are persisted as a side effect through the
    `IOBatchAction` graph savers wired into the model_fn; nothing is
    returned. A batch position is only considered valid when the clean
    prediction is correct, the adversarial example exists, and the
    adversarial prediction differs from `class_id`.
    """

    data_dir = abspath(MNIST_PATH)
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"
    create_model = lambda: LeNet(data_format="channels_first")
    graph = LeNet.graph().load()

    # Clamp the batch so it never runs past the per-class image budget.
    batch_size = min(batch_size, images_per_class - image_id_index)
    if dataset_mode == "test":
        dataset = mnist.test
    elif dataset_mode == "train":
        dataset = mnist.train
    else:
        raise RuntimeError("Dataset invalid")

    predicted_label = predict_batch(
        create_model=create_model,
        input_fn=lambda:
        (dataset(data_dir).filter(lambda image, label: tf.equal(
            tf.convert_to_tensor(class_id, dtype=tf.int32), label)).skip(
                image_id_index).take(batch_size).batch(batch_size).
         make_one_shot_iterator().get_next()[0]),
        model_dir=ckpt_dir,
    )
    # Element-wise mask: which clean predictions match the true class.
    prediction_valid = (predicted_label == class_id)

    # The shape of each example is (1, 32, 32, 3)
    adversarial_examples = [
        lenet_mnist_example(
            attack_name=attack_name,
            attack_fn=attack_fn,
            generate_adversarial_fn=generate_adversarial_fn,
            class_id=class_id,
            image_id=image_id,
            # model_dir not ckpt_dir
            model_dir=model_dir,
            transforms=transforms,
            transform_name=transform_name,
            mode=dataset_mode,
        ).load()
        for image_id in range(image_id_index, image_id_index + batch_size)
    ]

    # Mask of positions whose adversarial example was actually generated.
    adversarial_valid = np.array(
        [example is not None for example in adversarial_examples])

    # Replace missing examples with zero placeholders so the batch stacks.
    adversarial_examples = [
        example if example is not None else np.zeros((1, 1, 28, 28))
        for example in adversarial_examples
    ]
    adversarial_examples = np.squeeze(np.array(adversarial_examples).astype(
        np.float32),
                                      axis=1)

    # adversarial_example is [0, 1] of shape (1, 28, 28)
    adversarial_predicted_label = predict_batch(
        create_model=create_model,
        input_fn=lambda: tf.data.Dataset.from_tensors(
            mnist.normalize(adversarial_examples)),
        model_dir=ckpt_dir,
    )
    adversarial_prediction_valid = adversarial_predicted_label != class_id

    # A position is valid only when all three boolean masks agree.
    batch_valid = (prediction_valid * adversarial_valid *
                   adversarial_prediction_valid)
    original_graph_dir = os.path.join(graph_dir, f"original_{transform_name}",
                                      f"{class_id}")
    original_graph_saver = IOBatchAction(
        dir=original_graph_dir,
        root_index=image_id_index,
    )
    original_model_fn = partial(
        model_fn_with_fetch_hook,
        create_model=create_model,
        graph=graph,
        graph_saver=original_graph_saver,
        batch_valid=batch_valid,
    )
    trace = reconstruct_trace_from_tf(
        class_id=class_id,
        model_fn=original_model_fn,
        input_fn=lambda: (dataset(
            data_dir,
            transforms=transforms,
        ).filter(lambda image, label: tf.equal(
            tf.convert_to_tensor(class_id, dtype=tf.int32), label)).skip(
                image_id_index).take(batch_size).batch(batch_size).
                          make_one_shot_iterator().get_next()[0]),
        select_fn=select_fn,
        model_dir=ckpt_dir,
    )

    if compute_adversarial:
        adversarial_graph_dir = os.path.join(
            graph_dir, f"{attack_name}_{transform_name}", f"{class_id}")
        adversarial_graph_saver = IOBatchAction(
            dir=adversarial_graph_dir,
            root_index=image_id_index,
        )
        adversarial_model_fn = partial(
            model_fn_with_fetch_hook,
            create_model=create_model,
            graph=graph,
            graph_saver=adversarial_graph_saver,
            batch_valid=batch_valid,
        )

        adversarial_trace = reconstruct_trace_from_tf(
            model_fn=adversarial_model_fn,
            input_fn=lambda: tf.data.Dataset.from_tensors(
                mnist.normalize(adversarial_examples)),
            select_fn=select_fn,
            model_dir=ckpt_dir,
        )
# Example #19
def predict_original_adversarial(
    attack_name: str,
    attack_fn,
    generate_adversarial_fn,
    image_id_index,
    batch_size,
    class_id,
    model_dir=model_dir,
    transforms=None,
    transform_name="noop",
    graph_dir="result/test",
    dataset_mode=dataset_mode,
    images_per_class=1,
    **kwargs,
):
    """Count clean vs adversarial accuracy for a batch of one CIFAR-10 class.

    Predicts up to `batch_size` images of `class_id` starting at
    `image_id_index` and their precomputed adversarial examples, restricting
    both counts to positions where an adversarial example exists.

    Returns:
        (original_correct, adversarial_correct, valid_count) — correct clean
        predictions, adversarial predictions still matching `class_id`, and
        the number of valid adversarial examples.
    """

    data_dir = abspath(CIFAR10_PATH)
    model_dir = abspath(model_dir)
    ckpt_dir = f"{model_dir}/ckpts"
    create_model = lambda: partial(
        ResNet10Cifar10(),
        training=False,
    )
    graph = ResNet10Cifar10.graph().load()

    # Clamp the batch so it never runs past the per-class image budget.
    batch_size = min(batch_size, images_per_class - image_id_index)

    predicted_label = predict_batch(
        create_model=create_model,
        input_fn=lambda: (input_fn_for_adversarial_examples(
            is_training=(dataset_mode == "train"),
            data_dir=data_dir,
            num_parallel_batches=1,
            is_shuffle=False,
            transform_fn=None,
        ).filter(lambda image, label: tf.equal(
            tf.convert_to_tensor(class_id, dtype=tf.int32), label)).skip(
                image_id_index).take(batch_size).batch(batch_size).
                          make_one_shot_iterator().get_next()[0]),
        model_dir=ckpt_dir,
    )

    # The shape of each example is (1, 32, 32, 3)
    adversarial_examples = [
        resnet10_cifar10_example(
            attack_name=attack_name,
            attack_fn=attack_fn,
            generate_adversarial_fn=generate_adversarial_fn,
            class_id=class_id,
            image_id=image_id,
            # model_dir not ckpt_dir
            model_dir=model_dir,
            transforms=transforms,
            transform_name=transform_name,
            dataset_mode=dataset_mode,
        ).load()
        for image_id in range(image_id_index, image_id_index + batch_size)
    ]

    # Mask of positions whose adversarial example was actually generated;
    # missing ones are replaced with zeros so the batch stacks.
    adversarial_valid = np.array(
        [example is not None for example in adversarial_examples])
    adversarial_examples = [
        example if example is not None else np.zeros((1, 32, 32, 3))
        for example in adversarial_examples
    ]
    adversarial_examples = np.squeeze(np.array(adversarial_examples).astype(
        np.float32),
                                      axis=1)

    # adversarial_example is [0, 1] of shape (1, 32, 32, 3)
    adversarial_predicted_label = predict_batch(
        create_model=create_model,
        input_fn=lambda: tf.data.Dataset.from_tensors(adversarial_examples),
        model_dir=ckpt_dir,
    )

    assert predicted_label.shape == adversarial_predicted_label.shape
    original_correct = (predicted_label[adversarial_valid] == class_id).sum()
    # FIX: the original compared against `class_num`, an undefined name that
    # raised NameError at runtime. `class_id` is the only in-scope candidate
    # and mirrors the original_correct computation above — counts adversarial
    # examples the model still classifies as the true class. TODO(review):
    # confirm against the caller whether attack-success was intended instead.
    adversarial_correct = (
        adversarial_predicted_label[adversarial_valid] == class_id).sum()
    valid_count = adversarial_valid.sum()

    return original_correct, adversarial_correct, valid_count