""" import copy import logging import warnings import numpy as np import tensorflow as tf from src.FGSM.cleverhans.cleverhans.attacks.attack import Attack from src.FGSM.cleverhans.cleverhans.model import Model, wrapper_warning_logits, CallableModelWrapper from src.FGSM.cleverhans.cleverhans import utils from src.FGSM.cleverhans.cleverhans import utils_tf np_dtype = np.dtype('float32') _logger = utils.create_logger("cleverhans.attacks.deep_fool") _logger.setLevel(logging.INFO) class DeepFool(Attack): """ DeepFool is an untargeted & iterative attack which is based on an iterative linearization of the classifier. The implementation here is w.r.t. the L2 norm. Paper link: "https://arxiv.org/pdf/1511.04599.pdf" :param model: cleverhans.model.Model :param sess: tf.Session :param dtypestr: dtype of the data :param kwargs: passed through to super constructor """
""" The Attack interface. """ from abc import ABCMeta import collections import warnings import numpy as np import tensorflow as tf from src.FGSM.cleverhans.cleverhans.compat import reduce_max from src.FGSM.cleverhans.cleverhans.model import Model from src.FGSM.cleverhans.cleverhans import utils _logger = utils.create_logger("cleverhans.attacks.attack") class Attack(object): """ Abstract base class for all attack classes. """ __metaclass__ = ABCMeta def __init__(self, model, sess=None, dtypestr='float32', **kwargs): """ :param model: An instance of the cleverhans.model.Model class. :param sess: The (possibly optional) tf.Session to run graphs in. :param dtypestr: Floating point precision to use (change to float64 to avoid numerical instabilities). :param back: (deprecated and will be removed on or after 2019-03-26).
""" # pylint: disable=missing-docstring import logging import numpy as np import tensorflow as tf from src.FGSM.cleverhans.cleverhans.attacks.attack import Attack from src.FGSM.cleverhans.cleverhans.compat import reduce_sum, reduce_max from src.FGSM.cleverhans.cleverhans.model import Model, CallableModelWrapper, wrapper_warning_logits from src.FGSM.cleverhans.cleverhans import utils np_dtype = np.dtype('float32') tf_dtype = tf.as_dtype('float32') _logger = utils.create_logger("cleverhans.attacks.elastic_net_method") _logger.setLevel(logging.INFO) def ZERO(): return np.asarray(0., dtype=np_dtype) class ElasticNetMethod(Attack): """ This attack features L1-oriented adversarial examples and includes the C&W L2 attack as a special case (when beta is set to 0). Adversarial examples attain similar performance to those generated by the C&W L2 attack in the white-box case, and more importantly, have improved transferability properties and complement adversarial training.
    attack_params = self.attack_params
    if attack_params is None:
      attack_params = {}
    if self.pass_y:
      x_adv = self.attack.generate(x_batch, y=y_batch, **attack_params)
    else:
      # Some code checks the keys of kwargs, rather than checking if
      # y is None, so we need to truly not pass y at all, rather than
      # just passing a None value for it.
      x_adv = self.attack.generate(x_batch, **attack_params)
    return (x_batch, y_batch), tuple([x_adv])


_logger = create_logger("cleverhans.evaluation")

# Cache for storing output of `batch_eval_multi_worker`'s calls to
# `graph_factory`, to avoid making the tf graph too big
_batch_eval_multi_worker_cache = {}


def _check_x(x):
  """
  Makes sure an `x` argument is a valid numpy dataset.
  """
  if not isinstance(x, np.ndarray):
    raise TypeError("x must be a numpy array. Typically x contains "
                    "the entire test set inputs.")
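# --- Illustrative sketch (not part of the original file) ----------------
# Why "truly not pass y at all": a downstream generate() may branch on
# the *presence* of the key rather than its value, e.g. (hypothetical):
#
#   def generate(self, x, **kwargs):
#     if 'y' in kwargs:   # key check -- y=None would still hit this
#       labels = kwargs['y']
#
# so attack.generate(x_batch, y=None) could take the labeled path even
# with no labels; omitting the argument entirely sidesteps that.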
""" # pylint: disable=missing-docstring import logging import numpy as np import tensorflow as tf from src.FGSM.cleverhans.cleverhans.attacks.attack import Attack from src.FGSM.cleverhans.cleverhans.compat import reduce_sum, reduce_max from src.FGSM.cleverhans.cleverhans.model import CallableModelWrapper, Model, wrapper_warning_logits from src.FGSM.cleverhans.cleverhans import utils np_dtype = np.dtype('float32') tf_dtype = tf.as_dtype('float32') _logger = utils.create_logger("cleverhans.attacks.carlini_wagner_l2") _logger.setLevel(logging.INFO) class CarliniWagnerL2(Attack): """ This attack was originally proposed by Carlini and Wagner. It is an iterative attack that finds adversarial examples on many defenses that are robust to other attacks. Paper link: https://arxiv.org/abs/1608.04644 At a high level, this attack is an iterative attack using Adam and a specially-chosen loss function to find adversarial examples with lower distortion than other attacks. This comes at the cost of speed, as this attack is often much slower than others.
import logging
import os
import time
import warnings

import numpy as np
import six
from six.moves import xrange
import tensorflow as tf

from src.FGSM.cleverhans.cleverhans.compat import device_lib
from src.FGSM.cleverhans.cleverhans.compat import reduce_sum, reduce_mean
from src.FGSM.cleverhans.cleverhans.compat import reduce_max
from src.FGSM.cleverhans.cleverhans.compat import softmax_cross_entropy_with_logits
from src.FGSM.cleverhans.cleverhans.utils import batch_indices, _ArgsWrapper, create_logger

_logger = create_logger("cleverhans.utils.tf")
_logger.setLevel(logging.INFO)


def model_loss(y, model, mean=True):
  """
  Define the loss of a TF graph.
  :param y: correct labels
  :param model: output of the model
  :param mean: boolean indicating whether to return the mean of the loss
               or a vector with the per-sample loss for each input of
               the batch
  :return: mean of the loss if True, otherwise vector with per-sample loss
  """
  warnings.warn("This function is deprecated and will be removed on or after"
                " 2019-04-05. Switch to cleverhans.train.train.")
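  # Sketch of the remainder (assumption, based on the upstream cleverhans
  # implementation of this deprecated helper): resolve `model` to logits,
  # then apply softmax cross-entropy, optionally averaging over the batch:
  #
  #   out = softmax_cross_entropy_with_logits(logits=logits, labels=y)
  #   if mean:
  #     out = reduce_mean(out)
  #   return out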
import logging
import time
import warnings
import math

import numpy as np
from six.moves import xrange
import tensorflow as tf

from src.FGSM.cleverhans.cleverhans import canary
from src.FGSM.cleverhans.cleverhans.utils import _ArgsWrapper, create_logger
from src.FGSM.cleverhans.cleverhans.utils import safe_zip
from src.FGSM.cleverhans.cleverhans.utils_tf import infer_devices
from src.FGSM.cleverhans.cleverhans.utils_tf import initialize_uninitialized_global_variables

_logger = create_logger("train")
_logger.setLevel(logging.INFO)


def train(sess, loss, x_train, y_train,
          init_all=False, evaluate=None, feed=None, args=None,
          rng=None, var_list=None, fprop_args=None, optimizer=None,
          devices=None, x_batch_preprocessor=None, use_ema=False,
          ema_decay=.998, run_canary=None,
          loss_threshold=1e5, dataset_train=None, dataset_size=None):
  """
  Run (optionally multi-replica, synchronous) training to minimize `loss`
  :param sess: TF session to use when training the graph
  :param loss: tensor, the loss to minimize
  :param x_train: numpy array with training inputs or tf Dataset
  :param y_train: numpy array with training outputs or tf Dataset
  """
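  # Usage sketch (assumption: conventional cleverhans training args; the
  # names X_train/Y_train and the specific hyperparameters below are
  # illustrative, not from this file):
  #
  #   train_params = {'nb_epochs': 6, 'batch_size': 128,
  #                   'learning_rate': 1e-3}
  #   train(sess, loss, X_train, Y_train, args=train_params,
  #         rng=np.random.RandomState([2017, 8, 30]))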