Example 1
 def __init__(self, model, model_type='classification', targeted=False, reserve_ratio=0.3, sparse=True,
              step_size=0.5, per_bounds=0.6, c1=2.0, c2=2.0, c=2.0, pop_size=6, t_max=1000, pm=0.5, bounds=None):
     super(PSOAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._step_size = check_value_positive('step_size', step_size)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._c1 = check_value_positive('c1', c1)
     self._c2 = check_value_positive('c2', c2)
     self._c = check_value_positive('c', c)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._pm = check_value_non_negative('pm', pm)
     if self._pm > 1:
         msg = "pm should not be greater than 1.0, but got {}.".format(self._pm)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._targeted = check_param_type('targeted', targeted, bool)
     self._t_max = check_int_positive('t_max', t_max)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._reserve_ratio = check_value_non_negative('reserve_ratio', reserve_ratio)
     if self._reserve_ratio > 1:
         msg = "reserve_ratio should not be greater than 1.0, but got {}.".format(self._reserve_ratio)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
Example 2
    def __init__(self, raw_preds, def_preds, raw_query_counts,
                 def_query_counts, raw_query_time, def_query_time,
                 def_detection_counts, true_labels, max_queries):
        self._raw_preds, self._def_preds = check_pair_numpy_param(
            'raw_preds', raw_preds, 'def_preds', def_preds)
        self._num_samples = self._raw_preds.shape[0]
        self._raw_query_counts, _ = check_equal_length('raw_query_counts',
                                                       raw_query_counts,
                                                       'number of sample',
                                                       self._raw_preds)
        self._def_query_counts, _ = check_equal_length('def_query_counts',
                                                       def_query_counts,
                                                       'number of sample',
                                                       self._raw_preds)
        self._raw_query_time, _ = check_equal_length('raw_query_time',
                                                     raw_query_time,
                                                     'number of sample',
                                                     self._raw_preds)
        self._def_query_time, _ = check_equal_length('def_query_time',
                                                     def_query_time,
                                                     'number of sample',
                                                     self._raw_preds)

        self._num_adv_samples = self._raw_query_counts[
            self._raw_query_counts > 0].shape[0]

        self._num_adv_samples = check_int_positive(
            'the number of adversarial samples', self._num_adv_samples)

        self._num_ben_samples = self._num_samples - self._num_adv_samples
        self._max_queries = check_int_positive('max_queries', max_queries)

        self._def_detection_counts = check_numpy_param('def_detection_counts',
                                                       def_detection_counts)
        self._true_labels = check_numpy_param('true_labels', true_labels)
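A sample is counted as adversarial above exactly when its raw query count is
positive; a standalone numpy illustration of that bookkeeping (values are
made up):

 import numpy as np

 raw_query_counts = np.array([0, 12, 0, 87, 3])  # 0 marks a benign query
 num_adv = raw_query_counts[raw_query_counts > 0].shape[0]  # -> 3
 num_ben = raw_query_counts.shape[0] - num_adv              # -> 2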
Example 3
 def __init__(self,
              model,
              pop_size=6,
              mutation_rate=0.005,
              per_bounds=0.15,
              max_steps=1000,
              step_size=0.20,
              temp=0.3,
              bounds=(0, 1.0),
              adaptive=False,
              sparse=True):
     super(GeneticAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._step_size = check_value_positive('step_size', step_size)
     self._temp = check_value_positive('temp', temp)
     self._max_steps = check_int_positive('max_steps', max_steps)
     self._mutation_rate = check_value_positive('mutation_rate',
                                                mutation_rate)
     self._adaptive = check_param_type('adaptive', adaptive, bool)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     # initial global optimum fitness value
     self._best_fit = -1
     # count times of no progress
     self._plateau_times = 0
     # count times of changing attack step
     self._adap_times = 0
     self._sparse = check_param_type('sparse', sparse, bool)
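A minimal usage sketch for the constructor above. The module paths, the
BlackModel.predict contract and the 3-tuple returned by generate follow older
MindArmour releases and should be treated as assumptions; trained_network,
test_images and test_labels are placeholders.

 from mindspore import Tensor
 from mindarmour.attacks.black.black_model import BlackModel
 from mindarmour.attacks.black.genetic_attack import GeneticAttack

 class ModelToBeAttacked(BlackModel):
     """Expose only query access to a trained network."""
     def __init__(self, network):
         super(ModelToBeAttacked, self).__init__()
         self._network = network

     def predict(self, inputs):
         # The attack sees nothing but prediction scores as numpy arrays.
         return self._network(Tensor(inputs)).asnumpy()

 attack = GeneticAttack(ModelToBeAttacked(trained_network), pop_size=6,
                        max_steps=1000, sparse=True)
 success, adv_data, query_times = attack.generate(test_images, test_labels)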
Example 4
 def __init__(self,
              model,
              step_size=0.5,
              per_bounds=0.6,
              c1=2.0,
              c2=2.0,
              c=2.0,
              pop_size=6,
              t_max=1000,
              pm=0.5,
              bounds=None,
              targeted=False,
              reduction_iters=3,
              sparse=True):
     super(PSOAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._step_size = check_value_positive('step_size', step_size)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._c1 = check_value_positive('c1', c1)
     self._c2 = check_value_positive('c2', c2)
     self._c = check_value_positive('c', c)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._pm = check_value_positive('pm', pm)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     self._targeted = check_param_type('targeted', targeted, bool)
     self._t_max = check_int_positive('t_max', t_max)
     self._reduce_iters = check_int_positive('reduction_iters',
                                             reduction_iters)
     self._sparse = check_param_type('sparse', sparse, bool)
Example 5
 def __init__(self,
              network,
              eps=1e-5,
              bounds=(0.0, 1.0),
              is_targeted=True,
              nb_iter=150,
              search_iters=30,
              loss_fn=None,
              sparse=False):
     super(LBFGS, self).__init__()
     self._network = check_model('network', network, Cell)
     self._eps = check_value_positive('eps', eps)
     self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
     self._nb_iter = check_int_positive('nb_iter', nb_iter)
     self._search_iters = check_int_positive('search_iters', search_iters)
     if loss_fn is None:
         loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                 sparse=False)
     with_loss_cell = WithLossCell(self._network, loss_fn)
     self._grad_all = GradWrapWithLoss(with_loss_cell)
     self._dtype = None
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     self._sparse = check_param_type('sparse', sparse, bool)
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
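     # Note: `bounds` unpacks here as (box_max, box_min); the swap below
     # restores a conventional (min, max) ordering regardless of how the
     # caller ordered the pair.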
     box_max, box_min = bounds
     if box_max < box_min:
         self._box_min = box_max
         self._box_max = box_min
     else:
         self._box_min = box_min
         self._box_max = box_max
Example 6
 def __init__(self, initial_seeds, target_model, train_dataset, const_K,
              mode='L', max_seed_num=1000):
     self.initial_seeds = initial_seeds
     self.target_model = check_model('model', target_model, Model)
     self.train_dataset = check_numpy_param('train_dataset', train_dataset)
     self.const_k = check_int_positive('const_k', const_K)
     self.mode = mode
     self.max_seed_num = check_int_positive('max_seed_num', max_seed_num)
     self.coverage_metrics = ModelCoverageMetrics(target_model, 1000, 10,
                                                  train_dataset)
Example 7
 def __init__(self, model, segmented_num, neuron_num, train_dataset):
     self._model = check_model('model', model, Model)
     self._segmented_num = check_int_positive('segmented_num',
                                              segmented_num)
     self._neuron_num = check_int_positive('neuron_num', neuron_num)
     train_dataset = check_numpy_param('train_dataset', train_dataset)
     self._lower_bounds = [np.inf] * neuron_num
     self._upper_bounds = [-np.inf] * neuron_num
     self._var = [0] * neuron_num
     self._main_section_hits = [[0 for _ in range(self._segmented_num)]
                                for _ in range(self._neuron_num)]
     self._lower_corner_hits = [0] * self._neuron_num
     self._upper_corner_hits = [0] * self._neuron_num
     self._bounds_get(train_dataset)
Example 8
 def __init__(self, network, num_classes, max_iters=50, overshoot=0.02,
              norm_level=2, bounds=None, sparse=True):
     super(DeepFool, self).__init__()
     self._network = check_model('network', network, Cell)
     self._network.set_grad(True)
     self._max_iters = check_int_positive('max_iters', max_iters)
     self._overshoot = check_value_positive('overshoot', overshoot)
     self._norm_level = check_norm_level(norm_level)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._net_grad = GradWrap(self._network)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     self._sparse = check_param_type('sparse', sparse, bool)
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
Example 9
    def batch_defense(self, inputs, labels, batch_size=32, epochs=5):
        """
        Train the defense model with batches of samples.

        Args:
            inputs (numpy.ndarray): Samples based on which adversarial
                examples are generated.
            labels (numpy.ndarray): Labels of input samples.
            batch_size (int): Number of samples in one batch.
            epochs (int): Number of epochs.

        Returns:
            numpy.ndarray, loss of batch_defense operation.

        Raises:
            ValueError: If batch_size is 0.
        """
        inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels',
                                                labels)
        x_len = len(inputs)
        batch_size = check_int_positive('batch_size', batch_size)

        iters_per_epoch = int(x_len / batch_size)
        loss = None
        for _ in range(epochs):
            for step in range(iters_per_epoch):
                x_batch = inputs[step * batch_size:(step + 1) * batch_size]
                y_batch = labels[step * batch_size:(step + 1) * batch_size]
                loss = self.defense(x_batch, y_batch)
        return loss
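Note that int(x_len / batch_size) floors, so a trailing partial batch is
silently skipped on every epoch; a quick arithmetic check with illustrative
numbers:

 x_len, batch_size = 100, 32
 iters_per_epoch = int(x_len / batch_size)       # 3 full batches
 skipped = x_len - iters_per_epoch * batch_size  # 4 samples unused per epoch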
Example 10
 def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
              theta=1.0, max_iteration=1000, max_count=3, increase=True,
              sparse=True):
     super(JSMAAttack, self).__init__()
     LOGGER.debug(TAG, "init jsma class.")
     self._network = check_model('network', network, Cell)
     self._min = check_value_non_negative('box_min', box_min)
     self._max = check_value_non_negative('box_max', box_max)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._theta = check_value_positive('theta', theta)
     self._max_iter = check_int_positive('max_iteration', max_iteration)
     self._max_count = check_int_positive('max_count', max_count)
     self._increase = check_param_type('increase', increase, bool)
     self._net_grad = GradWrap(self._network)
     self._bit_map = None
     self._sparse = check_param_type('sparse', sparse, bool)
Example 11
    def _bounds_get(self, train_dataset, batch_size=32):
        """
        Update the lower and upper boundaries of neurons' outputs.

        Args:
            train_dataset (numpy.ndarray): Training dataset used to
                determine the neurons' output boundaries.
            batch_size (int): The number of samples in a predict batch.
                Default: 32.
        """
        batch_size = check_int_positive('batch_size', batch_size)
        output_mat = []
        batches = train_dataset.shape[0] // batch_size
        for i in range(batches):
            inputs = train_dataset[i * batch_size:(i + 1) * batch_size]
            output = self._model.predict(Tensor(inputs)).asnumpy()
            output_mat.append(output)
            lower_compare_array = np.concatenate(
                [output, np.array([self._lower_bounds])], axis=0)
            self._lower_bounds = np.min(lower_compare_array, axis=0)
            upper_compare_array = np.concatenate(
                [output, np.array([self._upper_bounds])], axis=0)
            self._upper_bounds = np.max(upper_compare_array, axis=0)
        if batches == 0:
            output = self._model.predict(Tensor(train_dataset)).asnumpy()
            self._lower_bounds = np.min(output, axis=0)
            self._upper_bounds = np.max(output, axis=0)
            output_mat.append(output)
        self._var = np.std(np.concatenate(np.array(output_mat), axis=0),
                           axis=0)
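The per-batch update above is a running element-wise min/max over prediction
outputs; the same step in self-contained numpy with toy batches:

 import numpy as np

 lower = np.array([np.inf, np.inf])
 upper = np.array([-np.inf, -np.inf])
 for output in (np.array([[0.2, 0.9], [0.4, 0.1]]),
                np.array([[0.5, 0.3]])):
     lower = np.min(np.concatenate([output, lower[None, :]], axis=0), axis=0)
     upper = np.max(np.concatenate([output, upper[None, :]], axis=0), axis=0)
 # lower -> [0.2, 0.1], upper -> [0.5, 0.9]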
Example 12
    def test_adequacy_coverage_calculate(self,
                                         dataset,
                                         bias_coefficient=0,
                                         batch_size=32):
        """
        Calculate the testing adequacy of the given dataset.

        Args:
            dataset (numpy.ndarray): Data for fuzz test.
            bias_coefficient (float): The coefficient used for changing the
                neurons' output boundaries. Default: 0.
            batch_size (int): The number of samples in a predict batch.
                Default: 32.

        Examples:
            >>> model_fuzz_test = ModelCoverageMetrics(model, 10000, 10, train_images)
            >>> model_fuzz_test.test_adequacy_coverage_calculate(test_images)
        """
        dataset = check_numpy_param('dataset', dataset)
        batch_size = check_int_positive('batch_size', batch_size)
        self._lower_bounds -= bias_coefficient * self._var
        self._upper_bounds += bias_coefficient * self._var
        intervals = (self._upper_bounds - self._lower_bounds) / self._k
        batches = dataset.shape[0] // batch_size
        for i in range(batches):
            self._sections_hits_count(
                dataset[i * batch_size:(i + 1) * batch_size], intervals)
Example 13
    def __init__(self,
                 model,
                 init_num_evals=100,
                 max_num_evals=1000,
                 stepsize_search='geometric_progression',
                 num_iterations=20,
                 gamma=1.0,
                 constraint='l2',
                 batch_size=32,
                 clip_min=0.0,
                 clip_max=1.0,
                 sparse=True):
        super(HopSkipJumpAttack, self).__init__()
        self._model = check_model('model', model, BlackModel)
        self._init_num_evals = check_int_positive('initial_num_evals',
                                                  init_num_evals)
        self._max_num_evals = check_int_positive('max_num_evals',
                                                 max_num_evals)
        self._batch_size = check_int_positive('batch_size', batch_size)
        self._clip_min = check_value_non_negative('clip_min', clip_min)
        self._clip_max = check_value_non_negative('clip_max', clip_max)
        self._sparse = check_param_type('sparse', sparse, bool)
        self._np_dtype = np.dtype('float32')
        if stepsize_search in ['geometric_progression', 'grid_search']:
            self._stepsize_search = stepsize_search
        else:
            msg = "stepsize_search must be in ['geometric_progression'," \
                  " 'grid_search'], but got {}".format(stepsize_search)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)

        self._num_iterations = check_int_positive('num_iterations',
                                                  num_iterations)
        self._gamma = check_value_positive('gamma', gamma)
        if constraint in ['l2', 'linf']:
            self._constraint = constraint
        else:
            msg = "constraint must be in ['l2', 'linf'], " \
                  "but got {}".format(constraint)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        self.queries = 0
        self.is_adv = True
        self.y_targets = None
        self.image_targets = None
        self.y_target = None
        self.image_target = None
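The two validated options above gate construction; a short sketch of a valid
and an invalid configuration, assuming a BlackModel subclass instance named
model as in the other examples:

 attack = HopSkipJumpAttack(model, constraint='linf',
                            stepsize_search='grid_search')
 try:
     HopSkipJumpAttack(model, stepsize_search='binary')
 except ValueError as err:
     print(err)  # names the two supported search strategies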
Example 14
    def __init__(self, model, scene, max_queries=10000, top_k=-1, num_class=10, batch_size=128, epsilon=0.3,
                 samples_per_draw=128, momentum=0.9, learning_rate=1e-3, max_lr=5e-2, min_lr=5e-4, sigma=1e-3,
                 plateau_length=20, plateau_drop=2.0, adv_thresh=0.25, zero_iters=10, starting_eps=1.0,
                 starting_delta_eps=0.5, label_only_sigma=1e-3, conservative=2, sparse=True):
        super(NES, self).__init__()
        self._model = check_model('model', model, BlackModel)
        self._scene = scene

        self._max_queries = check_int_positive('max_queries', max_queries)
        self._num_class = check_int_positive('num_class', num_class)
        self._batch_size = check_int_positive('batch_size', batch_size)
        self._samples_per_draw = check_int_positive('samples_per_draw', samples_per_draw)
        self._goal_epsilon = check_value_positive('epsilon', epsilon)
        self._momentum = check_value_positive('momentum', momentum)
        self._learning_rate = check_value_positive('learning_rate', learning_rate)
        self._max_lr = check_value_positive('max_lr', max_lr)
        self._min_lr = check_value_positive('min_lr', min_lr)
        self._sigma = check_value_positive('sigma', sigma)
        self._plateau_length = check_int_positive('plateau_length', plateau_length)
        self._plateau_drop = check_value_positive('plateau_drop', plateau_drop)
        # partial information arguments
        self._k = top_k
        self._adv_thresh = check_value_positive('adv_thresh', adv_thresh)
        # label only arguments
        self._zero_iters = check_int_positive('zero_iters', zero_iters)
        self._starting_eps = check_value_positive('starting_eps', starting_eps)
        self._starting_delta_eps = check_value_positive('starting_delta_eps', starting_delta_eps)
        self._label_only_sigma = check_value_positive('label_only_sigma', label_only_sigma)
        self._conservative = check_int_positive('conservative', conservative)
        self._sparse = check_param_type('sparse', sparse, bool)
        self.target_imgs = None
        self.target_img = None
        self.target_class = None
Example 15
 def __init__(self, model, neuron_num, segmented_num, train_dataset):
     self._model = check_model('model', model, Model)
     self._segmented_num = check_int_positive('segmented_num',
                                              segmented_num)
     self._neuron_num = check_int_positive('neuron_num', neuron_num)
     if self._neuron_num > 1e+9:
         msg = 'neuron_num should be no greater than 1e+9, otherwise a MemoryError would occur'
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     train_dataset = check_numpy_param('train_dataset', train_dataset)
     self._lower_bounds = [np.inf] * self._neuron_num
     self._upper_bounds = [-np.inf] * self._neuron_num
     self._var = [0] * self._neuron_num
     self._main_section_hits = [[0 for _ in range(self._segmented_num)]
                                for _ in range(self._neuron_num)]
     self._lower_corner_hits = [0] * self._neuron_num
     self._upper_corner_hits = [0] * self._neuron_num
     self._bounds_get(train_dataset)
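A rough back-of-envelope for the guard above: the main hit table alone is a
neuron_num x segmented_num nested list, so memory grows multiplicatively
(figures are illustrative, assuming ~8 bytes per list reference in CPython):

 neuron_num, segmented_num = 10_000, 1_000
 cells = neuron_num * segmented_num  # 10**7 list entries
 approx_bytes = cells * 8            # ~80 MB for the references alone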
Example 16
 def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000,
              max_buffer_size=10000, tuning=False, fpr=0.001):
     super(SimilarityDetector, self).__init__()
     self._max_k_neighbor = check_int_positive('max_k_neighbor',
                                               max_k_neighbor)
     self._trans_model = check_model('trans_model', trans_model, Model)
     self._tuning = check_param_type('tuning', tuning, bool)
     self._chunk_size = check_int_positive('chunk_size', chunk_size)
     self._max_buffer_size = check_int_positive('max_buffer_size',
                                                max_buffer_size)
     self._fpr = check_param_in_range('fpr', fpr, 0, 1)
     self._num_of_neighbors = None
     self._threshold = None
     self._num_queries = 0
     # Stores recently processed queries
     self._buffer = []
     # Tracks indexes of detected queries
     self._detected_queries = []
Example 17
 def __init__(self,
              model,
              max_iter=1000,
              search_iter=10,
              is_targeted=False,
              init_attack=None,
              sparse=True):
     super(PointWiseAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._max_iter = check_int_positive('max_iter', max_iter)
     self._search_iter = check_int_positive('search_iter', search_iter)
     self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
     if init_attack is None:
         self._init_attack = SaltAndPepperNoiseAttack(
             model, is_targeted=self._is_targeted)
     else:
         self._init_attack = init_attack
     self._sparse = check_param_type('sparse', sparse, bool)
Example 18
 def __init__(self, model, bounds=(0.0, 1.0), max_iter=100,
              is_targeted=False, sparse=True):
     super(SaltAndPepperNoiseAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     self._max_iter = check_int_positive('max_iter', max_iter)
     self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
     self._sparse = check_param_type('sparse', sparse, bool)
Example 19
 def __init__(self,
              network,
              input_shape,
              input_bound,
              loss_weights=(1, 0.2, 5)):
     self._network = check_param_type('network', network, Cell)
     for sub_loss_weight in loss_weights:
         check_value_positive('sub_loss_weight', sub_loss_weight)
     self._loss = InversionLoss(self._network, loss_weights)
     self._input_shape = check_param_type('input_shape', input_shape, tuple)
     for shape_dim in input_shape:
         check_int_positive('shape_dim', shape_dim)
     self._input_bound = check_param_multi_types('input_bound', input_bound,
                                                 [list, tuple])
     for value_bound in self._input_bound:
         check_param_multi_types('value_bound', value_bound, [float, int])
     if self._input_bound[0] > self._input_bound[1]:
         msg = 'input_bound[0] should not be larger than input_bound[1], but got them as {} and {}'.format(
             self._input_bound[0], self._input_bound[1])
         raise ValueError(msg)
Example 20
    def set_threshold(self, num_of_neighbors, threshold):
        """
        Set the parameters num_of_neighbors and threshold.

        Args:
            num_of_neighbors (int): Number of the nearest neighbors.
            threshold (float): Detection threshold.
        """
        self._num_of_neighbors = check_int_positive('num_of_neighbors',
                                                    num_of_neighbors)
        self._threshold = check_value_positive('threshold', threshold)
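Hypothetical call, with values that would normally come from a tuning pass
(the detector instance and both numbers are illustrative):

 detector.set_threshold(num_of_neighbors=50, threshold=0.001)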
Example 21
 def __init__(self, auto_encoder, model, option="jsd",
              t=1, bounds=(0.0, 1.0)):
     super(DivergenceBasedDetector, self).__init__(auto_encoder,
                                                   bounds=bounds)
     self._auto_encoder = auto_encoder
     self._model = check_model('targeted model', model, Model)
     self._threshold = 0.0
     self._option = option
     self._t = check_int_positive('t', t)
     self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
Example 22
 def __init__(self, model, model_type='classification', targeted=True, reserve_ratio=0.3, sparse=True,
              pop_size=6, mutation_rate=0.005, per_bounds=0.15, max_steps=1000, step_size=0.20, temp=0.3,
              bounds=(0, 1.0), adaptive=False, c=0.1):
     super(GeneticAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._targeted = check_param_type('targeted', targeted, bool)
     self._reserve_ratio = check_value_non_negative('reserve_ratio', reserve_ratio)
     if self._reserve_ratio > 1:
         msg = "reserve_ratio should not be greater than 1.0, but got {}.".format(self._reserve_ratio)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._step_size = check_value_positive('step_size', step_size)
     self._temp = check_value_positive('temp', temp)
     self._max_steps = check_int_positive('max_steps', max_steps)
     self._mutation_rate = check_value_non_negative('mutation_rate', mutation_rate)
     if self._mutation_rate > 1:
         msg = "mutation_rate should not be greater than 1.0, but got {}.".format(self._mutation_rate)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._adaptive = check_param_type('adaptive', adaptive, bool)
     # initial global optimum fitness value
     self._best_fit = -np.inf
     # count times of no progress
     self._plateau_times = 0
     # count times of changing attack step_size
     self._adap_times = 0
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._c = check_value_positive('c', c)
Example 23
    def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
                 max_eps=10.0, target_delta=1e-3, noise_decay_mode='Time',
                 noise_decay_rate=6e-4, per_print_times=50, dataset_sink_mode=False):
        super(ZCDPMonitor, self).__init__()
        check_int_positive('num_samples', num_samples)
        check_int_positive('batch_size', batch_size)
        if batch_size >= num_samples:
            msg = 'batch_size must be less than num_samples.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        check_value_positive('initial_noise_multiplier',
                             initial_noise_multiplier)
        if noise_decay_mode is not None:
            if noise_decay_mode not in ('Step', 'Time', 'Exp'):
                msg = "Noise decay mode must be in ('Step', 'Time', 'Exp'), but got {}.".\
                    format(noise_decay_mode)
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
            noise_decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
            check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
        check_int_positive('per_print_times', per_print_times)
        check_param_type('dataset_sink_mode', dataset_sink_mode, bool)

        self._num_samples = num_samples
        self._batch_size = batch_size
        self._initial_noise_multiplier = initial_noise_multiplier
        self._max_eps = check_value_positive('max_eps', max_eps)
        self._target_delta = check_param_in_range('target_delta', target_delta, 0.0, 1.0)
        self._noise_decay_mode = noise_decay_mode
        self._noise_decay_rate = noise_decay_rate
        # initialize zcdp
        self._zcdp = 0
        self._per_print_times = per_print_times
        if dataset_sink_mode:
            self._per_print_times = int(self._num_samples / self._batch_size)
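With dataset_sink_mode=True the report interval above collapses to one epoch;
the arithmetic with illustrative sizes:

 num_samples, batch_size = 60000, 256
 per_print_times = int(num_samples / batch_size)  # 234 steps = one epoch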
Example 24
 def __init__(self,
              network,
              num_classes,
              box_min=0.0,
              box_max=1.0,
              bin_search_steps=5,
              max_iterations=1000,
              confidence=0,
              learning_rate=5e-3,
              initial_const=1e-2,
              abort_early_check_ratio=5e-2,
              targeted=False,
              fast=True,
              abort_early=True,
              sparse=True):
     LOGGER.info(TAG, "init CW object.")
     super(CarliniWagnerL2Attack, self).__init__()
     self._network = check_model('network', network, Cell)
     self._network.set_grad(True)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._min = check_param_type('box_min', box_min, float)
     self._max = check_param_type('box_max', box_max, float)
     self._bin_search_steps = check_int_positive('search_steps',
                                                 bin_search_steps)
     self._max_iterations = check_int_positive('max_iterations',
                                               max_iterations)
     self._confidence = check_param_multi_types('confidence', confidence,
                                                [int, float])
     self._learning_rate = check_value_positive('learning_rate',
                                                learning_rate)
     self._initial_const = check_value_positive('initial_const',
                                                initial_const)
     self._abort_early = check_param_type('abort_early', abort_early, bool)
     self._fast = check_param_type('fast', fast, bool)
     self._abort_early_check_ratio = check_value_positive(
         'abort_early_check_ratio', abort_early_check_ratio)
     self._targeted = check_param_type('targeted', targeted, bool)
     self._net_grad = GradWrap(self._network)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._dtype = None
Example 25
 def __init__(self, model, ksize=3, is_local_smooth=True,
              metric='l1', false_positive_ratio=0.05):
     super(SpatialSmoothing, self).__init__()
     self._ksize = check_int_positive('ksize', ksize)
     self._is_local_smooth = check_param_type('is_local_smooth',
                                              is_local_smooth,
                                              bool)
     self._model = check_model('model', model, Model)
     self._metric = metric
     self._fpr = check_param_in_range('false_positive_ratio',
                                      false_positive_ratio,
                                      0, 1)
     self._threshold = None
Example 26
    def __init__(self,
                 micro_batches=2,
                 norm_bound=1.0,
                 noise_mech=None,
                 clip_mech=None,
                 **kwargs):
        if micro_batches:
            self._micro_batches = check_int_positive('micro_batches',
                                                     micro_batches)
        else:
            self._micro_batches = None
        norm_bound = check_param_type('norm_bound', norm_bound, float)
        norm_bound = check_value_positive('norm_bound', norm_bound)
        norm_bound = Tensor(norm_bound, mstype.float32)
        self._norm_bound = Parameter(norm_bound, 'norm_bound')

        opt = kwargs['optimizer']
        opt_name = opt.__class__.__name__
        # Check that noise_mech and DPOptimizer are not both set and not
        # both absent; raise ValueError otherwise. Also raise ValueError if
        # noise_mech (or DPOptimizer's mech) is adaptive while clip_mech is
        # not None.
        if noise_mech is not None and "DPOptimizer" in opt_name:
            msg = 'DPOptimizer is not supported while noise_mech is not None'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if noise_mech is None:
            if "DPOptimizer" in opt_name:
                if context.get_context('mode') != context.PYNATIVE_MODE:
                    msg = 'DPOptimizer only supports pynative mode currently.'
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
                if 'Ada' in opt._mech.__class__.__name__ and clip_mech is not None:
                    msg = "When DPOptimizer's mech method is adaptive, clip_mech must be None."
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
            else:
                msg = 'DPModel should be configured with either noise_mech ' \
                      'or DPOptimizer, please refer to the example.'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
        self._noise_mech = noise_mech
        if noise_mech is not None:
            if 'Ada' in noise_mech.__class__.__name__ and clip_mech is not None:
                msg = 'When noise_mech is Adaptive, clip_mech must be None.'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)

        if clip_mech is None or isinstance(clip_mech, Cell):
            self._clip_mech = clip_mech
        super(DPModel, self).__init__(**kwargs)
Example 27
 def __init__(self,
              network,
              num_classes,
              model_type='classification',
              reserve_ratio=0.3,
              max_iters=50,
              overshoot=0.02,
              norm_level=2,
              bounds=None,
              sparse=True):
     super(DeepFool, self).__init__()
     self._network = check_model('network', network, Cell)
     self._max_iters = check_int_positive('max_iters', max_iters)
     self._overshoot = check_value_positive('overshoot', overshoot)
     self._norm_level = check_norm_level(norm_level)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._net_grad = GradWrap(self._network)
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds,
                                                [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._sparse = check_param_type('sparse', sparse, bool)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(
             self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                    reserve_ratio)
     if self._reserve_ratio > 1:
         msg = 'reserve_ratio should not be greater than 1.0, but got {}.'.format(
             self._reserve_ratio)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
Example 28
 def __init__(self, micro_batches=2, norm_clip=1.0, mech=None, **kwargs):
     if micro_batches:
         self._micro_batches = check_int_positive('micro_batches', micro_batches)
     else:
         self._micro_batches = None
     norm_clip = check_param_type('norm_clip', norm_clip, float)
     self._norm_clip = check_value_positive('norm_clip', norm_clip)
     if mech is not None and "DPOptimizer" in kwargs['optimizer'].__class__.__name__:
         raise ValueError('DPOptimizer is not supported while mech is not None')
     if mech is None:
         if "DPOptimizer" in kwargs['optimizer'].__class__.__name__:
             if context.get_context('mode') != context.PYNATIVE_MODE:
                 raise ValueError('DPOptimizer only supports pynative mode currently.')
         else:
             raise ValueError('DPModel should be configured with either mech '
                              'or DPOptimizer, please refer to the example.')
     self._mech = mech
     super(DPModel, self).__init__(**kwargs)
Example 29
 def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), nb_iter=5,
              loss_fn=None):
     super(IterativeGradientMethod, self).__init__()
     self._network = check_model('network', network, Cell)
     self._eps = check_value_positive('eps', eps)
     self._eps_iter = check_value_positive('eps_iter', eps_iter)
     self._nb_iter = check_int_positive('nb_iter', nb_iter)
     self._bounds = None
     if bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     if loss_fn is None:
         self._loss_grad = network
     else:
         self._loss_grad = GradWrapWithLoss(WithLossCell(self._network, loss_fn))
     self._loss_grad.set_train()
Example 30
    def batch_generate(self, inputs, labels, batch_size=64):
        """
        Generate adversarial examples in batch, based on input samples and
        their labels.

        Args:
            inputs (numpy.ndarray): Samples based on which adversarial
                examples are generated.
            labels (numpy.ndarray): Labels of samples, whose values are
                determined by specific attacks.
            batch_size (int): The number of samples in one batch.

        Returns:
            numpy.ndarray, generated adversarial examples

        Examples:
            >>> inputs = np.array([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]])
            >>> labels = np.array([3, 0])
            >>> advs = attack.batch_generate(inputs, labels, batch_size=2)
        """
        arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels',
                                              labels)
        len_x = arr_x.shape[0]
        batch_size = check_int_positive('batch_size', batch_size)
        batchs = int(len_x / batch_size)
        rest = len_x - batchs * batch_size
        res = []
        for i in range(batchs):
            x_batch = arr_x[i * batch_size:(i + 1) * batch_size]
            y_batch = arr_y[i * batch_size:(i + 1) * batch_size]
            adv_x = self.generate(x_batch, y_batch)
            # Black-box attack methods return 3 values; keep only the
            # second (the adversarial examples).
            res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)

        if rest != 0:
            x_batch = arr_x[batchs * batch_size:]
            y_batch = arr_y[batchs * batch_size:]
            adv_x = self.generate(x_batch, y_batch)
            # Black-box attack methods return 3 values; keep only the
            # second (the adversarial examples).
            res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)

        adv_x = np.concatenate(res, axis=0)
        return adv_x
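Unlike batch_defense above, the remainder here is processed in a final short
batch, so no sample is dropped; the slicing arithmetic with illustrative
sizes:

 len_x, batch_size = 10, 4
 batchs = int(len_x / batch_size)    # 2 full batches
 rest = len_x - batchs * batch_size  # 2 samples, handled by the final slice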