def __init__(self, network, num_classes, max_iters=50, overshoot=0.02,
             norm_level=2, bounds=None, sparse=True):
    super(DeepFool, self).__init__()
    self._network = check_model('network', network, Cell)
    self._network.set_grad(True)
    self._max_iters = check_int_positive('max_iters', max_iters)
    self._overshoot = check_value_positive('overshoot', overshoot)
    self._norm_level = check_norm_level(norm_level)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._net_grad = GradWrap(self._network)
    # `bounds` is optional: validate its type and elements only when provided,
    # otherwise iterating over the default None would raise a TypeError.
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._sparse = check_param_type('sparse', sparse, bool)
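# Usage sketch (illustrative only, not library code): any trained
# mindspore.nn.Cell classifier works as `network`, and generate(inputs, labels)
# is the standard MindArmour attack entry point, fed with numpy arrays.
# `ToyNet` below is a hypothetical stand-in model.
#
#     import numpy as np
#     from mindspore import nn
#
#     class ToyNet(nn.Cell):
#         def __init__(self):
#             super(ToyNet, self).__init__()
#             self._fc = nn.Dense(32, 10)
#
#         def construct(self, x):
#             return self._fc(x)
#
#     attack = DeepFool(ToyNet(), num_classes=10, norm_level=2,
#                       bounds=(0.0, 1.0), sparse=True)
#     adv = attack.generate(np.random.rand(4, 32).astype(np.float32),
#                           np.random.randint(0, 10, size=4).astype(np.int32))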
def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
             theta=1.0, max_iteration=1000, max_count=3, increase=True,
             sparse=True):
    super(JSMAAttack, self).__init__()
    LOGGER.debug(TAG, "init jsma class.")
    self._network = check_model('network', network, Cell)
    self._min = check_value_non_negative('box_min', box_min)
    self._max = check_value_non_negative('box_max', box_max)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._theta = check_value_positive('theta', theta)
    self._max_iter = check_int_positive('max_iteration', max_iteration)
    self._max_count = check_int_positive('max_count', max_count)
    self._increase = check_param_type('increase', increase, bool)
    self._net_grad = GradWrap(self._network)
    self._bit_map = None
    self._sparse = check_param_type('sparse', sparse, bool)
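# Usage sketch (illustrative only, not library code): reading from the
# parameter names, `theta` is the change applied to a selected feature and
# `max_count` caps how many times a single feature may be modified. Reuses the
# hypothetical ToyNet from the DeepFool sketch above.
#
#     attack = JSMAAttack(ToyNet(), num_classes=10, theta=1.0,
#                         max_iteration=1000, max_count=3, increase=True)
#     adv = attack.generate(np.random.rand(4, 32).astype(np.float32),
#                           np.random.randint(0, 10, size=4).astype(np.int32))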
def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
             bin_search_steps=5, max_iterations=1000, confidence=0,
             learning_rate=5e-3, initial_const=1e-2,
             abort_early_check_ratio=5e-2, targeted=False, fast=True,
             abort_early=True, sparse=True):
    LOGGER.info(TAG, "init CW object.")
    super(CarliniWagnerL2Attack, self).__init__()
    self._network = check_model('network', network, Cell)
    self._network.set_grad(True)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._min = check_param_type('box_min', box_min, float)
    self._max = check_param_type('box_max', box_max, float)
    self._bin_search_steps = check_int_positive('bin_search_steps',
                                                bin_search_steps)
    self._max_iterations = check_int_positive('max_iterations', max_iterations)
    self._confidence = check_param_multi_types('confidence', confidence,
                                               [int, float])
    self._learning_rate = check_value_positive('learning_rate', learning_rate)
    self._initial_const = check_value_positive('initial_const', initial_const)
    self._abort_early = check_param_type('abort_early', abort_early, bool)
    self._fast = check_param_type('fast', fast, bool)
    self._abort_early_check_ratio = check_value_positive(
        'abort_early_check_ratio', abort_early_check_ratio)
    self._targeted = check_param_type('targeted', targeted, bool)
    self._net_grad = GradWrap(self._network)
    self._sparse = check_param_type('sparse', sparse, bool)
    self._dtype = None
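# Usage sketch (illustrative only, not library code): Carlini-Wagner runs
# `bin_search_steps` rounds of binary search over the trade-off constant
# (starting at `initial_const`), each with up to `max_iterations` optimizer
# steps; `confidence` is the required logit margin (kappa). ToyNet as above.
#
#     attack = CarliniWagnerL2Attack(ToyNet(), num_classes=10, confidence=0,
#                                    targeted=False, fast=True)
#     adv = attack.generate(np.random.rand(4, 32).astype(np.float32),
#                           np.random.randint(0, 10, size=4).astype(np.int32))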
def __init__(self, network, num_classes, model_type='classification',
             reserve_ratio=0.3, max_iters=50, overshoot=0.02, norm_level=2,
             bounds=None, sparse=True):
    super(DeepFool, self).__init__()
    self._network = check_model('network', network, Cell)
    self._max_iters = check_int_positive('max_iters', max_iters)
    self._overshoot = check_value_positive('overshoot', overshoot)
    self._norm_level = check_norm_level(norm_level)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._net_grad = GradWrap(self._network)
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._sparse = check_param_type('sparse', sparse, bool)
    self._model_type = check_param_type('model_type', model_type, str)
    if self._model_type not in ('classification', 'detection'):
        msg = "Only 'classification' or 'detection' is supported now, " \
              "but got {}.".format(self._model_type)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                   reserve_ratio)
    if self._reserve_ratio > 1:
        msg = 'reserve_ratio should be less than 1.0, but got {}.'.format(
            self._reserve_ratio)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
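# Usage sketch for detection mode (illustrative only; the input/label layout
# is an assumption read from _generate_detection below, where `inputs` is
# (images, *auxiliary_inputs) and `labels` is (gt_boxes, gt_labels)):
#
#     attack = DeepFool(detector_net, num_classes=81,
#                       model_type='detection', reserve_ratio=0.3,
#                       bounds=(0.0, 1.0))
#     adv_images = attack.generate((images, *aux_inputs),
#                                  (gt_boxes, gt_labels))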
def _generate_detection(self, inputs, labels):
    """Generate adversarial examples in the detection scenario."""
    images, auxiliary_inputs = inputs[0], inputs[1:]
    gt_boxes, gt_labels = labels
    _, gt_object_nums = _deepfool_detection_scores(inputs, gt_boxes,
                                                   gt_labels, self._network)
    if not self._sparse:
        gt_labels = np.argmax(gt_labels, axis=2)
    # Take the dominant ground-truth class of each image as its origin label.
    origin_labels = np.zeros(gt_labels.shape[0])
    for i in range(gt_labels.shape[0]):
        origin_labels[i] = np.argmax(np.bincount(gt_labels[i]))
    images_dtype = images.dtype
    iteration = 0
    num_boxes = gt_labels.shape[1]
    merge_net = _GetLogits(self._network)
    detection_net_grad = GradWrap(merge_net)
    weight = np.squeeze(np.zeros(images.shape[1:]))
    r_tot = np.zeros(images.shape)
    x_origin = images
    while (not _is_success((images,) + auxiliary_inputs, gt_boxes, gt_labels,
                           self._network, gt_object_nums, self._reserve_ratio)
           and iteration < self._max_iters):
        preds_logits = merge_net(*to_tensor_tuple(images),
                                 *to_tensor_tuple(auxiliary_inputs)).asnumpy()
        grads = jacobian_matrix_for_detection(detection_net_grad,
                                              (images,) + auxiliary_inputs,
                                              num_boxes, self._num_classes)
        for idx in range(images.shape[0]):
            diff_w = np.inf
            label = int(origin_labels[idx])
            auxiliary_input_i = tuple()
            for item in auxiliary_inputs:
                auxiliary_input_i += (np.expand_dims(item[idx], axis=0),)
            gt_boxes_i = np.expand_dims(gt_boxes[idx], axis=0)
            gt_labels_i = np.expand_dims(gt_labels[idx], axis=0)
            inputs_i = (np.expand_dims(images[idx], axis=0),) + auxiliary_input_i
            if _is_success(inputs_i, gt_boxes_i, gt_labels_i, self._network,
                           gt_object_nums[idx], self._reserve_ratio):
                # This sample is already adversarial; leave it unchanged.
                continue
            for k in range(self._num_classes):
                if k == label:
                    continue
                # Linearize the boundary between class k and the origin label:
                # normal w_k from the Jacobian, offset f_k from the logit gap.
                w_k = grads[k, idx, ...] - grads[label, idx, ...]
                f_k = np.mean(np.abs(preds_logits[idx, :, k, ...]
                                     - preds_logits[idx, :, label, ...]))
                if self._norm_level == 2 or self._norm_level == '2':
                    diff_w_k = abs(f_k) / (np.linalg.norm(w_k) + 1e-8)
                elif self._norm_level == np.inf or self._norm_level == 'inf':
                    diff_w_k = abs(f_k) / (np.linalg.norm(w_k, ord=1) + 1e-8)
                else:
                    msg = 'ord {} is not available.'.format(str(self._norm_level))
                    LOGGER.error(TAG, msg)
                    raise NotImplementedError(msg)
                if diff_w_k < diff_w:
                    # Keep the closest (cheapest-to-cross) decision boundary.
                    diff_w = diff_w_k
                    weight = w_k
            # Minimal perturbation that crosses the chosen boundary.
            if self._norm_level == 2 or self._norm_level == '2':
                r_i = diff_w * weight / (np.linalg.norm(weight) + 1e-8)
            elif self._norm_level == np.inf or self._norm_level == 'inf':
                r_i = diff_w * np.sign(weight) \
                      / (np.linalg.norm(weight, ord=1) + 1e-8)
            else:
                msg = 'ord {} is not available in normalization.'.format(
                    str(self._norm_level))
                LOGGER.error(TAG, msg)
                raise NotImplementedError(msg)
            r_tot[idx, ...] = r_tot[idx, ...] + r_i
        if self._bounds is not None:
            clip_min, clip_max = self._bounds
            images = x_origin + (1 + self._overshoot) * r_tot * (clip_max - clip_min)
            images = np.clip(images, clip_min, clip_max)
        else:
            images = x_origin + (1 + self._overshoot) * r_tot
        iteration += 1
        images = images.astype(images_dtype)
        del preds_logits, grads
    return images
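# The per-class search above is the DeepFool linearization: each candidate
# class k is treated as a hyperplane with normal w_k = grad_k - grad_label and
# offset f_k, and the cheapest crossing wins. A minimal numpy sketch of that
# single step under the L2 norm (function and argument names are hypothetical):
#
#     import numpy as np
#
#     def deepfool_l2_step(grads, logits, label):
#         """grads: (num_classes,) + x.shape; logits: (num_classes,)."""
#         best_dist, best_w = np.inf, None
#         for k in range(len(logits)):
#             if k == label:
#                 continue
#             w_k = grads[k] - grads[label]          # boundary normal
#             f_k = logits[k] - logits[label]        # boundary offset
#             dist = abs(f_k) / (np.linalg.norm(w_k) + 1e-8)
#             if dist < best_dist:
#                 best_dist, best_w = dist, w_k
#         # Minimal L2 perturbation that crosses the chosen hyperplane.
#         return best_dist * best_w / (np.linalg.norm(best_w) + 1e-8)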