Example #1
    def __init__(self,
                 norm_bound=1.5,
                 initial_noise_multiplier=5.0,
                 alpha=6e-4,
                 decay_policy='Step'):
        super(AdaGaussianRandom, self).__init__()
        initial_noise_multiplier = check_value_positive(
            'initial_noise_multiplier', initial_noise_multiplier)
        initial_noise_multiplier = Tensor(
            np.array(initial_noise_multiplier, np.float32))
        self._initial_noise_multiplier = Parameter(
            initial_noise_multiplier, name='initial_noise_multiplier')
        self._noise_multiplier = Parameter(initial_noise_multiplier,
                                           name='noise_multiplier')
        norm_bound = check_value_positive('norm_bound', norm_bound)
        self._norm_bound = Tensor(np.array(norm_bound, np.float32))

        alpha = check_param_type('alpha', alpha, float)
        self._alpha = Tensor(np.array(alpha, np.float32))

        self._decay_policy = check_param_type('decay_policy', decay_policy,
                                              str)
        self._mean = 0.0
        self._sub = P.Sub()
        self._mul = P.Mul()
        self._add = P.TensorAdd()
        self._div = P.Div()
        self._stddev = self._update_stddev()
        self._dtype = mstype.float32
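Every constructor in this listing funnels its arguments through the same small validators (check_value_positive, check_param_type, and friends). A minimal sketch of the assumed contract -- raise on bad input, return the value unchanged -- not the actual mindarmour implementations:

# Hypothetical re-implementations, for illustration only.
def check_value_positive(arg_name, arg_value):
    """Assumed contract: reject values <= 0, return the value unchanged."""
    if arg_value <= 0:
        raise ValueError('{} must be positive, but got {}.'.format(arg_name, arg_value))
    return arg_value

def check_param_type(arg_name, arg_value, valid_type):
    """Assumed contract: reject values of the wrong type."""
    if not isinstance(arg_value, valid_type):
        raise TypeError('{} must be of type {}, but got {}.'.format(
            arg_name, valid_type, type(arg_value)))
    return arg_value

norm_bound = check_value_positive('norm_bound', 1.5)   # returns 1.5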
Example #2
 def __init__(self,
              model,
              pop_size=6,
              mutation_rate=0.005,
              per_bounds=0.15,
              max_steps=1000,
              step_size=0.20,
              temp=0.3,
              bounds=(0, 1.0),
              adaptive=False,
              sparse=True):
     super(GeneticAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._step_size = check_value_positive('step_size', step_size)
     self._temp = check_value_positive('temp', temp)
     self._max_steps = check_int_positive('max_steps', max_steps)
     self._mutation_rate = check_value_positive('mutation_rate',
                                                mutation_rate)
     self._adaptive = check_param_type('adaptive', adaptive, bool)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     # initial global optimum fitness value
     self._best_fit = -1
     # count times of no progress
     self._plateau_times = 0
     # count times of changing attack step
     self._adap_times = 0
     self._sparse = check_param_type('sparse', sparse, bool)
Example #3
    def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
                 max_eps=10.0, target_delta=1e-3, noise_decay_mode='Time',
                 noise_decay_rate=6e-4, per_print_times=50, dataset_sink_mode=False):
        super(ZCDPMonitor, self).__init__()
        check_int_positive('num_samples', num_samples)
        check_int_positive('batch_size', batch_size)
        if batch_size >= num_samples:
            msg = 'batch_size must be less than num_samples.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        check_value_positive('initial_noise_multiplier',
                             initial_noise_multiplier)
        if noise_decay_mode is not None:
            if noise_decay_mode not in ('Step', 'Time', 'Exp'):
                msg = "Noise decay mode must be in ('Step', 'Time', 'Exp'), but got {}.".\
                    format(noise_decay_mode)
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
            noise_decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
            check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
        check_int_positive('per_print_times', per_print_times)
        check_param_type('dataset_sink_mode', dataset_sink_mode, bool)

        self._num_samples = num_samples
        self._batch_size = batch_size
        self._initial_noise_multiplier = initial_noise_multiplier
        self._max_eps = check_value_positive('max_eps', max_eps)
        self._target_delta = check_param_in_range('target_delta', target_delta, 0.0, 1.0)
        self._noise_decay_mode = noise_decay_mode
        self._noise_decay_rate = noise_decay_rate
        # initialize zcdp
        self._zcdp = 0
        self._per_print_times = per_print_times
        if dataset_sink_mode:
            self._per_print_times = int(self._num_samples / self._batch_size)
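The monitor above tracks a zero-concentrated DP (zCDP) budget against max_eps. A minimal sketch of that bookkeeping, using the standard results that a Gaussian mechanism with noise multiplier sigma is rho-zCDP with rho = 1/(2*sigma^2), that zCDP composes additively, and that rho-zCDP implies (rho + 2*sqrt(rho*ln(1/delta)), delta)-DP. ZCDPMonitor's exact accounting (e.g. its handling of noise decay) may differ:

import math

def step_zcdp(noise_multiplier):
    # rho-zCDP of one Gaussian mechanism application: rho = 1 / (2 * sigma^2)
    return 1.0 / (2.0 * noise_multiplier ** 2)

def zcdp_to_eps(zcdp, target_delta):
    # standard conversion: rho-zCDP gives (rho + 2*sqrt(rho*ln(1/delta)), delta)-DP
    return zcdp + 2.0 * math.sqrt(zcdp * math.log(1.0 / target_delta))

zcdp = 0.0
for _ in range(100):                         # 100 training steps
    zcdp += step_zcdp(noise_multiplier=1.5)  # zCDP composes additively
print(zcdp_to_eps(zcdp, target_delta=1e-3))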
Example #4
 def __init__(self, model, model_type='classification', targeted=False, reserve_ratio=0.3, sparse=True,
              step_size=0.5, per_bounds=0.6, c1=2.0, c2=2.0, c=2.0, pop_size=6, t_max=1000, pm=0.5, bounds=None):
     super(PSOAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._step_size = check_value_positive('step_size', step_size)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._c1 = check_value_positive('c1', c1)
     self._c2 = check_value_positive('c2', c2)
     self._c = check_value_positive('c', c)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._pm = check_value_non_negative('pm', pm)
     if self._pm > 1:
         msg = "pm should not be greater than 1.0, but got {}.".format(self._pm)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._targeted = check_param_type('targeted', targeted, bool)
     self._t_max = check_int_positive('t_max', t_max)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._reserve_ratio = check_value_non_negative('reserve_ratio', reserve_ratio)
     if self._reserve_ratio > 1:
         msg = "reserve_ratio should not be greater than 1.0, but got {}.".format(self._reserve_ratio)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
Example #5
 def __init__(self,
              network,
              eps=0.07,
              alpha=None,
              bounds=None,
              loss_fn=None):
     super(GradientMethod, self).__init__()
     self._network = check_model('network', network, Cell)
     self._eps = check_value_positive('eps', eps)
     self._dtype = None
     if bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds,
                                                [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     else:
         self._bounds = bounds
     if alpha is not None:
         self._alpha = check_value_positive('alpha', alpha)
     else:
         self._alpha = alpha
     if loss_fn is None:
         loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                 sparse=False)
     with_loss_cell = WithLossCell(self._network, loss_fn)
     self._grad_all = GradWrapWithLoss(with_loss_cell)
     self._grad_all.set_train()
Example #6
 def __init__(self, norm_bound=0.5, initial_noise_multiplier=1.5, seed=0):
     super(GaussianRandom, self).__init__()
     self._norm_bound = check_value_positive('norm_bound', norm_bound)
     self._norm_bound = Tensor(norm_bound, mstype.float32)
     self._initial_noise_multiplier = check_value_positive(
         'initial_noise_multiplier', initial_noise_multiplier)
     self._initial_noise_multiplier = Tensor(initial_noise_multiplier,
                                             mstype.float32)
     self._mean = Tensor(0, mstype.float32)
     self._normal = P.Normal(seed=seed)
Example #7
 def __init__(self, norm_bound=1.5, initial_noise_multiplier=5.0):
     super(GaussianRandom, self).__init__()
     self._norm_bound = check_value_positive('norm_bound', norm_bound)
     self._initial_noise_multiplier = check_value_positive(
         'initial_noise_multiplier',
         initial_noise_multiplier,
     )
     stddev = self._norm_bound * self._initial_noise_multiplier
     self._stddev = stddev
     self._mean = 0
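Examples #6 and #7 come down to the same operation: zero-mean Gaussian noise with stddev = norm_bound * initial_noise_multiplier. A numpy illustration of that sampling (standalone, not the MindSpore graph ops the classes actually dispatch to):

import numpy as np

def gaussian_noise(shape, norm_bound=1.5, noise_multiplier=5.0, seed=0):
    # stddev mirrors the product computed in Example #7
    stddev = norm_bound * noise_multiplier
    rng = np.random.default_rng(seed)
    return rng.normal(loc=0.0, scale=stddev, size=shape).astype(np.float32)

grad = np.ones((4,), np.float32)
noisy_grad = grad + gaussian_noise(grad.shape)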
Example #8
 def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, decay_policy=None):
     super(NoiseGaussianRandom, self).__init__()
     norm_bound = check_param_type('norm_bound', norm_bound, float)
     self._norm_bound = check_value_positive('norm_bound', norm_bound)
     self._norm_bound = Tensor(norm_bound, mstype.float32)
     initial_noise_multiplier = check_param_type('initial_noise_multiplier', initial_noise_multiplier, float)
     self._initial_noise_multiplier = check_value_positive('initial_noise_multiplier',
                                                           initial_noise_multiplier)
     self._initial_noise_multiplier = Tensor(initial_noise_multiplier, mstype.float32)
     self._mean = Tensor(0, mstype.float32)
     if decay_policy is not None:
          raise ValueError('decay_policy must be None in NoiseGaussianRandom class, but got {}.'.format(decay_policy))
     self._decay_policy = decay_policy
     seed = check_param_type('seed', seed, int)
     self._seed = check_value_non_negative('seed', seed)
Example #9
 def __init__(self,
              network,
              eps=1e-5,
              bounds=(0.0, 1.0),
              is_targeted=True,
              nb_iter=150,
              search_iters=30,
              loss_fn=None,
              sparse=False):
     super(LBFGS, self).__init__()
     self._network = check_model('network', network, Cell)
     self._eps = check_value_positive('eps', eps)
     self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
     self._nb_iter = check_int_positive('nb_iter', nb_iter)
     self._search_iters = check_int_positive('search_iters', search_iters)
     if loss_fn is None:
         loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                 sparse=False)
     with_loss_cell = WithLossCell(self._network, loss_fn)
     self._grad_all = GradWrapWithLoss(with_loss_cell)
     self._dtype = None
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     self._sparse = check_param_type('sparse', sparse, bool)
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     box_max, box_min = bounds
     if box_max < box_min:
         self._box_min = box_max
         self._box_max = box_min
     else:
         self._box_min = box_min
         self._box_max = box_max
Example #10
    def __init__(self, network, optimizer, sens=1.0, micro_batches=None, norm_clip=1.0, mech=None):
        super(_TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.network.add_flags(defer_inline=True)
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
        self.sens = sens
        self.reducer_flag = False
        self.grad_reducer = None
        parallel_mode = _get_parallel_mode()
        if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
            self.reducer_flag = True
        if self.reducer_flag:
            mean = _get_mirror_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)

        # dp params
        self._micro_batches = micro_batches
        norm_clip = check_param_type('norm_clip', norm_clip, float)
        self._l2_norm = check_value_positive('norm_clip', norm_clip)
        self._split = P.Split(0, self._micro_batches)
        self._clip_by_global_norm = _ClipGradients()
        self._mech = mech
        self._tuple_add = _TupleAdd()
        self._hyper_map = C.HyperMap()
        self._micro_float = Tensor(micro_batches, mstype.float32)
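The "dp params" block here (and the identical one in Example #21) wires up the usual DP-SGD recipe: split the batch into micro_batches groups, clip each group's gradient to norm_clip, add noise from mech, and average. A numpy sketch of that pipeline, illustrative only; the cell above does this with graph ops over tuples of parameter gradients:

import numpy as np

def dp_gradient(micro_grads, norm_clip=1.0, noise_multiplier=1.0, seed=0):
    rng = np.random.default_rng(seed)
    clipped = []
    for g in micro_grads:                      # one gradient per micro-batch
        norm = np.linalg.norm(g)
        clipped.append(g * min(1.0, norm_clip / max(norm, 1e-12)))
    total = np.sum(clipped, axis=0)
    total += rng.normal(0.0, norm_clip * noise_multiplier, size=total.shape)
    return total / len(micro_grads)            # cf. _micro_float above

micro_grads = [np.ones(3), 2.0 * np.ones(3)]   # toy per-micro-batch gradients
print(dp_gradient(micro_grads))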
Example #11
    def __init__(self, decay_policy='Linear', learning_rate=0.001,
                 target_unclipped_quantile=0.9, fraction_stddev=0.01, seed=0):
        super(AdaClippingWithGaussianRandom, self).__init__()
        if decay_policy not in ['Linear', 'Geometric']:
            msg = "decay policy of adaptive clip must be in ['Linear', 'Geometric'], \
                but got: {}".format(decay_policy)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        self._decay_policy = decay_policy
        learning_rate = check_param_type('learning_rate', learning_rate, float)
        learning_rate = check_value_positive('learning_rate', learning_rate)
        self._learning_rate = Tensor(learning_rate, mstype.float32)
        fraction_stddev = check_param_type('fraction_stddev', fraction_stddev, float)
        self._fraction_stddev = Tensor(fraction_stddev, mstype.float32)
        target_unclipped_quantile = check_param_type('target_unclipped_quantile',
                                                     target_unclipped_quantile,
                                                     float)
        self._target_unclipped_quantile = Tensor(target_unclipped_quantile,
                                                 mstype.float32)

        self._zero = Tensor(0, mstype.float32)
        self._add = P.TensorAdd()
        self._sub = P.Sub()
        self._mul = P.Mul()
        self._exp = P.Exp()
        seed = check_param_type('seed', seed, int)
        self._seed = check_value_non_negative('seed', seed)
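The operators registered above (Sub, Mul, Exp, TensorAdd) suggest the standard adaptive-clipping update in the style of Andrew et al.: move the norm bound so that roughly target_unclipped_quantile of per-sample gradients stay unclipped, with fraction_stddev noising the measured fraction. A hedged plain-Python sketch; the exact MindArmour formula may differ:

import math
import random

def update_norm_bound(norm_bound, unclipped_fraction,
                      target_quantile=0.9, learning_rate=0.001,
                      fraction_stddev=0.01, decay_policy='Linear', seed=0):
    random.seed(seed)
    # noise the empirical unclipped fraction, as fraction_stddev implies
    noisy_fraction = unclipped_fraction + random.gauss(0.0, fraction_stddev)
    error = noisy_fraction - target_quantile
    if decay_policy == 'Linear':
        return norm_bound - learning_rate * error
    if decay_policy == 'Geometric':
        return norm_bound * math.exp(-learning_rate * error)
    raise ValueError("decay policy must be in ['Linear', 'Geometric'].")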
Example #12
    def set_threshold(self, threshold):
        """
        Set the parameters threshold.

        Args:
            threshold (float): Detection threshold. Default: None.
        """
        self._threshold = check_value_positive('threshold', threshold)
Example #13
 def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0), nb_iter=5,
              loss_fn=None):
     super(IterativeGradientMethod, self).__init__()
     self._network = check_model('network', network, Cell)
     self._eps = check_value_positive('eps', eps)
     self._eps_iter = check_value_positive('eps_iter', eps_iter)
     self._nb_iter = check_int_positive('nb_iter', nb_iter)
     self._bounds = None
     if bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     if loss_fn is None:
         self._loss_grad = network
     else:
         self._loss_grad = GradWrapWithLoss(WithLossCell(self._network, loss_fn))
     self._loss_grad.set_train()
Example #14
 def __init__(self,
              model,
              number_points=10,
              initial_radius=0.0,
              max_radius=1.0,
              search_step=0.01,
              degrade_limit=0.0,
              sparse=False):
     super(RegionBasedDetector, self).__init__()
     self._model = check_model('targeted model', model, Model)
     self._number_points = check_int_positive('number_points',
                                              number_points)
     self._initial_radius = check_value_non_negative(
         'initial_radius', initial_radius)
     self._max_radius = check_value_positive('max_radius', max_radius)
     self._search_step = check_value_positive('search_step', search_step)
     self._degrade_limit = check_value_non_negative('degrade_limit',
                                                    degrade_limit)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._radius = None
Example #15
 def __init__(self,
              network,
              input_shape,
              input_bound,
              loss_weights=(1, 0.2, 5)):
     self._network = check_param_type('network', network, Cell)
     for sub_loss_weight in loss_weights:
         check_value_positive('sub_loss_weight', sub_loss_weight)
     self._loss = InversionLoss(self._network, loss_weights)
     self._input_shape = check_param_type('input_shape', input_shape, tuple)
     for shape_dim in input_shape:
         check_int_positive('shape_dim', shape_dim)
     self._input_bound = check_param_multi_types('input_bound', input_bound,
                                                 [list, tuple])
     for value_bound in self._input_bound:
         check_param_multi_types('value_bound', value_bound, [float, int])
     if self._input_bound[0] > self._input_bound[1]:
         msg = 'input_bound[0] should not be larger than input_bound[1], but got them as {} and {}'.format(
             self._input_bound[0], self._input_bound[1])
         raise ValueError(msg)
Example #16
    def set_threshold(self, num_of_neighbors, threshold):
        """
        Set the parameters num_of_neighbors and threshold.

        Args:
            num_of_neighbors (int): Number of the nearest neighbors.
            threshold (float): Detection threshold. Default: None.
        """
        self._num_of_neighbors = check_int_positive('num_of_neighbors',
                                                    num_of_neighbors)
        self._threshold = check_value_positive('threshold', threshold)
Example #17
 def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
              is_targeted=False, nb_iter=5, decay_factor=1.0,
              norm_level='inf', loss_fn=None):
     super(MomentumIterativeMethod, self).__init__(network,
                                                   eps=eps,
                                                   eps_iter=eps_iter,
                                                   bounds=bounds,
                                                   nb_iter=nb_iter,
                                                   loss_fn=loss_fn)
     self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
     self._decay_factor = check_value_positive('decay_factor', decay_factor)
     self._norm_level = check_norm_level(norm_level)
Example #18
 def __init__(self, model, model_type='classification', targeted=True, reserve_ratio=0.3, sparse=True,
              pop_size=6, mutation_rate=0.005, per_bounds=0.15, max_steps=1000, step_size=0.20, temp=0.3,
              bounds=(0, 1.0), adaptive=False, c=0.1):
     super(GeneticAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._targeted = check_param_type('targeted', targeted, bool)
     self._reserve_ratio = check_value_non_negative('reserve_ratio', reserve_ratio)
     if self._reserve_ratio > 1:
         msg = "reserve_ratio should not be greater than 1.0, but got {}.".format(self._reserve_ratio)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._step_size = check_value_positive('step_size', step_size)
     self._temp = check_value_positive('temp', temp)
     self._max_steps = check_int_positive('max_steps', max_steps)
     self._mutation_rate = check_value_non_negative('mutation_rate', mutation_rate)
     if self._mutation_rate > 1:
         msg = "mutation_rate should not be greater than 1.0, but got {}.".format(self._mutation_rate)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._adaptive = check_param_type('adaptive', adaptive, bool)
     # initial global optimum fitness value
     self._best_fit = -np.inf
     # count times of no progress
     self._plateau_times = 0
     # count times of changing attack step_size
     self._adap_times = 0
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._c = check_value_positive('c', c)
Example #19
 def __init__(self,
              model,
              step_size=0.5,
              per_bounds=0.6,
              c1=2.0,
              c2=2.0,
              c=2.0,
              pop_size=6,
              t_max=1000,
              pm=0.5,
              bounds=None,
              targeted=False,
              reduction_iters=3,
              sparse=True):
     super(PSOAttack, self).__init__()
     self._model = check_model('model', model, BlackModel)
     self._step_size = check_value_positive('step_size', step_size)
     self._per_bounds = check_value_positive('per_bounds', per_bounds)
     self._c1 = check_value_positive('c1', c1)
     self._c2 = check_value_positive('c2', c2)
     self._c = check_value_positive('c', c)
     self._pop_size = check_int_positive('pop_size', pop_size)
     self._pm = check_value_positive('pm', pm)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
     self._targeted = check_param_type('targeted', targeted, bool)
     self._t_max = check_int_positive('t_max', t_max)
     self._reduce_iters = check_int_positive('reduction_iters',
                                             reduction_iters)
     self._sparse = check_param_type('sparse', sparse, bool)
Example #20
 def __init__(self,
              network,
              num_classes,
              box_min=0.0,
              box_max=1.0,
              bin_search_steps=5,
              max_iterations=1000,
              confidence=0,
              learning_rate=5e-3,
              initial_const=1e-2,
              abort_early_check_ratio=5e-2,
              targeted=False,
              fast=True,
              abort_early=True,
              sparse=True):
     LOGGER.info(TAG, "init CW object.")
     super(CarliniWagnerL2Attack, self).__init__()
     self._network = check_model('network', network, Cell)
     self._network.set_grad(True)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._min = check_param_type('box_min', box_min, float)
     self._max = check_param_type('box_max', box_max, float)
     self._bin_search_steps = check_int_positive('search_steps',
                                                 bin_search_steps)
     self._max_iterations = check_int_positive('max_iterations',
                                               max_iterations)
     self._confidence = check_param_multi_types('confidence', confidence,
                                                [int, float])
     self._learning_rate = check_value_positive('learning_rate',
                                                learning_rate)
     self._initial_const = check_value_positive('initial_const',
                                                initial_const)
     self._abort_early = check_param_type('abort_early', abort_early, bool)
     self._fast = check_param_type('fast', fast, bool)
     self._abort_early_check_ratio = check_value_positive(
         'abort_early_check_ratio', abort_early_check_ratio)
     self._targeted = check_param_type('targeted', targeted, bool)
     self._net_grad = GradWrap(self._network)
     self._sparse = check_param_type('sparse', sparse, bool)
     self._dtype = None
Example #21
    def __init__(self, network, optimizer, scale_update_cell=None, micro_batches=None, norm_clip=1.0, mech=None):
        super(_TrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.network.add_flags(defer_inline=True)
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
        self.hyper_map = C.HyperMap()
        if context.get_context("device_target") == "GPU":
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.gpu_target = False
            self.alloc_status = NPUAllocFloatStatus()
            self.get_status = NPUGetFloatStatus()
            self.clear_status = NPUClearFloatStatus()
        self.reduce_sum = ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = LessEqual()
        self.depend_parameter_use = ControlDepend(depend_mode=1)
        self.allreduce = P.AllReduce()
        self.parallel_mode = _get_parallel_mode()
        self.grad_reducer = F.identity
        self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]
        if self.reducer_flag:
            mean = _get_mirror_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
        self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE

        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
        self.add_flags(has_effect=True)

        # dp params
        self._micro_batches = micro_batches
        norm_clip = check_param_type('norm_clip', norm_clip, float)
        self._l2_norm = check_value_positive('norm_clip', norm_clip)
        self._split = P.Split(0, self._micro_batches)
        self._clip_by_global_norm = _ClipGradients()
        self._mech = mech
        self._tuple_add = _TupleAdd()
        self._hyper_map = C.HyperMap()
        self._micro_float = Tensor(micro_batches, mstype.float32)
Example #22
 def __init__(self, network, num_classes, max_iters=50, overshoot=0.02,
              norm_level=2, bounds=None, sparse=True):
     super(DeepFool, self).__init__()
     self._network = check_model('network', network, Cell)
     self._network.set_grad(True)
     self._max_iters = check_int_positive('max_iters', max_iters)
     self._overshoot = check_value_positive('overshoot', overshoot)
     self._norm_level = check_norm_level(norm_level)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._net_grad = GradWrap(self._network)
     self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
     self._sparse = check_param_type('sparse', sparse, bool)
     for b in self._bounds:
         _ = check_param_multi_types('bound', b, [int, float])
Example #23
    def __init__(self,
                 micro_batches=2,
                 norm_bound=1.0,
                 noise_mech=None,
                 clip_mech=None,
                 **kwargs):
        if micro_batches:
            self._micro_batches = check_int_positive('micro_batches',
                                                     micro_batches)
        else:
            self._micro_batches = None
        norm_bound = check_param_type('norm_bound', norm_bound, float)
        norm_bound = check_value_positive('norm_bound', norm_bound)
        norm_bound = Tensor(norm_bound, mstype.float32)
        self._norm_bound = Parameter(norm_bound, 'norm_bound')

        opt = kwargs['optimizer']
        opt_name = opt.__class__.__name__
        # Raise ValueError if noise_mech and DPOptimizer are both set or both unset.
        # Also raise ValueError if noise_mech (or DPOptimizer's mech) is adaptive
        # while clip_mech is not None.
        if noise_mech is not None and "DPOptimizer" in opt_name:
            msg = 'DPOptimizer is not supported while noise_mech is not None'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if noise_mech is None:
            if "DPOptimizer" in opt_name:
                if context.get_context('mode') != context.PYNATIVE_MODE:
                    msg = 'DPOptimizer only supports pynative mode currently.'
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
                if 'Ada' in opt._mech.__class__.__name__ and clip_mech is not None:
                    msg = "When DPOptimizer's mech method is adaptive, clip_mech must be None."
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
            else:
                msg = 'DPModel should set noise_mech or configure a DPOptimizer; ' \
                      'please refer to the example.'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
        self._noise_mech = noise_mech
        if noise_mech is not None:
            if 'Ada' in noise_mech.__class__.__name__ and clip_mech is not None:
                msg = 'When noise_mech is Adaptive, clip_mech must be None.'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)

        if clip_mech is None or isinstance(clip_mech, Cell):
            self._clip_mech = clip_mech
        super(DPModel, self).__init__(**kwargs)
Example #24
    def __init__(self,
                 norm_bound=1.0,
                 initial_noise_multiplier=1.5,
                 noise_decay_rate=6e-4,
                 decay_policy='Time',
                 seed=0):
        super(AdaGaussianRandom, self).__init__()
        norm_bound = check_value_positive('norm_bound', norm_bound)
        initial_noise_multiplier = check_value_positive(
            'initial_noise_multiplier', initial_noise_multiplier)
        self._norm_bound = Tensor(norm_bound, mstype.float32)

        initial_noise_multiplier = Tensor(initial_noise_multiplier,
                                          mstype.float32)
        self._initial_noise_multiplier = Parameter(
            initial_noise_multiplier, name='initial_noise_multiplier')
        self._noise_multiplier = Parameter(initial_noise_multiplier,
                                           name='noise_multiplier')
        self._mean = Tensor(0, mstype.float32)
        noise_decay_rate = check_param_type('noise_decay_rate',
                                            noise_decay_rate, float)
        check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
        self._noise_decay_rate = Tensor(noise_decay_rate, mstype.float32)
        if decay_policy not in ['Time', 'Step']:
            raise NameError(
                "The decay_policy must be in ['Time', 'Step'], but "
                "get {}".format(decay_policy))
        self._decay_policy = decay_policy
        self._sub = P.Sub()
        self._mul = P.Mul()
        self._add = P.TensorAdd()
        self._div = P.Div()
        self._dtype = mstype.float32
        self._normal = P.Normal(seed=seed)
        self._assign = P.Assign()
        self._one = Tensor(1, self._dtype)
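A hedged sketch of what the two accepted decay policies conventionally mean for the noise multiplier at training step t. This is an assumption about the update rule, written in plain Python; the class above implements its update with MindSpore ops:

def decayed_noise_multiplier(initial, decay_rate, step, decay_policy='Time'):
    if decay_policy == 'Time':                 # hyperbolic, 1/t-style decay
        return initial / (1.0 + decay_rate * step)
    if decay_policy == 'Step':                 # multiplicative decay per step
        return initial * (1.0 - decay_rate) ** step
    raise ValueError("The decay_policy must be in ['Time', 'Step'].")

# e.g. with the defaults above: 1.5 / (1 + 6e-4 * 1000) = 0.9375 after 1000 steps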
Example #25
 def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
              theta=1.0, max_iteration=1000, max_count=3, increase=True,
              sparse=True):
      super(JSMAAttack, self).__init__()
     LOGGER.debug(TAG, "init jsma class.")
     self._network = check_model('network', network, Cell)
     self._min = check_value_non_negative('box_min', box_min)
     self._max = check_value_non_negative('box_max', box_max)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._theta = check_value_positive('theta', theta)
     self._max_iter = check_int_positive('max_iteration', max_iteration)
     self._max_count = check_int_positive('max_count', max_count)
     self._increase = check_param_type('increase', increase, bool)
     self._net_grad = GradWrap(self._network)
     self._bit_map = None
     self._sparse = check_param_type('sparse', sparse, bool)
Example #26
    def __init__(self,
                 model,
                 init_num_evals=100,
                 max_num_evals=1000,
                 stepsize_search='geometric_progression',
                 num_iterations=20,
                 gamma=1.0,
                 constraint='l2',
                 batch_size=32,
                 clip_min=0.0,
                 clip_max=1.0,
                 sparse=True):
        super(HopSkipJumpAttack, self).__init__()
        self._model = check_model('model', model, BlackModel)
        self._init_num_evals = check_int_positive('initial_num_evals',
                                                  init_num_evals)
        self._max_num_evals = check_int_positive('max_num_evals',
                                                 max_num_evals)
        self._batch_size = check_int_positive('batch_size', batch_size)
        self._clip_min = check_value_non_negative('clip_min', clip_min)
        self._clip_max = check_value_non_negative('clip_max', clip_max)
        self._sparse = check_param_type('sparse', sparse, bool)
        self._np_dtype = np.dtype('float32')
        if stepsize_search in ['geometric_progression', 'grid_search']:
            self._stepsize_search = stepsize_search
        else:
            msg = "stepsize_search must be in ['geometric_progression'," \
                  " 'grid_search'], but got {}".format(stepsize_search)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)

        self._num_iterations = check_int_positive('num_iterations',
                                                  num_iterations)
        self._gamma = check_value_positive('gamma', gamma)
        if constraint in ['l2', 'linf']:
            self._constraint = constraint
        else:
            msg = "constraint must be in ['l2', 'linf'], " \
                  "but got {}".format(constraint)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        self.queries = 0
        self.is_adv = True
        self.y_targets = None
        self.image_targets = None
        self.y_target = None
        self.image_target = None
Example #27
 def __init__(self, micro_batches=2, norm_clip=1.0, mech=None, **kwargs):
     if micro_batches:
         self._micro_batches = check_int_positive('micro_batches', micro_batches)
     else:
         self._micro_batches = None
     norm_clip = check_param_type('norm_clip', norm_clip, float)
     self._norm_clip = check_value_positive('norm_clip', norm_clip)
     if mech is not None and "DPOptimizer" in kwargs['optimizer'].__class__.__name__:
         raise ValueError('DPOptimizer is not supported while mech is not None')
     if mech is None:
         if "DPOptimizer" in kwargs['optimizer'].__class__.__name__:
             if context.get_context('mode') != context.PYNATIVE_MODE:
                  raise ValueError('DPOptimizer only supports pynative mode currently.')
         else:
              raise ValueError('DPModel should set mech or configure a DPOptimizer; please refer to the example.')
     self._mech = mech
     super(DPModel, self).__init__(**kwargs)
Example #28
 def __init__(self,
              layer_name,
              grad_idx,
              is_add_noise,
              is_lower_clip,
              min_num,
              upper_bound=1.20):
     self.layer_name = check_param_type('layer_name', layer_name, str)
     check_param_type('grad_idx', grad_idx, int)
     self.grad_idx = check_value_non_negative('grad_idx', grad_idx)
     self.is_add_noise = check_param_type('is_add_noise', is_add_noise,
                                          bool)
     self.is_lower_clip = check_param_type('is_lower_clip', is_lower_clip,
                                           bool)
     self.min_num = check_param_type('min_num', min_num, int)
     self.upper_bound = check_value_positive('upper_bound', upper_bound)
     self.inited = False
Example #29
    def __init__(self,
                 array,
                 is_add_noise,
                 is_lower_clip,
                 min_num,
                 upper_bound=1.20):
        super(GradMaskInCell, self).__init__()
        self.mul_mask_array_shape = array.shape
        mul_mask_array = array.copy()
        self.mul_mask_array_flat = mul_mask_array.flatten()
        self.mul_mask_tensor = Tensor(array, mstype.float32)
        self.mask_able = False
        self.is_add_noise = is_add_noise
        self.is_lower_clip = is_lower_clip
        self.min_num = min_num
        self.upper_bound = max(
            0.10, check_value_positive('upper_bound', upper_bound))

        self.para_num = array.size
        self.is_approximity = False
        self.sparse_pos_list = [0]
        self.part_num = 1
        self.part_size = self.para_num
        self.part_num_max = 16
        self.para_many_num = 10000
        self.para_huge_num = 10 * 10000 * 10000

        if self.para_num > self.para_many_num:
            self.is_approximity = True
            self.is_add_noise = False
            self.is_lower_clip = False

            ratio = 2
            if self.part_size > self.para_huge_num:
                while self.part_size % ratio == 0 and self.part_size > self.para_huge_num \
                        and self.part_num < self.part_num_max:
                    self.part_num = self.part_num * ratio
                    self.part_size = int(self.part_size / ratio)
            msg = "this layer has {} para, disable the operation of clipping lower, clipping upper_bound, " \
                  "adding noise. \n part_num={}, part_size={}" \
                .format(self.para_num, self.part_num, self.part_size)
            LOGGER.info(TAG, msg)
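For very large layers, the constructor above disables noise and clipping and splits the mask into parts. The sizing loop, restated as a pure function with the same behavior, just easier to trace:

def partition(para_num, huge_num=10 * 10000 * 10000, part_num_max=16, ratio=2):
    part_num, part_size = 1, para_num
    while part_size % ratio == 0 and part_size > huge_num \
            and part_num < part_num_max:
        part_num *= ratio
        part_size //= ratio
    return part_num, part_size

print(partition(4 * 10 ** 9))   # -> (4, 1000000000)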
Example #30
 def __init__(self,
              network,
              num_classes,
              model_type='classification',
              reserve_ratio=0.3,
              max_iters=50,
              overshoot=0.02,
              norm_level=2,
              bounds=None,
              sparse=True):
     super(DeepFool, self).__init__()
     self._network = check_model('network', network, Cell)
     self._max_iters = check_int_positive('max_iters', max_iters)
     self._overshoot = check_value_positive('overshoot', overshoot)
     self._norm_level = check_norm_level(norm_level)
     self._num_classes = check_int_positive('num_classes', num_classes)
     self._net_grad = GradWrap(self._network)
     self._bounds = bounds
     if self._bounds is not None:
         self._bounds = check_param_multi_types('bounds', bounds,
                                                [list, tuple])
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
     self._sparse = check_param_type('sparse', sparse, bool)
     self._model_type = check_param_type('model_type', model_type, str)
     if self._model_type not in ('classification', 'detection'):
         msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(
             self._model_type)
         LOGGER.error(TAG, msg)
         raise ValueError(msg)
     self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                    reserve_ratio)
     if self._reserve_ratio > 1:
          msg = 'reserve_ratio should not be greater than 1.0, but got {}.'.format(
              self._reserve_ratio)
          LOGGER.error(TAG, msg)
          raise ValueError(msg)