def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
             max_eps=10.0, target_delta=1e-3, noise_decay_mode='Time',
             noise_decay_rate=6e-4, per_print_times=50,
             dataset_sink_mode=False):
    """
    Validate the zCDP-monitor configuration and store it on the instance.

    Args:
        num_samples (int): Total number of training samples; must be positive.
        batch_size (int): Batch size; must be positive and < num_samples.
        initial_noise_multiplier (float): Starting noise multiplier; > 0.
        max_eps (float): Privacy-budget ceiling; > 0. Default: 10.0.
        target_delta (float): Target delta, in [0.0, 1.0]. Default: 1e-3.
        noise_decay_mode (Union[str, None]): One of 'Step', 'Time', 'Exp',
            or None for no decay. Default: 'Time'.
        noise_decay_rate (float): Decay rate in [0.0, 1.0]; only validated
            when a decay mode is set. Default: 6e-4.
        per_print_times (int): Steps between privacy reports; > 0.
        dataset_sink_mode (bool): If True, report once per epoch instead.

    Raises:
        ValueError: If any of the above constraints is violated.
    """
    super(ZCDPMonitor, self).__init__()
    check_int_positive('num_samples', num_samples)
    check_int_positive('batch_size', batch_size)
    if batch_size >= num_samples:
        err = 'Batch_size must be less than num_samples.'
        LOGGER.error(TAG, err)
        raise ValueError(err)
    check_value_positive('initial_noise_multiplier', initial_noise_multiplier)
    if noise_decay_mode is not None:
        if noise_decay_mode not in ('Step', 'Time', 'Exp'):
            err = "Noise decay mode must be in ('Step', 'Time', 'Exp'), " \
                  "but got {}.".format(noise_decay_mode)
            LOGGER.error(TAG, err)
            raise ValueError(err)
        # The decay rate only matters when a decay mode is in effect,
        # so it is validated inside this branch.
        noise_decay_rate = check_param_type('noise_decay_rate',
                                            noise_decay_rate, float)
        check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
    check_int_positive('per_print_times', per_print_times)
    check_param_type('dataset_sink_mode', dataset_sink_mode, bool)

    self._num_samples = num_samples
    self._batch_size = batch_size
    self._initial_noise_multiplier = initial_noise_multiplier
    self._max_eps = check_value_positive('max_eps', max_eps)
    self._target_delta = check_param_in_range('target_delta', target_delta,
                                              0.0, 1.0)
    self._noise_decay_mode = noise_decay_mode
    self._noise_decay_rate = noise_decay_rate
    # Accumulated zero-concentrated DP cost; starts at zero.
    self._zcdp = 0
    self._per_print_times = per_print_times
    if dataset_sink_mode:
        # In sink mode, printing is aligned to epochs: one report every
        # (steps per epoch) iterations.
        self._per_print_times = int(self._num_samples / self._batch_size)
def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0,
             noise_decay_rate=6e-6, decay_policy='Exp'):
    """
    Initialize the adaptive Gaussian noise mechanism.

    Args:
        norm_bound (float): Clipping norm bound passed to the base class.
        initial_noise_multiplier (float): Starting multiplier, forwarded to
            the base class. Default: 1.0.
        seed (int): RNG seed forwarded to the base class. Default: 0.
        noise_decay_rate (float): Per-update decay rate, in [0.0, 1.0].
        decay_policy (str): One of 'Time', 'Step', 'Exp'. Default: 'Exp'.

    Raises:
        NameError: If decay_policy is not one of the supported values.
            NOTE(review): ValueError would be the conventional type here;
            NameError is kept so existing callers' except clauses still match.
    """
    super(NoiseAdaGaussianRandom, self).__init__(
        norm_bound=norm_bound,
        initial_noise_multiplier=initial_noise_multiplier,
        seed=seed)
    # Mutable copy of the multiplier; the initial value lives on the base.
    self._noise_multiplier = Parameter(self._initial_noise_multiplier,
                                       name='noise_multiplier')
    noise_decay_rate = check_param_type('noise_decay_rate',
                                        noise_decay_rate, float)
    check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
    self._noise_decay_rate = Tensor(noise_decay_rate, mstype.float32)
    supported_policies = ['Time', 'Step', 'Exp']
    if decay_policy not in supported_policies:
        raise NameError("The decay_policy must be in ['Time', 'Step', 'Exp'], but "
                        "get {}".format(decay_policy))
    self._decay_policy = decay_policy
def set_radius(self, radius):
    """
    Set radius.

    Args:
        radius (float): Radius of region. Must lie within
            [self._initial_radius, self._max_radius]; the range check (and
            any error raised for out-of-range values) is delegated to
            ``check_param_in_range``.
    """
    self._radius = check_param_in_range('radius', radius, self._initial_radius, self._max_radius)
def __init__(self, auto_encoder, false_positive_rate=0.01, bounds=(0.0, 1.0)):
    """
    Validate and store the configuration of the error-based detector.

    Args:
        auto_encoder (Model): Auto-encoder model used for reconstruction.
        false_positive_rate (float): Acceptable false-positive rate,
            in [0, 1]. Default: 0.01.
        bounds (Union[list, tuple]): Numeric bounds; each element must be
            an int or float. Default: (0.0, 1.0).
    """
    super(ErrorBasedDetector, self).__init__()
    self._auto_encoder = check_model('auto_encoder', auto_encoder, Model)
    self._false_positive_rate = check_param_in_range(
        'false_positive_rate', false_positive_rate, 0, 1)
    # Decision threshold is calibrated later; start at zero.
    self._threshold = 0.0
    self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
    for bound in self._bounds:
        _ = check_param_multi_types('bound', bound, [int, float])
def set_params(self, x_bias=0, y_bias=0, auto_param=False):
    """
    Set translate parameters.

    Args:
        x_bias (Union[float, int]): X-direction translation, and x_bias
            should be in range of (-1, 1). Default: 0.
        y_bias (Union[float, int]): Y-direction translation, and y_bias
            should be in range of (-1, 1). Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    # Range checks run unconditionally — even when auto_param is True,
    # in which case the checked values are discarded below.
    x_bias = check_param_in_range('x_bias', x_bias, -1, 1)
    y_bias = check_param_in_range('y_bias', y_bias, -1, 1)
    self.auto_param = auto_param
    if auto_param:
        # Auto mode draws a milder random translation in (-0.3, 0.3).
        self.x_bias = np.random.uniform(-0.3, 0.3)
        self.y_bias = np.random.uniform(-0.3, 0.3)
    else:
        # NOTE(review): this type check after the range check above looks
        # redundant; kept as-is to preserve the original error behavior.
        self.x_bias = check_param_multi_types('x_bias', x_bias, [int, float])
        self.y_bias = check_param_multi_types('y_bias', y_bias, [int, float])
def __init__(self, model, ksize=3, is_local_smooth=True, metric='l1',
             false_positive_ratio=0.05):
    """
    Validate and store the configuration of the spatial-smoothing detector.

    Args:
        model (Model): Target model under inspection.
        ksize (int): Smoothing kernel size; must be positive. Default: 3.
        is_local_smooth (bool): Whether to use local smoothing. Default: True.
        metric (str): Distance metric name; stored unchecked. Default: 'l1'.
        false_positive_ratio (float): Acceptable false-positive ratio,
            in [0, 1]. Default: 0.05.
    """
    super(SpatialSmoothing, self).__init__()
    self._ksize = check_int_positive('ksize', ksize)
    self._is_local_smooth = check_param_type('is_local_smooth',
                                             is_local_smooth, bool)
    self._model = check_model('model', model, Model)
    # NOTE(review): metric is stored without validation, unlike the other
    # parameters — presumably checked at use time; confirm downstream.
    self._metric = metric
    self._fpr = check_param_in_range('false_positive_ratio',
                                     false_positive_ratio, 0, 1)
    # Threshold is computed during calibration; unset until then.
    self._threshold = None
def __init__(self, network, attacks, loss_fn=None, optimizer=None,
             bounds=(0.0, 1.0), replace_ratio=0.5):
    """
    Validate and store the configuration of the adversarial defense.

    Args:
        network (Cell): Network to defend (forwarded to the base class).
        attacks (list): Attack instances used to generate examples.
        loss_fn: Loss function forwarded to the base class. Default: None.
        optimizer: Optimizer forwarded to the base class. Default: None.
        bounds (Union[tuple, list]): Numeric bounds; each element must be
            an int or float. Default: (0.0, 1.0).
        replace_ratio (float): Ratio of replaced samples, in [0, 1].
    """
    super(AdversarialDefenseWithAttacks, self).__init__(network, loss_fn,
                                                        optimizer)
    self._attacks = check_param_type('attacks', attacks, list)
    self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
    for bound_value in self._bounds:
        _ = check_param_multi_types('bound', bound_value, [int, float])
    self._replace_ratio = check_param_in_range('replace_ratio',
                                               replace_ratio, 0, 1)
    # Flag flipped once the compute graph has been initialized elsewhere.
    self._graph_initialized = False
def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5,
             noise_decay_rate=6e-4, decay_policy='Time', seed=0):
    """
    Initialize the adaptive Gaussian random-noise generator.

    Args:
        norm_bound (float): Clipping norm bound; must be positive.
        initial_noise_multiplier (float): Starting multiplier; positive.
        noise_decay_rate (float): Decay rate, in [0.0, 1.0]. Default: 6e-4.
        decay_policy (str): Either 'Time' or 'Step'. Default: 'Time'.
        seed (int): Seed for the normal-distribution primitive. Default: 0.

    Raises:
        NameError: If decay_policy is not supported.
            NOTE(review): ValueError would be conventional; NameError is
            kept so existing callers' except clauses still match.
    """
    super(AdaGaussianRandom, self).__init__()
    norm_bound = check_value_positive('norm_bound', norm_bound)
    initial_noise_multiplier = check_value_positive(
        'initial_noise_multiplier', initial_noise_multiplier)
    self._norm_bound = Tensor(norm_bound, mstype.float32)

    # Keep the initial multiplier and the live multiplier as separate
    # Parameters; both start from the same tensor value.
    initial_noise_multiplier = Tensor(initial_noise_multiplier, mstype.float32)
    self._initial_noise_multiplier = Parameter(
        initial_noise_multiplier, name='initial_noise_multiplier')
    self._noise_multiplier = Parameter(initial_noise_multiplier,
                                       name='noise_multiplier')
    self._mean = Tensor(0, mstype.float32)

    noise_decay_rate = check_param_type('noise_decay_rate',
                                        noise_decay_rate, float)
    check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
    self._noise_decay_rate = Tensor(noise_decay_rate, mstype.float32)
    if decay_policy not in ['Time', 'Step']:
        raise NameError(
            "The decay_policy must be in ['Time', 'Step'], but "
            "get {}".format(decay_policy))
    self._decay_policy = decay_policy

    # Primitive operators used later by the construct pass.
    self._sub = P.Sub()
    self._mul = P.Mul()
    self._add = P.TensorAdd()
    self._div = P.Div()
    self._dtype = mstype.float32
    self._normal = P.Normal(seed=seed)
    self._assign = P.Assign()
    self._one = Tensor(1, self._dtype)
def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000,
             max_buffer_size=10000, tuning=False, fpr=0.001):
    """
    Validate and store the configuration of the similarity detector.

    Args:
        trans_model (Model): Feature-transformation model.
        max_k_neighbor (int): Maximum neighbors considered; positive.
        chunk_size (int): Processing chunk size; positive. Default: 1000.
        max_buffer_size (int): Query-buffer capacity; positive.
        tuning (bool): Whether tuning mode is enabled. Default: False.
        fpr (float): Target false-positive rate, in [0, 1]. Default: 0.001.
    """
    super(SimilarityDetector, self).__init__()
    self._max_k_neighbor = check_int_positive('max_k_neighbor',
                                              max_k_neighbor)
    self._trans_model = check_model('trans_model', trans_model, Model)
    self._tuning = check_param_type('tuning', tuning, bool)
    self._chunk_size = check_int_positive('chunk_size', chunk_size)
    self._max_buffer_size = check_int_positive('max_buffer_size',
                                               max_buffer_size)
    self._fpr = check_param_in_range('fpr', fpr, 0, 1)
    # Calibrated during fitting; unknown until then.
    self._num_of_neighbors = None
    self._threshold = None
    self._num_queries = 0
    # Stores recently processed queries.
    self._buffer = []
    # Tracks indexes of detected queries.
    self._detected_queries = []
def concept_check(self, data): """ Find concept drift locations in a data series. Args: data(numpy.ndarray): Input data. The shape of data could be (n,1) or (n,m). Note that each column (m columns) is one data series. Returns: - numpy.ndarray, the concept drift score of the example series. - float, the threshold to judge concept drift. - list, the location of the concept drift. Examples: >>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10, >>> step=10, threshold_index=1.5, need_label=False) >>> data_example = 5*np.random.rand(1000) >>> data_example[200: 800] = 20*np.random.rand(600) >>> score, drift_threshold, drift_location = concept.concept_check(data_example) """ # data check data = _check_array_not_empty('data', data) data = check_param_type('data', data, np.ndarray) check_param_in_range('window_size', self.window_size, 10, int((1 / 3)*len(data))) check_param_in_range('rolling_window', self.rolling_window, 1, self.window_size) check_param_in_range('step', self.step, 1, self.window_size) original_data = data data = self._data_process(data) # calculate drift score drift_score = np.zeros(len(data)) step_size = self.step for i in range(0, len(data) - 2*self.window_size, step_size): data_x = data[i: i + self.window_size] data_y = data[i + self.window_size:i + 2*self.window_size] drift_score[i + self.window_size] = self._concept_distance(data_x, data_y) threshold = _cal_threshold(drift_score, self.threshold_index) # original label label, label_location = _original_label(data, threshold, drift_score, self.window_size, step_size) # label continue label_continue = _label_continue_process(label) # find drift blocks concept_drift_location, drift_point = _drift_blocks(drift_score, label_continue, label_location) # save result _result_save(original_data, threshold, concept_drift_location, drift_point, drift_score) return drift_score, threshold, concept_drift_location
def _check_attack_params(self, method, params):
    """
    Check input parameters of attack methods.

    Args:
        method (str): Attack method name; must be a key of
            ``self._attack_param_checklists``.
        params (dict): Mapping from parameter name to a list of candidate
            values to validate.

    Raises:
        ValueError: If a parameter name is not allowed for ``method``, a
            bounds value is malformed or mis-ordered, or a value falls
            outside its permitted range.
    """
    allow_params = self._attack_param_checklists[method].keys()
    for param_name in params:
        if param_name not in allow_params:
            msg = "parameters of {} must be in {}".format(method, allow_params)
            raise ValueError(msg)

        # Every entry must be supplied as a list of candidate values.
        check_param_type(param_name, params[param_name], list)
        for param_value in params[param_name]:
            if param_name == 'bounds':
                bounds = check_param_multi_types('bounds', param_value, [tuple])
                if len(bounds) != 2:
                    msg = 'The format of bounds must be format ' \
                          '(lower_bound, upper_bound), but got its ' \
                          'length as {}'.format(len(bounds))
                    raise ValueError(msg)
                for bound_value in bounds:
                    _ = check_param_multi_types('bound', bound_value,
                                                [int, float])
                if bounds[0] >= bounds[1]:
                    # bounds is (lower, upper): report each value under its
                    # correct name (the previous message had them swapped).
                    msg = "upper bound must be more than lower bound, " \
                          "but upper bound got {}, lower bound " \
                          "got {}".format(bounds[1], bounds[0])
                    raise ValueError(msg)
            elif param_name == 'norm_level':
                _ = check_norm_level(param_value)
            else:
                # Generic parameters: validate type and range against the
                # per-method checklist.
                allow_type = self._attack_param_checklists[method][
                    param_name]['dtype']
                allow_range = self._attack_param_checklists[method][
                    param_name]['range']
                _ = check_param_multi_types(str(param_name), param_value,
                                            allow_type)
                _ = check_param_in_range(str(param_name), param_value,
                                         allow_range[0], allow_range[1])
def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
             max_eps=10.0, target_delta=1e-3, max_delta=None, target_eps=None,
             orders=None, noise_decay_mode='Time', noise_decay_rate=6e-4,
             per_print_times=50, dataset_sink_mode=False):
    """
    Validate and store the configuration of the RDP privacy monitor.

    Args:
        num_samples (int): Total number of training samples; positive.
        batch_size (int): Batch size; positive and < num_samples.
        initial_noise_multiplier (float): Starting noise multiplier; > 0.
        max_eps (Union[float, None]): Privacy-budget ceiling, if any.
        target_delta (Union[float, None]): Target delta; exactly one of
            target_delta / target_eps must be set (the other None).
        max_delta (Union[float, None]): Delta ceiling; must be < 1 if set.
        target_eps (Union[float, None]): Target epsilon, if any.
        orders (Union[iterable, None]): RDP orders; each must be > 1.
        noise_decay_mode (Union[str, None]): 'Step', 'Time', 'Exp', or None.
        noise_decay_rate (float): Decay rate in [0.0, 1.0]; only validated
            when a decay mode is set.
        per_print_times (int): Steps between privacy reports; positive.
        dataset_sink_mode (bool): If True, report once per epoch.

    Raises:
        ValueError: If any of the above constraints is violated.
    """
    super(RDPMonitor, self).__init__()
    check_int_positive('num_samples', num_samples)
    check_int_positive('batch_size', batch_size)
    if batch_size >= num_samples:
        msg = 'Batch_size must be less than num_samples.'
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    check_value_positive('initial_noise_multiplier',
                         initial_noise_multiplier)
    if max_eps is not None:
        check_value_positive('max_eps', max_eps)
    if target_delta is not None:
        check_value_positive('target_delta', target_delta)
    if max_delta is not None:
        check_value_positive('max_delta', max_delta)
        if max_delta >= 1:
            msg = 'max_delta must be less than 1.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
    if target_eps is not None:
        check_value_positive('target_eps', target_eps)
    if orders is not None:
        for item in orders:
            check_value_positive('order', item)
            # Orders must be strictly greater than 1, not merely positive.
            if item <= 1:
                msg = 'orders must be greater than 1'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
    if noise_decay_mode is not None:
        if noise_decay_mode not in ('Step', 'Time', 'Exp'):
            msg = "Noise decay mode must be in ('Step', 'Time', 'Exp')"
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        # Decay rate only matters when a decay mode is in effect.
        noise_decay_rate = check_param_type('noise_decay_rate',
                                            noise_decay_rate, float)
        check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
    check_int_positive('per_print_times', per_print_times)
    check_param_type('dataset_sink_mode', dataset_sink_mode, bool)
    self._num_samples = num_samples
    self._batch_size = batch_size
    self._initial_noise_multiplier = initial_noise_multiplier
    self._max_eps = max_eps
    self._target_delta = target_delta
    self._max_delta = max_delta
    self._target_eps = target_eps
    self._orders = orders
    self._noise_decay_mode = noise_decay_mode
    self._noise_decay_rate = noise_decay_rate
    # Accumulated Renyi-DP cost; starts at zero.
    self._rdp = 0
    self._per_print_times = per_print_times
    # Exactly one of target eps / target delta must be provided.
    if self._target_eps is None and self._target_delta is None:
        msg = 'target eps and target delta cannot both be None'
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    if self._target_eps is not None and self._target_delta is not None:
        msg = 'One of target eps and target delta must be None'
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    if dataset_sink_mode:
        # In sink mode, printing is aligned to epochs (steps per epoch).
        self._per_print_times = int(self._num_samples / self._batch_size)
def set_radius(self, radius):
    """
    Set radius.

    Args:
        radius (float): Radius of region. Must lie within
            [self._initial_radius, self._max_radius]; the range check is
            delegated to ``check_param_in_range``.
    """
    self._radius = check_param_in_range('radius', radius,
                                        self._initial_radius,
                                        self._max_radius)