def eval(self, dataset_train, dataset_test, metrics):
    """
    Evaluate the privacy of the target model. Evaluation indicators are
    specified by `metrics`.

    Args:
        dataset_train (mindspore.dataset): The training dataset for the
            target model.
        dataset_test (mindspore.dataset): The test dataset for the target
            model.
        metrics (Union[list, tuple]): Evaluation indicators. The value of
            metrics must be in ["precision", "accuracy", "recall"].

    Returns:
        list, each element contains an evaluation indicator for the attack
        model.
    """
    check_param_type("dataset_train", dataset_train, Dataset)
    check_param_type("dataset_test", dataset_test, Dataset)
    check_param_multi_types("metrics", metrics, (list, tuple))

    metrics = set(metrics)
    metrics_list = {"precision", "accuracy", "recall"}
    if not metrics <= metrics_list:
        msg = "Element in 'metrics' must be in {}, but got {}.".format(
            metrics_list, metrics)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)

    result = []
    features, labels = self._transform(dataset_train, dataset_test)
    for attacker in self._attack_list:
        pred = attacker.predict(features)
        item = {}
        for option in metrics:
            item[option] = _eval_info(pred, labels, option)
        result.append(item)
    return result

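# Illustrative usage (added sketch, not from the original source): `inference`
# stands for a membership-inference object whose attack models were already
# fitted via `train`, and `ds_train`/`ds_test` are prepared MindSpore
# datasets; all three names are placeholders.
#   >>> result = inference.eval(ds_train, ds_test,
#   ...                         metrics=["precision", "recall"])
#   >>> for item in result:
#   ...     print(item)
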
def __init__(self, network, eps=1e-5, bounds=(0.0, 1.0), is_targeted=True,
             nb_iter=150, search_iters=30, loss_fn=None, sparse=False):
    super(LBFGS, self).__init__()
    self._network = check_model('network', network, Cell)
    self._eps = check_value_positive('eps', eps)
    self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
    self._nb_iter = check_int_positive('nb_iter', nb_iter)
    self._search_iters = check_int_positive('search_iters', search_iters)
    if loss_fn is None:
        loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
    with_loss_cell = WithLossCell(self._network, loss_fn)
    self._grad_all = GradWrapWithLoss(with_loss_cell)
    self._dtype = None
    self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
    self._sparse = check_param_type('sparse', sparse, bool)
    for b in self._bounds:
        _ = check_param_multi_types('bound', b, [int, float])
    # Normalize the clipping box so that _box_min <= _box_max, regardless of
    # the order in which the two bound values were given.
    box_max, box_min = bounds
    if box_max < box_min:
        self._box_min = box_max
        self._box_max = box_min
    else:
        self._box_min = box_min
        self._box_max = box_max

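# Illustrative usage (added sketch, not from the original source): `net` is
# assumed to be a trained mindspore.nn.Cell classifier, and `inputs`/`labels`
# are numpy arrays. Since LBFGS is targeted by default, `labels` here hold
# the target classes rather than the true ones.
#   >>> attack = LBFGS(net, is_targeted=True)
#   >>> adv_x = attack.generate(inputs, labels)
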
def set_params(self, factor_x=0, factor_y=0, auto_param=False):
    """
    Set shear parameters.

    Args:
        factor_x (Union[float, int]): Shear factor of horizontal direction.
            Default: 0.
        factor_y (Union[float, int]): Shear factor of vertical direction.
            Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if factor_x != 0 and factor_y != 0:
        msg = 'At least one of factor_x and factor_y must be zero.'
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    if auto_param:
        # Shear in one randomly chosen direction only.
        if np.random.uniform(-1, 1) > 0:
            self.factor_x = np.random.uniform(-2, 2)
            self.factor_y = 0
        else:
            self.factor_x = 0
            self.factor_y = np.random.uniform(-2, 2)
    else:
        self.factor_x = check_param_multi_types('factor_x', factor_x,
                                                [int, float])
        self.factor_y = check_param_multi_types('factor_y', factor_y,
                                                [int, float])

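# Illustrative usage (added sketch, not from the original source): `trans` is
# a placeholder for the shear-transform instance; only one direction may be
# sheared at a time.
#   >>> trans.set_params(factor_x=0.5)      # horizontal shear only
#   >>> trans.set_params(auto_param=True)   # random direction and factor
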
def __init__(self, network, eps=0.07, alpha=None, bounds=None, loss_fn=None):
    super(GradientMethod, self).__init__()
    self._network = check_model('network', network, Cell)
    self._eps = check_value_positive('eps', eps)
    self._dtype = None
    if bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    else:
        self._bounds = bounds
    if alpha is not None:
        self._alpha = check_value_positive('alpha', alpha)
    else:
        self._alpha = alpha
    if loss_fn is None:
        loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
    with_loss_cell = WithLossCell(self._network, loss_fn)
    self._grad_all = GradWrapWithLoss(with_loss_cell)
    self._grad_all.set_train()

def train(self, dataset_train, dataset_test, attack_config):
    """
    Depending on the configuration, use the input datasets to train the
    attack model. Save the attack model to self._attack_list.

    Args:
        dataset_train (mindspore.dataset): The training dataset for the
            target model.
        dataset_test (mindspore.dataset): The test dataset for the target
            model.
        attack_config (Union[list, tuple]): Parameter setting for the attack
            model. The format is
            [{"method": "knn", "params": {"n_neighbors": [3, 5, 7]}},
             {"method": "lr", "params": {"C": np.logspace(-4, 2, 10)}}].
            The supported methods are knn, lr, mlp and rf, and the params of
            each method must be within the range of changeable parameters.
            Tips on setting params can be found in the scikit-learn
            documentation:
            `KNN <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_,
            `LR <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_,
            `RF <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_,
            `MLP <https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html>`_.

    Raises:
        KeyError: If any config in attack_config doesn't have keys
            {"method", "params"}.
        NameError: If the method (case insensitive) in attack_config is not
            in ["lr", "knn", "rf", "mlp"].
    """
    check_param_type("dataset_train", dataset_train, Dataset)
    check_param_type("dataset_test", dataset_test, Dataset)
    check_param_multi_types("attack_config", attack_config, (list, tuple))
    verify_config_params(attack_config)

    features, labels = self._transform(dataset_train, dataset_test)
    for config in attack_config:
        self._attack_list.append(
            get_attack_model(features, labels, config, n_jobs=self._n_jobs))

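# Illustrative usage (added sketch, not from the original source):
# `inference` is a placeholder for the membership-inference object; the
# config below would train one kNN and one logistic-regression attack model.
#   >>> config = [{"method": "knn", "params": {"n_neighbors": [3, 5, 7]}},
#   ...           {"method": "lr", "params": {"C": np.logspace(-4, 2, 10)}}]
#   >>> inference.train(ds_train, ds_test, config)
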
def __init__(self, model, step_size=0.5, per_bounds=0.6, c1=2.0, c2=2.0,
             c=2.0, pop_size=6, t_max=1000, pm=0.5, bounds=None,
             targeted=False, reduction_iters=3, sparse=True):
    super(PSOAttack, self).__init__()
    self._model = check_model('model', model, BlackModel)
    self._step_size = check_value_positive('step_size', step_size)
    self._per_bounds = check_value_positive('per_bounds', per_bounds)
    self._c1 = check_value_positive('c1', c1)
    self._c2 = check_value_positive('c2', c2)
    self._c = check_value_positive('c', c)
    self._pop_size = check_int_positive('pop_size', pop_size)
    self._pm = check_value_positive('pm', pm)
    # bounds defaults to None, so only validate it when it is provided.
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._targeted = check_param_type('targeted', targeted, bool)
    self._t_max = check_int_positive('t_max', t_max)
    self._reduce_iters = check_int_positive('reduction_iters',
                                            reduction_iters)
    self._sparse = check_param_type('sparse', sparse, bool)

def __init__(self, model, pop_size=6, mutation_rate=0.005, per_bounds=0.15,
             max_steps=1000, step_size=0.20, temp=0.3, bounds=(0, 1.0),
             adaptive=False, sparse=True):
    super(GeneticAttack, self).__init__()
    self._model = check_model('model', model, BlackModel)
    self._per_bounds = check_value_positive('per_bounds', per_bounds)
    self._pop_size = check_int_positive('pop_size', pop_size)
    self._step_size = check_value_positive('step_size', step_size)
    self._temp = check_value_positive('temp', temp)
    self._max_steps = check_int_positive('max_steps', max_steps)
    self._mutation_rate = check_value_positive('mutation_rate',
                                               mutation_rate)
    self._adaptive = check_param_type('adaptive', adaptive, bool)
    self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
    for b in self._bounds:
        _ = check_param_multi_types('bound', b, [int, float])
    # initial global optimum fitness value
    self._best_fit = -1
    # count times of no progress
    self._plateau_times = 0
    # count times of changing attack step
    self._adap_times = 0
    self._sparse = check_param_type('sparse', sparse, bool)

def __init__(self, model, model_type='classification', targeted=False,
             reserve_ratio=0.3, sparse=True, step_size=0.5, per_bounds=0.6,
             c1=2.0, c2=2.0, c=2.0, pop_size=6, t_max=1000, pm=0.5,
             bounds=None):
    super(PSOAttack, self).__init__()
    self._model = check_model('model', model, BlackModel)
    self._step_size = check_value_positive('step_size', step_size)
    self._per_bounds = check_value_positive('per_bounds', per_bounds)
    self._c1 = check_value_positive('c1', c1)
    self._c2 = check_value_positive('c2', c2)
    self._c = check_value_positive('c', c)
    self._pop_size = check_int_positive('pop_size', pop_size)
    self._pm = check_value_non_negative('pm', pm)
    if self._pm > 1:
        msg = "pm should not be greater than 1.0, but got {}.".format(
            self._pm)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._targeted = check_param_type('targeted', targeted, bool)
    self._t_max = check_int_positive('t_max', t_max)
    self._sparse = check_param_type('sparse', sparse, bool)
    self._model_type = check_param_type('model_type', model_type, str)
    if self._model_type not in ('classification', 'detection'):
        msg = "Only 'classification' or 'detection' is supported now, " \
              "but got {}.".format(self._model_type)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                   reserve_ratio)
    if self._reserve_ratio > 1:
        msg = "reserve_ratio should not be greater than 1.0, " \
              "but got {}.".format(self._reserve_ratio)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)

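# Illustrative usage (added sketch, not from the original source):
# `black_model` is a placeholder for a BlackModel subclass wrapping the
# victim network; pixel values are assumed to lie in [0.0, 1.0]. Black-box
# attacks in this code base typically return success flags, adversarial
# samples and query counts.
#   >>> attack = PSOAttack(black_model, model_type='classification',
#   ...                    bounds=(0.0, 1.0), t_max=1000)
#   >>> is_success, adv_x, query_times = attack.generate(inputs, labels)
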
def __init__(self, model, bounds=(0.0, 1.0), max_iter=100, is_targeted=False,
             sparse=True):
    super(SaltAndPepperNoiseAttack, self).__init__()
    self._model = check_model('model', model, BlackModel)
    self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
    for b in self._bounds:
        _ = check_param_multi_types('bound', b, [int, float])
    self._max_iter = check_int_positive('max_iter', max_iter)
    self._is_targeted = check_param_type('is_targeted', is_targeted, bool)
    self._sparse = check_param_type('sparse', sparse, bool)

def __init__(self, auto_encoder, false_positive_rate=0.01,
             bounds=(0.0, 1.0)):
    super(ErrorBasedDetector, self).__init__()
    self._auto_encoder = check_model('auto_encoder', auto_encoder, Model)
    self._false_positive_rate = check_param_in_range('false_positive_rate',
                                                     false_positive_rate,
                                                     0, 1)
    self._threshold = 0.0
    self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
    for b in self._bounds:
        _ = check_param_multi_types('bound', b, [int, float])

def __init__(self, auto_encoder, model, option="jsd", t=1,
             bounds=(0.0, 1.0)):
    super(DivergenceBasedDetector, self).__init__(auto_encoder,
                                                  bounds=bounds)
    self._auto_encoder = auto_encoder
    self._model = check_model('targeted model', model, Model)
    self._threshold = 0.0
    self._option = option
    self._t = check_int_positive('t', t)
    self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
    for b in self._bounds:
        _ = check_param_multi_types('bound', b, [int, float])

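# Illustrative usage (added sketch, not from the original source): `ae_model`
# is an autoencoder and `cls_model` the target classifier, both wrapped by
# mindspore.Model; the names and the calibration/detection calls are
# placeholders following the detector interface.
#   >>> detector = DivergenceBasedDetector(ae_model, cls_model,
#   ...                                    option="jsd", t=1)
#   >>> detector.fit(benign_inputs)       # calibrate the threshold
#   >>> flags = detector.detect(suspect_inputs)
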
def __init__(self, network, attacks, loss_fn=None, optimizer=None,
             bounds=(0.0, 1.0), replace_ratio=0.5):
    super(AdversarialDefenseWithAttacks, self).__init__(network, loss_fn,
                                                        optimizer)
    self._attacks = check_param_type('attacks', attacks, list)
    self._bounds = check_param_multi_types('bounds', bounds, [tuple, list])
    for elem in self._bounds:
        _ = check_param_multi_types('bound', elem, [int, float])
    self._replace_ratio = check_param_in_range('replace_ratio',
                                               replace_ratio, 0, 1)
    self._graph_initialized = False

def __init__(self, network, num_classes, max_iters=50, overshoot=0.02,
             norm_level=2, bounds=None, sparse=True):
    super(DeepFool, self).__init__()
    self._network = check_model('network', network, Cell)
    self._network.set_grad(True)
    self._max_iters = check_int_positive('max_iters', max_iters)
    self._overshoot = check_value_positive('overshoot', overshoot)
    self._norm_level = check_norm_level(norm_level)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._net_grad = GradWrap(self._network)
    # bounds defaults to None, so only validate it when it is provided.
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._sparse = check_param_type('sparse', sparse, bool)

def calculate_coverage(self, dataset, bias_coefficient=0, batch_size=32):
    """
    Calculate the testing adequacy of the given dataset.

    Args:
        dataset (numpy.ndarray): Data for fuzz test.
        bias_coefficient (Union[int, float]): The coefficient used for
            changing the neurons' output boundaries. Default: 0.
        batch_size (int): The number of samples in a predict batch.
            Default: 32.

    Examples:
        >>> model_fuzz_test = ModelCoverageMetrics(model, 10000, 10,
        ...                                        train_images)
        >>> model_fuzz_test.calculate_coverage(test_images)
    """
    dataset = check_numpy_param('dataset', dataset)
    batch_size = check_int_positive('batch_size', batch_size)
    bias_coefficient = check_param_multi_types('bias_coefficient',
                                               bias_coefficient,
                                               [int, float])
    self._lower_bounds -= bias_coefficient * self._var
    self._upper_bounds += bias_coefficient * self._var
    intervals = (self._upper_bounds - self._lower_bounds) / \
                self._segmented_num
    batches = dataset.shape[0] // batch_size
    for i in range(batches):
        self._sections_hits_count(
            dataset[i * batch_size:(i + 1) * batch_size], intervals)

def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
             nb_iter=5, loss_fn=None):
    super(IterativeGradientMethod, self).__init__()
    self._network = check_model('network', network, Cell)
    self._eps = check_value_positive('eps', eps)
    self._eps_iter = check_value_positive('eps_iter', eps_iter)
    self._nb_iter = check_int_positive('nb_iter', nb_iter)
    self._bounds = None
    if bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    if loss_fn is None:
        self._loss_grad = network
    else:
        self._loss_grad = GradWrapWithLoss(WithLossCell(self._network,
                                                        loss_fn))
    self._loss_grad.set_train()

def __init__(self, network, weights):
    super(InversionLoss, self).__init__()
    self._network = check_param_type('network', network, Cell)
    self._mse_loss = MSELoss()
    self._weights = check_param_multi_types('weights', weights,
                                            [list, tuple])
    self._get_shape = P.Shape()
    self._zeros = P.ZerosLike()
    self._device_target = context.get_context("device_target")

def set_params(self, x_bias=0, y_bias=0, auto_param=False):
    """
    Set translate parameters.

    Args:
        x_bias (Union[float, int]): X-direction translation. Default: 0.
        y_bias (Union[float, int]): Y-direction translation. Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    self.auto_param = auto_param
    if auto_param:
        self.x_bias = np.random.uniform(-0.3, 0.3)
        self.y_bias = np.random.uniform(-0.3, 0.3)
    else:
        self.x_bias = check_param_multi_types('x_bias', x_bias,
                                              [int, float])
        self.y_bias = check_param_multi_types('y_bias', y_bias,
                                              [int, float])

def _confidence_cla(self, inputs, labels):
    """
    Calculate the prediction confidence of the corresponding label, or the
    max confidence of inputs.

    Args:
        inputs (numpy.ndarray): Input samples.
        labels (Union[int, numpy.int16, numpy.int32, numpy.int64]):
            Target labels.

    Returns:
        numpy.ndarray, the prediction confidences of inputs.
    """
    check_numpy_param('inputs', inputs)
    # np.int was an alias of the builtin int and has been removed from
    # recent NumPy releases, so the builtin is used here.
    check_param_multi_types('labels', labels,
                            (int, np.int16, np.int32, np.int64))
    confidences = self._model.predict(inputs)
    if self._targeted:
        confi_choose = confidences[:, labels]
    else:
        confi_choose = np.max(confidences, axis=1)
    return confi_choose

def set_params(self, factor_x=1, factor_y=1, auto_param=False):
    """
    Set scale parameters.

    Args:
        factor_x (Union[float, int]): Rescale in X-direction,
            x = factor_x * x. Default: 1.
        factor_y (Union[float, int]): Rescale in Y-direction,
            y = factor_y * y. Default: 1.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if auto_param:
        self.factor_x = np.random.uniform(0.7, 3)
        self.factor_y = np.random.uniform(0.7, 3)
    else:
        self.factor_x = check_param_multi_types('factor_x', factor_x,
                                                [int, float])
        self.factor_y = check_param_multi_types('factor_y', factor_y,
                                                [int, float])

def __init__(self, network, input_shape, input_bound,
             loss_weights=(1, 0.2, 5)):
    self._network = check_param_type('network', network, Cell)
    for sub_loss_weight in loss_weights:
        check_value_positive('sub_loss_weight', sub_loss_weight)
    self._loss = InversionLoss(self._network, loss_weights)
    self._input_shape = check_param_type('input_shape', input_shape, tuple)
    for shape_dim in input_shape:
        check_int_positive('shape_dim', shape_dim)
    self._input_bound = check_param_multi_types('input_bound', input_bound,
                                                [list, tuple])
    for value_bound in self._input_bound:
        check_param_multi_types('value_bound', value_bound, [float, int])
    if self._input_bound[0] > self._input_bound[1]:
        msg = 'input_bound[0] should not be larger than input_bound[1], ' \
              'but got them as {} and {}'.format(self._input_bound[0],
                                                 self._input_bound[1])
        raise ValueError(msg)

def to_tensor_tuple(inputs_ori):
    """Convert input data into a tuple of tensors."""
    inputs_ori = check_param_multi_types('inputs_ori', inputs_ori,
                                         [np.ndarray, tuple])
    if isinstance(inputs_ori, tuple):
        inputs_tensor = tuple()
        for item in inputs_ori:
            inputs_tensor += (Tensor(item),)
    else:
        inputs_tensor = (Tensor(inputs_ori),)
    return inputs_tensor

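# Illustrative usage (added sketch, not from the original source): a single
# array yields a one-element tuple, and a tuple of arrays is converted
# element-wise.
#   >>> to_tensor_tuple(np.ones((2, 3)))            # (Tensor,)
#   >>> to_tensor_tuple((np.ones(2), np.ones(3)))   # (Tensor, Tensor)
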
def set_params(self, radius=0, auto_param=False):
    """
    Set blur parameters.

    Args:
        radius (Union[float, int]): Blur radius, 0 means no blur.
            Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if auto_param:
        self.radius = np.random.uniform(-1.5, 1.5)
    else:
        self.radius = check_param_multi_types('radius', radius,
                                              [int, float])

def set_params(self, angle=0, auto_param=False):
    """
    Set rotate parameters.

    Args:
        angle (Union[float, int]): Degrees counterclockwise. Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if auto_param:
        self.angle = np.random.uniform(0, 360)
    else:
        self.angle = check_param_multi_types('angle', angle, [int, float])

def __init__(self, model, model_type='classification', targeted=True,
             reserve_ratio=0.3, sparse=True, pop_size=6,
             mutation_rate=0.005, per_bounds=0.15, max_steps=1000,
             step_size=0.20, temp=0.3, bounds=(0, 1.0), adaptive=False,
             c=0.1):
    super(GeneticAttack, self).__init__()
    self._model = check_model('model', model, BlackModel)
    self._model_type = check_param_type('model_type', model_type, str)
    if self._model_type not in ('classification', 'detection'):
        msg = "Only 'classification' or 'detection' is supported now, " \
              "but got {}.".format(self._model_type)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._targeted = check_param_type('targeted', targeted, bool)
    self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                   reserve_ratio)
    if self._reserve_ratio > 1:
        msg = "reserve_ratio should not be greater than 1.0, " \
              "but got {}.".format(self._reserve_ratio)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._sparse = check_param_type('sparse', sparse, bool)
    self._per_bounds = check_value_positive('per_bounds', per_bounds)
    self._pop_size = check_int_positive('pop_size', pop_size)
    self._step_size = check_value_positive('step_size', step_size)
    self._temp = check_value_positive('temp', temp)
    self._max_steps = check_int_positive('max_steps', max_steps)
    self._mutation_rate = check_value_non_negative('mutation_rate',
                                                   mutation_rate)
    if self._mutation_rate > 1:
        msg = "mutation_rate should not be greater than 1.0, " \
              "but got {}.".format(self._mutation_rate)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._adaptive = check_param_type('adaptive', adaptive, bool)
    # initial global optimum fitness value
    self._best_fit = -np.inf
    # count times of no progress
    self._plateau_times = 0
    # count times of changing attack step_size
    self._adap_times = 0
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._c = check_value_positive('c', c)

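# Illustrative usage (added sketch, not from the original source):
# `black_model` is a placeholder BlackModel wrapper; with targeted=True the
# labels passed to `generate` are the desired target classes.
#   >>> attack = GeneticAttack(black_model, model_type='classification',
#   ...                        pop_size=6, max_steps=1000, targeted=True)
#   >>> is_success, adv_x, query_times = attack.generate(inputs,
#   ...                                                  target_labels)
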
def set_params(self, factor=1, auto_param=False):
    """
    Set brightness parameters.

    Args:
        factor (Union[float, int]): Control the brightness of an image.
            A factor of 1 gives the original image; a factor of 0 gives a
            black image. Default: 1.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if auto_param:
        self.factor = np.random.uniform(0, 5)
    else:
        self.factor = check_param_multi_types('factor', factor,
                                              [int, float])

def set_params(self, factor=0, auto_param=False):
    """
    Set noise parameters.

    Args:
        factor (Union[float, int]): The ratio of pixels to add noise to.
            A factor of 0 gives the original image. Default: 0.
        auto_param (bool): True if auto generate parameters. Default: False.
    """
    if auto_param:
        self.factor = np.random.uniform(0, 1)
    else:
        self.factor = check_param_multi_types('factor', factor,
                                              [int, float])

def __init__(self, network, num_classes, model_type='classification',
             reserve_ratio=0.3, max_iters=50, overshoot=0.02, norm_level=2,
             bounds=None, sparse=True):
    super(DeepFool, self).__init__()
    self._network = check_model('network', network, Cell)
    self._max_iters = check_int_positive('max_iters', max_iters)
    self._overshoot = check_value_positive('overshoot', overshoot)
    self._norm_level = check_norm_level(norm_level)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._net_grad = GradWrap(self._network)
    self._bounds = bounds
    if self._bounds is not None:
        self._bounds = check_param_multi_types('bounds', bounds,
                                               [list, tuple])
        for b in self._bounds:
            _ = check_param_multi_types('bound', b, [int, float])
    self._sparse = check_param_type('sparse', sparse, bool)
    self._model_type = check_param_type('model_type', model_type, str)
    if self._model_type not in ('classification', 'detection'):
        msg = "Only 'classification' or 'detection' is supported now, " \
              "but got {}.".format(self._model_type)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    self._reserve_ratio = check_value_non_negative('reserve_ratio',
                                                   reserve_ratio)
    if self._reserve_ratio > 1:
        msg = 'reserve_ratio should not be greater than 1.0, ' \
              'but got {}.'.format(self._reserve_ratio)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)

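# Illustrative usage (added sketch, not from the original source): `net` is a
# placeholder for a trained classification Cell; DeepFool is untargeted, so
# `labels` are the true classes.
#   >>> attack = DeepFool(net, num_classes=10, norm_level=2,
#   ...                   bounds=(0.0, 1.0))
#   >>> adv_x = attack.generate(inputs, labels)
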
def _check_attack_params(self, method, params):
    """Check input parameters of attack methods."""
    allow_params = self._attack_param_checklists[method].keys()
    for param_name in params:
        if param_name not in allow_params:
            msg = "parameters of {} must be in {}".format(method,
                                                          allow_params)
            raise ValueError(msg)

        check_param_type(param_name, params[param_name], list)
        for param_value in params[param_name]:
            if param_name == 'bounds':
                bounds = check_param_multi_types('bounds', param_value,
                                                 [tuple])
                if len(bounds) != 2:
                    msg = 'The format of bounds must be ' \
                          '(lower_bound, upper_bound), ' \
                          'but got its length as {}'.format(len(bounds))
                    raise ValueError(msg)
                for bound_value in bounds:
                    _ = check_param_multi_types('bound', bound_value,
                                                [int, float])
                if bounds[0] >= bounds[1]:
                    msg = "upper bound must be greater than lower bound, " \
                          "but got lower bound {} and upper bound " \
                          "{}".format(bounds[0], bounds[1])
                    raise ValueError(msg)
            elif param_name == 'norm_level':
                _ = check_norm_level(param_value)
            else:
                allow_type = self._attack_param_checklists[method][
                    param_name]['dtype']
                allow_range = self._attack_param_checklists[method][
                    param_name]['range']
                _ = check_param_multi_types(str(param_name), param_value,
                                            allow_type)
                _ = check_param_in_range(str(param_name), param_value,
                                         allow_range[0], allow_range[1])

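# Illustrative shape of a params dict that would pass this check (added
# sketch, not from the original source; `fuzzer`, the method name and the
# parameter names are placeholders): every value is a list of candidates,
# and each 'bounds' candidate is a (lower_bound, upper_bound) tuple.
#   >>> params = {'eps': [0.1, 0.2, 0.3],
#   ...           'bounds': [(0.0, 1.0)],
#   ...           'norm_level': [2]}
#   >>> fuzzer._check_attack_params('FGSM', params)
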
def __init__(self, metrics_name, metrics_data, labels, title, scale='hide'):
    self._metrics_name = check_param_multi_types('metrics_name',
                                                 metrics_name,
                                                 [tuple, list])
    self._metrics_data = check_numpy_param('metrics_data', metrics_data)
    self._labels = check_param_multi_types('labels', labels, (tuple, list))
    _, _ = check_equal_length('metrics_name', metrics_name,
                              'metrics_data', self._metrics_data[0])
    _, _ = check_equal_length('labels', labels, 'metrics_data',
                              metrics_data)
    self._title = check_param_type('title', title, str)
    if scale in ['hide', 'norm', 'sparse', 'dense']:
        self._scale = scale
    else:
        msg = "scale must be in ['hide', 'norm', 'sparse', 'dense'], " \
              "but got {}".format(scale)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)

    self._nb_var = len(metrics_name)
    # divide the plot by the number of variables
    self._angles = [n / self._nb_var * 2.0 * pi
                    for n in range(self._nb_var)]
    self._angles += self._angles[:1]
    # repeat the first data column so the plotted polygon closes
    data = [self._metrics_data, self._metrics_data[:, [0]]]
    self._metrics_data = np.concatenate(data, axis=1)

def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
             bin_search_steps=5, max_iterations=1000, confidence=0,
             learning_rate=5e-3, initial_const=1e-2,
             abort_early_check_ratio=5e-2, targeted=False, fast=True,
             abort_early=True, sparse=True):
    LOGGER.info(TAG, "init CW object.")
    super(CarliniWagnerL2Attack, self).__init__()
    self._network = check_model('network', network, Cell)
    self._network.set_grad(True)
    self._num_classes = check_int_positive('num_classes', num_classes)
    self._min = check_param_type('box_min', box_min, float)
    self._max = check_param_type('box_max', box_max, float)
    self._bin_search_steps = check_int_positive('bin_search_steps',
                                                bin_search_steps)
    self._max_iterations = check_int_positive('max_iterations',
                                              max_iterations)
    self._confidence = check_param_multi_types('confidence', confidence,
                                               [int, float])
    self._learning_rate = check_value_positive('learning_rate',
                                               learning_rate)
    self._initial_const = check_value_positive('initial_const',
                                               initial_const)
    self._abort_early = check_param_type('abort_early', abort_early, bool)
    self._fast = check_param_type('fast', fast, bool)
    self._abort_early_check_ratio = check_value_positive(
        'abort_early_check_ratio', abort_early_check_ratio)
    self._targeted = check_param_type('targeted', targeted, bool)
    self._net_grad = GradWrap(self._network)
    self._sparse = check_param_type('sparse', sparse, bool)
    self._dtype = None

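# Illustrative usage (added sketch, not from the original source): `net` is a
# placeholder for a trained Cell that outputs logits over `num_classes`
# classes, with pixel values in [box_min, box_max].
#   >>> attack = CarliniWagnerL2Attack(net, num_classes=10, targeted=False)
#   >>> adv_x = attack.generate(inputs, labels)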