Example #1
def calculate_lp_distance(original_image, compared_image):
    """
    Calculate l0, l2 and linf distance for two images with the same shape.

    Args:
        original_image (np.ndarray): Original image.
        compared_image (np.ndarray): Another image for comparison.

    Returns:
        - float, l0 distance between two images.

        - float, l2 distance between two images.

        - float, linf distance between two images.

    Raises:
        TypeError: If type of original_image or type of compared_image is not numpy.ndarray.
        ValueError: If the shapes of original_image and compared_image are not the same.
    """
    check_numpy_param('original_image', original_image)
    check_numpy_param('compared_image', compared_image)
    check_equal_shape('original_image', original_image, 'compared_image',
                      compared_image)
    avoid_zero_div = 1e-14
    diff = (original_image - compared_image).flatten()
    data = original_image.flatten()
    l0_dist = np.linalg.norm(diff, ord=0) \
               / (np.linalg.norm(data, ord=0) + avoid_zero_div)
    l2_dist = np.linalg.norm(diff, ord=2) \
               / (np.linalg.norm(data, ord=2) + avoid_zero_div)
    linf_dist = np.linalg.norm(diff, ord=np.inf) \
                 / (np.linalg.norm(data, ord=np.inf) + avoid_zero_div)
    return l0_dist, l2_dist, linf_dist
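A minimal usage sketch for the function above (the image values are made up, and the checker helpers are assumed importable):

import numpy as np

# Hypothetical 2x2 images; values are illustrative only.
original = np.array([[0.0, 0.5], [1.0, 0.25]])
compared = np.array([[0.0, 0.6], [1.0, 0.25]])
l0, l2, linf = calculate_lp_distance(original, compared)
# One of the three non-zero pixels changed, so l0 is about 1/3; l2 and
# linf are likewise normalized by the corresponding norms of `original`.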
Example #2
def calculate_iou(box_i, box_j):
    """
    Calculate the intersection over union (iou) of two boxes.

    Args:
        box_i (numpy.ndarray): Coordinates of the first box, with the format as (x1, y1, x2, y2).
            (x1, y1) and (x2, y2) are coordinates of the lower left corner and the upper right corner,
            respectively.
        box_j (numpy.ndarray): Coordinates of the second box, with the format as (x1, y1, x2, y2).

    Returns:
        float, iou of two input boxes.
    """
    check_numpy_param('box_i', box_i)
    check_numpy_param('box_j', box_j)
    if box_i.shape[-1] != 4 or box_j.shape[-1] != 4:
        msg = 'The length of both coordinate arrays should be 4, but got {} and {}.'.format(
            box_i.shape, box_j.shape)
        LOGGER.error(TAG, msg)
        raise ValueError(msg)
    i_x1, i_y1, i_x2, i_y2 = box_i
    j_x1, j_y1, j_x2, j_y2 = box_j
    s_i = (i_x2 - i_x1) * (i_y2 - i_y1)
    s_j = (j_x2 - j_x1) * (j_y2 - j_y1)
    inner_left_line = max(i_x1, j_x1)
    inner_right_line = min(i_x2, j_x2)
    inner_top_line = min(i_y2, j_y2)
    inner_bottom_line = max(i_y1, j_y1)
    if inner_left_line >= inner_right_line or inner_top_line <= inner_bottom_line:
        return 0
    inner_area = (inner_right_line - inner_left_line) * (inner_top_line -
                                                         inner_bottom_line)
    return inner_area / (s_i + s_j - inner_area)
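A worked example for the box format above (the coordinates are made up):

import numpy as np

box_a = np.array([0.0, 0.0, 2.0, 2.0])  # area 4
box_b = np.array([1.0, 1.0, 3.0, 3.0])  # area 4
# The overlap is the unit square [1, 2] x [1, 2], so
# iou = 1 / (4 + 4 - 1) = 1/7, about 0.143.
iou = calculate_iou(box_a, box_b)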
Example #3
    def __init__(self, raw_preds, def_preds, raw_query_counts,
                 def_query_counts, raw_query_time, def_query_time,
                 def_detection_counts, true_labels, max_queries):
        self._raw_preds, self._def_preds = check_pair_numpy_param(
            'raw_preds', raw_preds, 'def_preds', def_preds)
        self._num_samples = self._raw_preds.shape[0]
        self._raw_query_counts, _ = check_equal_length('raw_query_counts',
                                                       raw_query_counts,
                                                       'number of sample',
                                                       self._raw_preds)
        self._def_query_counts, _ = check_equal_length('def_query_counts',
                                                       def_query_counts,
                                                       'number of sample',
                                                       self._raw_preds)
        self._raw_query_time, _ = check_equal_length('raw_query_time',
                                                     raw_query_time,
                                                     'number of sample',
                                                     self._raw_preds)
        self._def_query_time, _ = check_equal_length('def_query_time',
                                                     def_query_time,
                                                     'number of sample',
                                                     self._raw_preds)

        self._num_adv_samples = self._raw_query_counts[
            self._raw_query_counts > 0].shape[0]

        self._num_adv_samples = check_int_positive(
            'the number of adversarial samples', self._num_adv_samples)

        self._num_ben_samples = self._num_samples - self._num_adv_samples
        self._max_queries = check_int_positive('max_queries', max_queries)

        self._def_detection_counts = check_numpy_param('def_detection_counts',
                                                       def_detection_counts)
        self._true_labels = check_numpy_param('true_labels', true_labels)
Example #4
    def _fitness(self, confi_ori, confi_adv, x_ori, x_adv):
        """
        Calculate the fitness value for each particle.

        Args:
            confi_ori (float): Maximum confidence or target label confidence of
                the original benign inputs' prediction confidences.
            confi_adv (float): Maximum confidence or target label confidence of
                the adversarial samples' prediction confidences.
            x_ori (numpy.ndarray): Benign samples.
            x_adv (numpy.ndarray): Adversarial samples.

        Returns:
            - float, fitness values of adversarial particles.

            - int, query times after reduction.

        Examples:
            >>> fitness = self._fitness(2.4, 1.2, [0.2, 0.3, 0.1],
            ...                         [0.21, 0.34, 0.13])
        """
        x_ori = check_numpy_param('x_ori', x_ori)
        x_adv = check_numpy_param('x_adv', x_adv)
        fit_value = abs(
            confi_ori - confi_adv) - self._c / self._pop_size*np.linalg.norm(
                (x_adv - x_ori).reshape(x_adv.shape[0], -1), axis=1)
        if np.max(fit_value) < 0:
            self._c /= 2
        return fit_value
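A standalone numpy sketch of the same fitness rule, with illustrative stand-ins for the instance attributes (self._c, self._pop_size) it reads:

import numpy as np

confi_ori, confi_adv = 2.4, 1.2
c, pop_size = 0.1, 5  # stand-ins for self._c and self._pop_size
x_ori = np.array([[0.2, 0.3, 0.1]])
x_adv = np.array([[0.21, 0.34, 0.13]])
# Confidence drop minus an L2 distortion penalty, one value per particle.
penalty = c / pop_size * np.linalg.norm(
    (x_adv - x_ori).reshape(x_adv.shape[0], -1), axis=1)
fit_value = abs(confi_ori - confi_adv) - penalty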
Example #5
    def _fast_reduction(self, x_ori, best_position, q_times, auxiliary_inputs,
                        gt_boxes, gt_labels, model):
        """
        Decrease the differences between the original samples and adversarial samples in a fast way.

        Args:
            x_ori (numpy.ndarray): Original samples.
            best_position (numpy.ndarray): Adversarial examples.
            q_times (int): Query times.
            auxiliary_inputs (tuple): Auxiliary inputs matched with x_ori.
            gt_boxes (numpy.ndarray): Ground-truth boxes of x_ori.
            gt_labels (numpy.ndarray): Ground-truth labels of x_ori.
            model (BlackModel): Target model.

        Returns:
            - numpy.ndarray, adversarial examples after reduction.

            - int, total query times after reduction.
        """
        LOGGER.info(TAG, 'Reduction begins...')
        model = check_model('model', model, BlackModel)
        x_ori = check_numpy_param('x_ori', x_ori)
        _, gt_num = self._detection_scores((x_ori, ) + auxiliary_inputs,
                                           gt_boxes, gt_labels, model)
        best_position = check_numpy_param('best_position', best_position)
        x_ori, best_position = check_equal_shape('x_ori', x_ori,
                                                 'best_position',
                                                 best_position)
        _, original_num = self._detection_scores(
            (best_position, ) + auxiliary_inputs, gt_boxes, gt_labels, model)
        # pylint: disable=invalid-name
        REDUCTION_ITERS = 6  # recover 10% difference each time and recover 60% totally.
        for _ in range(REDUCTION_ITERS):
            BLOCK_NUM = 30  # divide the image into 30 segments
            block_width = best_position.shape[0] // BLOCK_NUM
            if block_width > 0:
                for i in range(BLOCK_NUM):
                    diff = x_ori[i*block_width: (i+1)*block_width, :, :]\
                           - best_position[i*block_width:(i+1)*block_width, :, :]
                    if np.max(np.abs(diff)) >= 0.1 * (self._bounds[1] -
                                                      self._bounds[0]):
                        res = diff * 0.1
                        best_position[i * block_width:(i + 1) *
                                      block_width, :, :] += res
                        _, correct_num = self._detection_scores(
                            (best_position, ) + auxiliary_inputs, gt_boxes,
                            gt_labels, model)
                        q_times += 1
                        if correct_num[0] > max(
                                original_num[0],
                                gt_num[0] * self._reserve_ratio):
                            best_position[i * block_width:(i + 1) *
                                          block_width, :, :] -= res
        return best_position, q_times
Example #6
    def detect(self, inputs):
        """
        Tell whether input samples are adversarial or not.

        Args:
            inputs (numpy.ndarray): Suspicious samples to be judged.

        Returns:
            list[int], whether a sample is adversarial. If res[i] == 1, the
            input sample with index i is adversarial.
        """
        LOGGER.debug(TAG, 'enter detect().')
        self._radius = check_param_type('radius', self._radius, float)
        inputs = check_numpy_param('inputs', inputs)
        time_start = time.time()
        res = [1] * inputs.shape[0]
        raw_preds = np.argmax(self._model.predict(Tensor(inputs)).asnumpy(),
                              axis=1)
        rc_preds = self._rc_forward(inputs, self._radius)
        for i in range(inputs.shape[0]):
            if raw_preds[i] == rc_preds[i]:
                res[i] = 0
        LOGGER.debug(TAG, 'time used to detect %d samples is : %s seconds',
                     inputs.shape[0],
                     time.time() - time_start)
        return res
Example #7
    def detect(self, inputs):
        """
        Detect adversarial examples from input samples.

        Args:
            inputs (numpy.ndarray): Input samples.

        Returns:
            list[int], whether a sample is adversarial. If res[i] == 1, the
            input sample with index i is adversarial.

        Raises:
            ValueError: If policy is not supported.
        """

        inputs = check_numpy_param('inputs', inputs)
        x_len = inputs.shape[0]
        counts = np.zeros(x_len)
        res = np.zeros(x_len, dtype=int)
        for detector in list(self._detectors):
            idx = detector.detect(inputs)
            counts[idx] += 1

        if self._policy == "vote":
            idx_adv = np.argwhere(counts > self._num_detectors / 2)
        elif self._policy == "all":
            idx_adv = np.argwhere(counts == self._num_detectors)
        elif self._policy == "any":
            idx_adv = np.argwhere(counts > 0)
        else:
            msg = 'Policy {} is not supported.'.format(self._policy)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        res[idx_adv] = 1
        return list(res)
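A standalone sketch of how the three policies partition samples, assuming three detectors and made-up flag counts:

import numpy as np

counts = np.array([3, 2, 1, 0])  # detectors that flagged each sample
num_detectors = 3
vote = np.argwhere(counts > num_detectors / 2).ravel()  # [0, 1]
all_ = np.argwhere(counts == num_detectors).ravel()     # [0]
any_ = np.argwhere(counts > 0).ravel()                  # [0, 1, 2]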
Example #8
    def __init__(self,
                 inputs,
                 labels,
                 adv_inputs,
                 adv_preds,
                 targeted=False,
                 target_label=None):
        self._inputs, self._labels = check_pair_numpy_param(
            'inputs', inputs, 'labels', labels)
        self._adv_inputs, self._adv_preds = check_pair_numpy_param(
            'adv_inputs', adv_inputs, 'adv_preds', adv_preds)
        targeted = check_param_type('targeted', targeted, bool)
        self._targeted = targeted
        if target_label is not None:
            target_label = check_numpy_param('target_label', target_label)
        self._target_label = target_label
        self._true_label = np.argmax(self._labels, axis=1)
        self._adv_label = np.argmax(self._adv_preds, axis=1)

        idxes = np.arange(self._adv_preds.shape[0])
        if self._targeted:
            if target_label is None:
                msg = 'Targeted attack needs target_label, but got None.'
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
            self._adv_preds, self._target_label = check_pair_numpy_param(
                'adv_pred', self._adv_preds, 'target_label', target_label)
            self._success_idxes = idxes[self._adv_label == self._target_label]
        else:
            self._success_idxes = idxes[self._adv_label != self._true_label]
Example #9
    def transform(self, image):
        """
        Transform the image.

        Args:
            image(numpy.ndarray): Original image to be transformed.

        Returns:
            numpy.ndarray, transformed image.
        """
        image = check_numpy_param('image', image)
        ori_dtype = image.dtype
        rgb, chw, normalized, gray3dim, image = self._check(image)
        if rgb:
            h, w, _ = np.shape(image)
        else:
            h, w = np.shape(image)
        move_x_center = w / 2 * (1 - self.factor_x)
        move_y_center = h / 2 * (1 - self.factor_y)
        img = to_pil(image)
        trans_image = img.transform(img.size, Image.AFFINE,
                                    (self.factor_x, 0, move_x_center,
                                     0, self.factor_y, move_y_center))
        trans_image = self._original_format(trans_image, chw, normalized,
                                            gray3dim)
        return trans_image.astype(ori_dtype)
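A standalone sketch of the same centered rescale with PIL's affine transform (the factors and image are made up); PIL maps each output pixel (x, y) to input (a*x + b*y + c, d*x + e*y + f):

import numpy as np
from PIL import Image

factor_x, factor_y = 0.8, 0.8
arr = (np.random.RandomState(0).rand(32, 32) * 255).astype(np.uint8)
img = Image.fromarray(arr)
w, h = img.size
move_x = w / 2 * (1 - factor_x)
move_y = h / 2 * (1 - factor_y)
out = img.transform(img.size, Image.AFFINE,
                    (factor_x, 0, move_x, 0, factor_y, move_y))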
Example #10
    def test_adequacy_coverage_calculate(self,
                                         dataset,
                                         bias_coefficient=0,
                                         batch_size=32):
        """
        Calculate the testing adequacy of the given dataset.

        Args:
            dataset (numpy.ndarray): Data for fuzz test.
            bias_coefficient (float): The coefficient used for changing the
                neurons' output boundaries. Default: 0.
            batch_size (int): The number of samples in a predict batch.
                Default: 32.

        Examples:
            >>> model_fuzz_test = ModelCoverageMetrics(model, 10000, 10, train_images)
            >>> model_fuzz_test.test_adequacy_coverage_calculate(test_images)
        """
        dataset = check_numpy_param('dataset', dataset)
        batch_size = check_int_positive('batch_size', batch_size)
        self._lower_bounds -= bias_coefficient * self._var
        self._upper_bounds += bias_coefficient * self._var
        intervals = (self._upper_bounds - self._lower_bounds) / self._k
        batches = dataset.shape[0] // batch_size
        for i in range(batches):
            self._sections_hits_count(
                dataset[i * batch_size:(i + 1) * batch_size], intervals)
Example #11
    def __call__(self,
                 gradient,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8):
        """
        Calculate the optimum perturbation for each iteration.

        Args:
            gradient (numpy.ndarray): The gradient of the loss w.r.t. the
                variable.
            learning_rate (float): The learning rate in the current iteration.
                Default: 0.001.
            beta1 (float): Decay rate for calculating the exponentially
                decaying average of past gradients. Default: 0.9.
            beta2 (float): Decay rate for calculating the exponentially
                decaying average of past squared gradients. Default: 0.999.
            epsilon (float): Small value to avoid division by zero.
                Default: 1e-8.

        Returns:
            numpy.ndarray, perturbations.

        Examples:
            >>> perturbs = optimizer([0.2, 0.1, 0.15], 0.005)
        """
        gradient = check_numpy_param('gradient', gradient)
        self._t += 1
        self._m = beta1 * self._m + (1 - beta1) * gradient
        self._v = beta2 * self._v + (1 - beta2) * gradient**2
        alpha = learning_rate * np.sqrt(1 - beta2**self._t) / (1 -
                                                               beta1**self._t)
        perturb = -alpha * self._m / (np.sqrt(self._v) + epsilon)
        return perturb
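The update above is the standard Adam rule with the bias correction folded into alpha; a standalone numpy check of the first step (constants are the defaults above):

import numpy as np

grad = np.array([0.2, 0.1, 0.15])
lr, beta1, beta2, eps, t = 0.001, 0.9, 0.999, 1e-8, 1
m = (1 - beta1) * grad          # first moment, starting from zero
v = (1 - beta2) * grad ** 2     # second moment, starting from zero
alpha = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
perturb = -alpha * m / (np.sqrt(v) + eps)
# On the first step this is approximately -lr * sign(grad).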
Example #12
    def detect_diff(self, inputs):
        """
        Calculate the distance between original samples and reconstructed samples.

        The distance is calculated by JSD.

        Args:
            inputs (numpy.ndarray): Input samples.

        Returns:
            numpy.ndarray, the distances between original and reconstructed samples.

        Raises:
            NotImplementedError: If the param `option` is not supported.
        """
        inputs = check_numpy_param('inputs', inputs)
        x_len = inputs.shape[0]
        x_transformed = self._auto_encoder.predict(Tensor(inputs))
        x_origin = self._model.predict(Tensor(inputs))
        x_trans = self._model.predict(x_transformed)

        y_pred = softmax(x_origin.asnumpy() / self._t, axis=1)
        y_trans_pred = softmax(x_trans.asnumpy() / self._t, axis=1)

        if self._option == 'jsd':
            marks = [_jsd(y_pred[i], y_trans_pred[i]) for i in range(x_len)]
        else:
            msg = '{} is not implemented.'.format(self._option)
            LOGGER.error(TAG, msg)
            raise NotImplementedError(msg)
        return np.array(marks)
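A standalone sketch of the temperature-scaling step above (the `t` values and logits are illustrative): a larger `t` flattens both distributions before the JSD comparison.

import numpy as np
from scipy.special import softmax

logits = np.array([[2.0, 1.0, 0.1]])
for t in (1.0, 10.0):
    print(t, softmax(logits / t, axis=1))  # t=10 is nearly uniform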
Example #13
    def _reduction(x_ori, q_times, label, best_position, model,
                   targeted_attack):
        """
        Decrease the differences between the original samples and adversarial samples.

        Args:
            x_ori (numpy.ndarray): Original samples.
            q_times (int): Query times.
            label (int): Target label or ground-truth label.
            best_position (numpy.ndarray): Adversarial examples.
            model (BlackModel): Target model.
            targeted_attack (bool): If True, it means this is a targeted attack. If False,
                it means this is an untargeted attack.

        Returns:
            numpy.ndarray, adversarial examples after reduction.

        Examples:
            >>> adv_reduction, times = self._reduction([0.1, 0.2, 0.3], 20, 1,
            ...                                        [0.12, 0.15, 0.25],
            ...                                        model, True)
        """
        LOGGER.info(TAG, 'Reduction begins...')
        model = check_model('model', model, BlackModel)
        x_ori = check_numpy_param('x_ori', x_ori)
        best_position = check_numpy_param('best_position', best_position)
        x_ori, best_position = check_equal_shape('x_ori', x_ori,
                                                 'best_position',
                                                 best_position)
        x_ori_fla = x_ori.flatten()
        best_position_fla = best_position.flatten()
        pixel_deep = np.max(x_ori) - np.min(x_ori)
        nums_pixel = len(x_ori_fla)
        for i in range(nums_pixel):
            diff = x_ori_fla[i] - best_position_fla[i]
            if abs(diff) > pixel_deep * 0.1:
                best_position_fla[i] += diff * 0.5
                cur_label = np.argmax(
                    model.predict(best_position_fla.reshape(x_ori.shape)))
                q_times += 1
                if targeted_attack:
                    if cur_label != label:
                        best_position_fla[i] -= diff * 0.5

                else:
                    if cur_label == label:
                        best_position_fla[i] -= diff * 0.5
        return best_position_fla.reshape(x_ori.shape), q_times
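A single-pixel sketch of the reduction rule above, with made-up values:

import numpy as np

x_ori_fla = np.array([0.9])
best_position_fla = np.array([0.2])
pixel_deep = 1.0  # value range of the image
diff = x_ori_fla[0] - best_position_fla[0]
if abs(diff) > pixel_deep * 0.1:
    # Move the adversarial pixel halfway back toward the original; the
    # loop above undoes the step if the attack stops succeeding.
    best_position_fla[0] += diff * 0.5  # 0.2 -> 0.55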
Example #14
    def set_target_images(self, target_images):
        """
        Set target samples for target attack.

        Args:
            target_images (numpy.ndarray): Target samples for target attack.
        """
        self.target_imgs = check_numpy_param('target_images', target_images)
Example #15
    def set_target_images(self, target_images):
        """
        Set target images for target attack.

        Args:
            target_images (numpy.ndarray): Target images.
        """
        self.image_targets = check_numpy_param('target_images', target_images)
Example #16
    def set_target_images(self, target_images):
        """
        Set target samples for target attack in the Partial-Info setting or Label-Only setting.

        Args:
            target_images (numpy.ndarray): Target samples for target attack.
        """
        self.target_imgs = check_numpy_param('target_images', target_images)
Example #17
    def fit(self, inputs, labels=None):
        """
        Process input training data to calculate the threshold.
        A proper threshold should make sure the false positive
        rate is under a given value.

        Args:
            inputs (numpy.ndarray): Training data to calculate the threshold.
            labels (numpy.ndarray): Labels of training data.

        Returns:
            - list[int], number of the nearest neighbors.

            - list[float], calculated thresholds for different K.

        Raises:
            ValueError: If the number of training samples is less than
                max_k_neighbor.
        """
        data = check_numpy_param('inputs', inputs)
        data_len = data.shape[0]
        if data_len < self._max_k_neighbor:
            raise ValueError('The number of training samples must be at '
                             'least max_k_neighbor.')
        data = self._trans_model.predict(Tensor(data)).asnumpy()
        data = data.reshape((data.shape[0], -1))
        distances = []
        for i in range(data.shape[0] // self._chunk_size):
            distance_mat = _pairwise_distances(
                x_input=data[i * self._chunk_size:(i + 1) *
                             self._chunk_size, :],
                y_input=data)
            distance_mat = np.sort(distance_mat, axis=-1)
            distances.append(distance_mat[:, :self._max_k_neighbor])
        # handle the remaining samples that do not fill a full chunk
        distance_mat = _pairwise_distances(
            x_input=data[(data.shape[0] // self._chunk_size) *
                         self._chunk_size:, :],
            y_input=data)
        distance_mat = np.sort(distance_mat, axis=-1)
        distances.append(distance_mat[:, :self._max_k_neighbor])

        distance_matrix = np.concatenate(distances, axis=0)

        start = 1 if self._tuning else self._max_k_neighbor

        thresholds = []
        num_nearest_neighbors = []
        for k in range(start, self._max_k_neighbor + 1):
            avg_dist = distance_matrix[:, :k].mean(axis=-1)
            index = int(len(avg_dist) * self._fpr)
            threshold = np.sort(avg_dist, axis=None)[index]
            num_nearest_neighbors.append(k)
            thresholds.append(threshold)
        if thresholds:
            self._threshold = thresholds[-1]
            self._num_of_neighbors = num_nearest_neighbors[-1]
        return num_nearest_neighbors, thresholds
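A standalone sketch of the threshold rule at the end of fit: for each k, the threshold sits at position int(len * fpr) of the ascending average k-NN distances (the data here is synthetic), and the last k processed determines the stored threshold.

import numpy as np

avg_dist = np.sort(np.random.RandomState(0).rand(100))
fpr = 0.05
threshold = avg_dist[int(len(avg_dist) * fpr)]  # 5th smallest of 100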
Example #18
    def _reduction(self, x_ori, q_times, label, best_position):
        """
        Decrease the differences between the original samples and adversarial samples.

        Args:
            x_ori (numpy.ndarray): Original samples.
            q_times (int): Query times.
            label (int): Target label or ground-truth label.
            best_position (numpy.ndarray): Adversarial examples.

        Returns:
            numpy.ndarray, adversarial examples after reduction.

        Examples:
            >>> adv_reduction, times = self._reduction([0.1, 0.2, 0.3], 20, 1,
            ...                                        [0.12, 0.15, 0.25])
        """
        x_ori = check_numpy_param('x_ori', x_ori)
        best_position = check_numpy_param('best_position', best_position)
        x_ori, best_position = check_equal_shape('x_ori', x_ori,
                                                 'best_position',
                                                 best_position)
        x_ori_fla = x_ori.flatten()
        best_position_fla = best_position.flatten()
        pixel_deep = self._bounds[1] - self._bounds[0]
        nums_pixel = len(x_ori_fla)
        for i in range(nums_pixel):
            diff = x_ori_fla[i] - best_position_fla[i]
            if abs(diff) > pixel_deep * 0.1:
                old_poi_fla = np.copy(best_position_fla)
                best_position_fla[i] = np.clip(
                    best_position_fla[i] + diff * 0.5, self._bounds[0],
                    self._bounds[1])
                cur_label = np.argmax(
                    self._model.predict(
                        np.expand_dims(best_position_fla.reshape(x_ori.shape),
                                       axis=0))[0])
                q_times += 1
                if self._targeted:
                    if cur_label != label:
                        best_position_fla = old_poi_fla
                else:
                    if cur_label == label:
                        best_position_fla = old_poi_fla
        return best_position_fla.reshape(x_ori.shape), q_times
Example #19
def _jsd(prob_dist_p, prob_dist_q):
    """
    Compute the Jensen-Shannon Divergence between two probability
    distributions with equal weights.

    Args:
        prob_dist_p (numpy.ndarray): Probability distribution p.
        prob_dist_q (numpy.ndarray): Probability distribution q.

    Returns:
        float, the Jensen-Shannon Divergence.
    """
    prob_dist_p = check_numpy_param('prob_dist_p', prob_dist_p)
    prob_dist_q = check_numpy_param('prob_dist_q', prob_dist_q)
    norm_dist_p = prob_dist_p / (np.linalg.norm(prob_dist_p, ord=1) + 1e-12)
    norm_dist_q = prob_dist_q / (np.linalg.norm(prob_dist_q, ord=1) + 1e-12)
    norm_mean = 0.5*(norm_dist_p + norm_dist_q)
    return 0.5*(stats.entropy(norm_dist_p, norm_mean)
                + stats.entropy(norm_dist_q, norm_mean))
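A standalone check of the same computation for two already-normalized distributions (the values are made up); the result is symmetric in p and q and bounded by ln 2:

import numpy as np
from scipy import stats

p = np.array([0.9, 0.05, 0.05])
q = np.array([0.1, 0.45, 0.45])
m = 0.5 * (p + q)
jsd = 0.5 * (stats.entropy(p, m) + stats.entropy(q, m))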
Example #20
    def __init__(self, initial_seeds, target_model, train_dataset, const_K,
                 mode='L', max_seed_num=1000):
        self.initial_seeds = initial_seeds
        self.target_model = check_model('model', target_model, Model)
        self.train_dataset = check_numpy_param('train_dataset', train_dataset)
        self.const_k = check_int_positive('const_k', const_K)
        self.mode = mode
        self.max_seed_num = check_int_positive('max_seed_num', max_seed_num)
        self.coverage_metrics = ModelCoverageMetrics(target_model, 1000, 10,
                                                     train_dataset)
Example #21
    def _confidence_cla(self, inputs, labels):
        """
        Calculate the prediction confidence of corresponding label or max confidence of inputs.

        Args:
            inputs (numpy.ndarray): Input samples.
            labels (Union[int, numpy.int16, numpy.int32, numpy.int64]): Target labels.

        Returns:
            numpy.ndarray, the prediction confidences of inputs.
        """
        check_numpy_param('inputs', inputs)
        check_param_multi_types('labels', labels, (int, np.int16, np.int32, np.int64))
        confidences = self._model.predict(inputs)
        if self._targeted:
            confi_choose = confidences[:, labels]
        else:
            confi_choose = np.max(confidences, axis=1)
        return confi_choose
Example #22
    def _mutation_op(self, cur_pop):
        """
        Generate mutation samples.
        """
        cur_pop = check_numpy_param('cur_pop', cur_pop)
        perturb_noise = np.random.random(cur_pop.shape) - 0.5
        mutated_pop = perturb_noise * (np.random.random(cur_pop.shape)
                                       < self._pm) + cur_pop
        mutated_pop = np.clip(mutated_pop, cur_pop * (1 - self._per_bounds),
                              cur_pop * (1 + self._per_bounds))
        return mutated_pop
Example #23
    def __init__(self, model, k, n, train_dataset):
        self._model = check_model('model', model, Model)
        self._k = k
        self._n = n
        train_dataset = check_numpy_param('train_dataset', train_dataset)
        self._lower_bounds = [np.inf] * n
        self._upper_bounds = [-np.inf] * n
        self._var = [0] * n
        self._main_section_hits = [[0 for _ in range(self._k)]
                                   for _ in range(self._n)]
        self._lower_corner_hits = [0] * self._n
        self._upper_corner_hits = [0] * self._n
        self._bounds_get(train_dataset)
Example #24
    def __init__(self, model, segmented_num, neuron_num, train_dataset):
        self._model = check_model('model', model, Model)
        self._segmented_num = check_int_positive('segmented_num',
                                                 segmented_num)
        self._neuron_num = check_int_positive('neuron_num', neuron_num)
        train_dataset = check_numpy_param('train_dataset', train_dataset)
        self._lower_bounds = [np.inf] * neuron_num
        self._upper_bounds = [-np.inf] * neuron_num
        self._var = [0] * neuron_num
        self._main_section_hits = [[0 for _ in range(self._segmented_num)]
                                   for _ in range(self._neuron_num)]
        self._lower_corner_hits = [0] * self._neuron_num
        self._upper_corner_hits = [0] * self._neuron_num
        self._bounds_get(train_dataset)
Example #25
def _eval_info(pred, truth, option):
    """
    Calculate the performance according to pred and truth.

    Args:
        pred (numpy.ndarray): Predictions for each sample.
        truth (numpy.ndarray): Ground truth for each sample.
        option (str): Type of evaluation indicators; Possible
            values are 'precision', 'accuracy' and 'recall'.

    Returns:
        float, calculated evaluation result.

    Raises:
        ValueError: If the size of parameter pred or truth is 0.
        ValueError: If the value of parameter option is not in
            ["precision", "accuracy", "recall"].
    """
    check_numpy_param("pred", pred)
    check_numpy_param("truth", truth)

    if option == "accuracy":
        count = np.sum(pred == truth)
        return count / len(pred)
    if option == "precision":
        if np.sum(pred) == 0:
            return -1
        count = np.sum(pred & truth)
        return count / np.sum(pred)
    if option == "recall":
        if np.sum(truth) == 0:
            return -1
        count = np.sum(pred & truth)
        return count / np.sum(truth)

    msg = "The metric value {} is undefined.".format(option)
    LOGGER.error(TAG, msg)
    raise ValueError(msg)
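A quick worked example for the binary case (the arrays are made up, and the module's checker helpers are assumed on the path):

import numpy as np

pred = np.array([1, 0, 1, 1])
truth = np.array([1, 1, 0, 1])
# accuracy = 2/4; precision = 2/3 (two true positives out of three
# predicted positives); recall = 2/3 (out of three actual positives).
acc = _eval_info(pred, truth, 'accuracy')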
Example #26
    def transform(self, inputs):
        """
        Generate hyper cube for input samples.

        Args:
            inputs (numpy.ndarray): Input samples.

        Returns:
            numpy.ndarray, hyper cube corresponds to every sample.
        """
        LOGGER.debug(TAG, 'enter transform().')
        inputs = check_numpy_param('inputs', inputs)
        res = []
        for elem in inputs:
            res.append(self._generate_hyper_cube(elem, self._radius))
        return np.asarray(res)
Example #27
    def detect_diff(self, inputs):
        """
        Calculate the distance between the original samples and reconstructed samples.

        Args:
            inputs (numpy.ndarray): Input samples.

        Returns:
            numpy.ndarray, the distances between reconstructed and original samples.
        """
        inputs = check_numpy_param('inputs', inputs)
        x_trans = self._auto_encoder.predict(Tensor(inputs))
        diff = np.abs(inputs - x_trans.asnumpy())
        dims = tuple(np.arange(len(inputs.shape))[1:])
        marks = np.mean(np.power(diff, 2), axis=dims)
        return marks
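A standalone sketch of the per-sample reconstruction error above; `dims` selects every axis except the batch axis (the reconstruction here is faked by adding a small offset):

import numpy as np

inputs = np.random.RandomState(0).rand(4, 8, 8)
recon = inputs + 0.01  # stand-in for the autoencoder output
dims = tuple(np.arange(len(inputs.shape))[1:])  # (1, 2)
marks = np.mean((inputs - recon) ** 2, axis=dims)  # shape (4,)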
Example #28
    def transform(self, inputs):
        """
        Reconstruct input samples.

        Args:
            inputs (numpy.ndarray): Input samples.

        Returns:
            numpy.ndarray, reconstructed images.
        """
        inputs = check_numpy_param('inputs', inputs)
        x_trans = self._auto_encoder.predict(Tensor(inputs))
        if self._bounds is not None:
            clip_min, clip_max = self._bounds
            x_trans = np.clip(x_trans.asnumpy(), clip_min, clip_max)
        return x_trans
Example #29
    def detect_diff(self, inputs):
        """
        Return the raw distance value (before applying the threshold) between
        the input sample and its smoothed counterpart.

        Args:
            inputs (numpy.ndarray): Suspicious samples to be judged.

        Returns:
            float, distance.
        """
        inputs = check_numpy_param('inputs', inputs)
        raw_pred = self._model.predict(Tensor(inputs))
        smoothing_pred = self._model.predict(Tensor(self.transform(inputs)))
        dist = self._dist(raw_pred.asnumpy(), smoothing_pred.asnumpy())
        return dist
Example #30
    def _mutation_op(self, cur_pop):
        """
        Generate mutation samples.

        Args:
            cur_pop (numpy.ndarray): Inputs before mutation operation.

        Returns:
            numpy.ndarray, mutational inputs.
        """
        LOGGER.info(TAG, 'Mutation happens...')
        pixel_deep = self._bounds[1] - self._bounds[0]
        cur_pop = check_numpy_param('cur_pop', cur_pop)
        perturb_noise = (np.random.random(cur_pop.shape) - 0.5) * pixel_deep
        mutated_pop = np.clip(
            perturb_noise * (np.random.random(cur_pop.shape) < self._pm) + cur_pop,
            self._bounds[0], self._bounds[1])
        return mutated_pop
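A standalone sketch of the mutation rule above, with illustrative stand-ins for the instance attributes:

import numpy as np

rng = np.random.RandomState(0)
cur_pop = rng.rand(2, 3)
bounds, pm = (0.0, 1.0), 0.5  # stand-ins for self._bounds and self._pm
pixel_deep = bounds[1] - bounds[0]
# Each element receives uniform noise in [-0.5, 0.5] * pixel_deep with
# probability pm, then the result is clipped back into bounds.
noise = (rng.random_sample(cur_pop.shape) - 0.5) * pixel_deep
mask = rng.random_sample(cur_pop.shape) < pm
mutated_pop = np.clip(noise * mask + cur_pop, bounds[0], bounds[1])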