Example #1
    def perturbation(self, samples, ys_target, device):
        """

        :param samples:
        :param ys_target:
        :return:
        """
        copy_samples = np.copy(samples)

        # randomization
        copy_samples = np.clip(
            copy_samples + self.alpha_ratio * self.epsilon *
            np.sign(np.random.randn(*copy_samples.shape)), 0.0,
            1.0).astype(np.float32)

        var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                      device=device,
                                      requires_grad=True)
        var_ys_target = tensor2variable(torch.from_numpy(ys_target), device)

        eps = (1 - self.alpha_ratio) * self.epsilon

        self.model.eval()
        preds = self.model(var_samples)
        loss_fun = torch.nn.CrossEntropyLoss()
        loss = loss_fun(preds, var_ys_target)
        loss.backward()
        gradient_sign = var_samples.grad.data.cpu().sign().numpy()

        adv_samples = copy_samples - eps * gradient_sign
        adv_samples = np.clip(adv_samples, 0, 1)
        return adv_samples
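All of these examples call a tensor2variable helper that is not shown on this page. Below is a minimal sketch consistent with the call sites above (positional or keyword device, optional requires_grad); it is an assumption, not the repository's own implementation:

import torch

def tensor2variable(x, device, requires_grad=False):
    # move the tensor to the target device and optionally track gradients
    x = x.to(device)
    x.requires_grad_(requires_grad)
    return x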
Example #2
    def perturbation(self, samples, ys, device):
        """

        :param samples:
        :param ys:
        :param device:
        :return:
        """

        copy_samples = np.copy(samples)

        # randomized start: add a random sign perturbation before the single gradient step
        copy_samples = copy_samples + (self.alpha * self.epsilon * np.sign(np.random.randn(*copy_samples.shape)))
        copy_samples = np.clip(copy_samples, 0.0, 1.0).astype(np.float32)

        eps = (1.0 - self.alpha) * self.epsilon
        var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
        var_ys = tensor2variable(torch.LongTensor(ys), device=device)

        self.model.eval()
        preds = self.model(var_samples)
        loss_fun = torch.nn.CrossEntropyLoss()
        loss = loss_fun(preds, torch.max(var_ys, 1)[1])
        loss.backward()

        gradient_sign = var_samples.grad.data.cpu().sign().numpy()
        adv_samples = copy_samples + eps * gradient_sign

        adv_samples = np.clip(adv_samples, 0.0, 1.0)
        return adv_samples
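The torch.max(var_ys, 1)[1] call here (and in the iterative examples below) implies that ys arrives one-hot encoded; a small sketch of preparing such labels, assuming 10 classes:

import numpy as np

ys_int = np.array([3, 1])                 # hypothetical integer labels
ys = np.eye(10)[ys_int].astype(np.int64)  # one-hot, shape (2, 10)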
Example #3
    def perturbation(self, samples, ys, device):
        """

        :param samples:
        :param ys:
        :param device:
        :return:
        """
        copy_samples = np.copy(samples)
        self.model.to(device)

        for index in range(self.num_steps):
            var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                          device=device,
                                          requires_grad=True)
            var_ys = tensor2variable(torch.LongTensor(ys), device=device)

            self.model.eval()
            preds = self.model(var_samples)
            loss_fun = torch.nn.CrossEntropyLoss()
            loss = loss_fun(preds, torch.max(var_ys, 1)[1])
            loss.backward()

            gradient_sign = var_samples.grad.data.cpu().sign().numpy()
            copy_samples = copy_samples + self.epsilon_iter * gradient_sign

            copy_samples = np.clip(copy_samples, samples - self.epsilon,
                                   samples + self.epsilon)
            copy_samples = np.clip(copy_samples, 0.0, 1.0)
        return copy_samples
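The two np.clip calls at the end of each step first project the iterate back into the epsilon-ball around the clean samples and then into the valid pixel range; a standalone numeric sketch:

import numpy as np

samples, epsilon = np.array([0.5]), 0.1
x = np.array([0.95])                                  # iterate drifted outside the ball
x = np.clip(x, samples - epsilon, samples + epsilon)  # project into the epsilon-ball
x = np.clip(x, 0.0, 1.0)                              # keep pixel values valid
print(x)  # [0.6]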
Example #4
    def perturbation(self, samples, ys, device):
        """

        :param samples:
        :param ys:
        :param device:
        :return:
        """

        copy_samples = np.copy(samples)
        momentum = 0
        for index in range(self.num_steps):
            var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                          device=device,
                                          requires_grad=True)
            var_ys = tensor2variable(torch.LongTensor(ys), device=device)

            # obtain the gradient
            self.model.eval()
            preds = self.model(var_samples)
            loss_fun = torch.nn.CrossEntropyLoss()
            loss = loss_fun(preds, torch.max(var_ys, 1)[1])
            loss.backward()
            gradient = var_samples.grad.data.cpu().numpy()

            # update the momentum in the gradient direction
            momentum = self.decay_factor * momentum + gradient
            # update the (t+1) adversarial example
            copy_samples = copy_samples + self.epsilon_iter * np.sign(momentum)
            copy_samples = np.clip(copy_samples, samples - self.epsilon,
                                   samples + self.epsilon)
            copy_samples = np.clip(copy_samples, 0.0, 1.0)

        return copy_samples
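This accumulation matches the momentum iterative attack except that the raw gradient is added directly; the MI-FGSM formulation of Dong et al. (2018) first normalizes each gradient by its L1 norm. A sketch of that variant as a standalone function:

import numpy as np

def momentum_update(momentum, gradient, decay_factor):
    # L1-normalize the gradient per sample, then accumulate (MI-FGSM style)
    g = gradient / (np.sum(np.abs(gradient), axis=(1, 2, 3), keepdims=True) + 1e-12)
    return decay_factor * momentum + g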
Example #5
    def perturbation(self, samples, ys_target, device):
        """

        :param samples:
        :param ys_target:
        :param device:
        :return:
        """
        copy_samples = np.copy(samples)

        var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                      device=device,
                                      requires_grad=True)
        var_ys_target = tensor2variable(torch.from_numpy(ys_target), device)

        self.model.eval()
        preds = self.model(var_samples)
        loss_fun = torch.nn.CrossEntropyLoss()
        loss = loss_fun(preds, var_ys_target)
        loss.backward()
        gradient_sign = var_samples.grad.data.cpu().sign().numpy()

        adv_samples = copy_samples - self.epsilon * gradient_sign
        adv_samples = np.clip(adv_samples, 0.0, 1.0)

        return adv_samples
Example #6
    def perturbation(self, samples, ys, device):
        """

        :param samples:
        :param ys:
        :param device:
        :return:
        """

        copy_samples = np.copy(samples)
        self.model.to(device)
        # randomly chosen starting points inside the L_inf ball around the clean samples
        copy_samples = copy_samples + np.random.uniform(
            -self.epsilon, self.epsilon, copy_samples.shape).astype('float32')

        for index in range(self.num_steps):
            var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                          device=device,
                                          requires_grad=True)
            var_ys = tensor2variable(torch.LongTensor(ys), device=device)

            self.model.eval()
            preds = self.model(var_samples)
            loss_fun = torch.nn.CrossEntropyLoss()
            loss = loss_fun(preds, torch.max(var_ys, 1)[1])
            loss.backward()

            gradient_sign = var_samples.grad.data.cpu().sign().numpy()
            copy_samples = copy_samples + self.epsilon_iter * gradient_sign

            copy_samples = np.clip(copy_samples, samples - self.epsilon,
                                   samples + self.epsilon)
            copy_samples = np.clip(copy_samples, 0.0, 1.0)

        return copy_samples
Example #7
    def perturbation(self, samples, ys_targets, batch_size, device):
        """

        :param samples:
        :param ys_targets:
        :param batch_size:
        :param device:
        :return:
        """
        assert len(samples) == batch_size, \
            "the number of samples does not equal batch_size"

        copy_samples = np.copy(samples)
        var_samples = tensor2variable(torch.from_numpy(copy_samples),
                                      device=device)
        var_targets = tensor2variable(torch.LongTensor(ys_targets),
                                      device=device)

        # set the lower and upper bound for searching 'c' const
        const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const
        c_upper_bound = [1e10] * batch_size
        c_lower_bound = np.zeros(batch_size)

        # L2 norm attack
        best_l2 = [1e10] * batch_size
        best_perturbation = np.zeros(var_samples.size())
        current_prediction_class = [-1] * batch_size

        def attack_achieved(pre_softmax, target_class):
            return np.argmax(pre_softmax) == target_class

        # note that we should turn off the reduction when applying the loss function to a batch of samples
        loss_fun = torch.nn.CrossEntropyLoss(reduction='none')

        self.model.eval()
        # Outer loop: binary search for the constant c
        for search_for_c in range(self.binary_search_steps):
            # the perturbation
            r = torch.zeros_like(var_samples).float()
            r = tensor2variable(r, device=device, requires_grad=True)

            # use LBFGS (with its default learning rate) to optimize the perturbation r
            optimizer = torch.optim.LBFGS([r], max_iter=self.max_iterations)
            var_const = tensor2variable(torch.FloatTensor(const_origin),
                                        device=device)
            print("\tbinary search step {}:".format(search_for_c))

            # closure re-evaluated by LBFGS at each optimization step
            def closure():
                perturbed_images = torch.clamp(var_samples + r,
                                               min=0.0,
                                               max=1.0)
                prediction = self.model(perturbed_images)
                l2dist = torch.sum((perturbed_images - var_samples)**2,
                                   [1, 2, 3])
                constraint_loss = loss_fun(prediction, var_targets)
                loss_f = var_const * constraint_loss
                # minimize |r|^2 + c * loss_f(x+r, l); l is the target label, r is the perturbation
                loss = l2dist.sum() + loss_f.sum()
                optimizer.zero_grad()
                loss.backward(retain_graph=True)
                return loss

            optimizer.step(closure)

            perturbed_images = torch.clamp(var_samples + r, min=0.0, max=1.0)
            prediction = self.model(perturbed_images)
            l2dist = torch.sum((perturbed_images - var_samples)**2, [1, 2, 3])

            # the following is analogous to the CW2 attack
            for i, (dist, score, perturbation) in enumerate(
                    zip(l2dist.data.cpu().numpy(),
                        prediction.data.cpu().numpy(),
                        perturbed_images.data.cpu().numpy())):
                if dist < best_l2[i] and attack_achieved(score, ys_targets[i]):
                    best_l2[i] = dist
                    current_prediction_class[i] = np.argmax(score)
                    best_perturbation[i] = perturbation

            # update the best constant c for each sample in the batch
            for i in range(batch_size):
                if current_prediction_class[i] == ys_targets[i] and current_prediction_class[i] != -1:
                    c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                else:
                    c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                    else:
                        const_origin[i] *= 10
        return np.array(best_perturbation)
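The constant-update block above (repeated in the examples below) is a per-sample bisection on c; isolated as a standalone function it reads:

def update_const(c, lower, upper, attack_succeeded):
    # bisect once the interval is bounded; otherwise grow c tenfold
    if attack_succeeded:
        upper = min(upper, c)
        if upper < 1e10:
            c = (lower + upper) / 2.0
    else:
        lower = max(lower, c)
        c = (lower + upper) / 2.0 if upper < 1e10 else c * 10
    return c, lower, upper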
Example #8
    def perturbation(self, samples, ys_targets, batch_size, device):
        """

        :param samples:
        :param ys_targets:
        :param batch_size:
        :param device:
        :return:
        """
        assert len(samples) == batch_size, \
            "the number of samples does not equal batch_size"

        # transform the samples [lower, upper] to [-1, 1] and then to the arctanh space
        mid_point = (self.upper_bound + self.lower_bound) * 0.5
        half_range = (self.upper_bound - self.lower_bound) * 0.5
        arctanh_samples = np.arctanh(
            (samples - mid_point) / half_range * 0.9999)
        var_samples = tensor2variable(torch.from_numpy(arctanh_samples),
                                      device=device,
                                      requires_grad=True)

        # set the lower and upper bound for searching 'c' const in the CW2 attack
        const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const
        c_upper_bound = [1e10] * batch_size
        c_lower_bound = np.zeros(batch_size)

        # convert targets to one-hot encoding
        temp_one_hot_matrix = np.eye(10)
        targets_in_one_hot = []
        for i in range(batch_size):
            current_target = temp_one_hot_matrix[ys_targets[i]]
            targets_in_one_hot.append(current_target)
        targets_in_one_hot = tensor2variable(torch.FloatTensor(
            np.array(targets_in_one_hot)),
                                             device=device)

        best_l2 = [1e10] * batch_size
        best_perturbation = np.zeros(var_samples.size())
        current_prediction_class = [-1] * batch_size

        def attack_achieved(pre_softmax, target_class):
            pre_softmax[target_class] -= self.kappa
            return np.argmax(pre_softmax) == target_class

        self.model.eval()
        # Outer loop: binary search for the constant c
        for search_for_c in range(self.binary_search_steps):

            modifier = torch.zeros(var_samples.size()).float()
            modifier = tensor2variable(modifier,
                                       device=device,
                                       requires_grad=True)
            optimizer = torch.optim.Adam([modifier], lr=self.learning_rate)
            var_const = tensor2variable(torch.FloatTensor(const_origin),
                                        device=device)
            print("\tbinary search step {}:".format(search_for_c))

            for iteration_times in range(self.max_iterations):
                # invert the tanh transform back to [0, 1]
                perturbed_images = torch.tanh(
                    var_samples + modifier) * half_range + mid_point
                prediction = self.model(perturbed_images)

                l2dist = torch.sum(
                    (perturbed_images -
                     (torch.tanh(var_samples) * half_range + mid_point))**2,
                    [1, 2, 3])

                constraint_loss = torch.max(
                    (prediction - 1e10 * targets_in_one_hot).max(1)[0] -
                    (prediction * targets_in_one_hot).sum(1),
                    torch.ones(batch_size, device=device) * self.kappa * -1)

                loss_f = var_const * constraint_loss
                # minimize |r|^2 + c * loss_f(x+r, l)
                loss = l2dist.sum() + loss_f.sum()
                optimizer.zero_grad()
                loss.backward(retain_graph=True)
                optimizer.step()

                # update the best l2 distance, current prediction class, and the corresponding adversarial example
                for i, (dist, score, img) in enumerate(
                        zip(l2dist.data.cpu().numpy(),
                            prediction.data.cpu().numpy(),
                            perturbed_images.data.cpu().numpy())):
                    if dist < best_l2[i] and attack_achieved(
                            score, ys_targets[i]):
                        best_l2[i] = dist
                        current_prediction_class[i] = np.argmax(score)
                        best_perturbation[i] = img

            # update the best constant c for each sample in the batch
            for i in range(batch_size):
                if current_prediction_class[i] == ys_targets[i] and current_prediction_class[i] != -1:
                    c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                else:
                    c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                    else:
                        const_origin[i] *= 10

        return np.array(best_perturbation)
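The constraint_loss above is the CW2 margin max(max_{j != t} Z_j - Z_t, -kappa) on the logits Z with target class t; a standalone numeric check with made-up logits:

import torch

logits = torch.tensor([[2.0, 0.5, -1.0]])
one_hot = torch.tensor([[0.0, 1.0, 0.0]])  # target class 1
kappa = 0.0
margin = (logits - 1e10 * one_hot).max(1)[0] - (logits * one_hot).sum(1)
loss = torch.max(margin, torch.full((1,), -kappa))
print(loss)  # tensor([1.5000]) == Z_0 - Z_1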
Example #9
    def perturbation(self, samples, true_label, batch_size, device):
        """

        :param samples:
        :param true_label:
        :param batch_size:
        :param device:
        :return:
        """
        assert len(samples) == batch_size, \
            "the number of samples does not equal batch_size"

        # initialize noise
        channel, width, length = samples.shape[1:]
        noise_raw = np.random.normal(scale=self.noise_magnitude,
                                     size=(channel * length * width,
                                           self.noise_count)).astype(
                                               np.float32)
        noise_unit_vector, _ = np.linalg.qr(
            noise_raw)  # turn the noises to orthogonal unit vectors using QR

        assert noise_unit_vector.shape[1] == self.noise_count
        # noise_vector = noise_unit_vector * np.sqrt(channel * width * length) * self.noise_magnitude
        noise_vector = noise_unit_vector * (
            1.0 / np.max(np.abs(noise_unit_vector))) * self.noise_magnitude
        noise_vector = noise_vector.transpose((1, 0)).reshape(
            (self.noise_count, channel, width, length))
        noise_vector[self.noise_count - 1] = 0  # zero noise: include the unperturbed image itself
        noise_vector = tensor2variable(torch.from_numpy(noise_vector),
                                       device,
                                       requires_grad=False)

        # transform the samples [lower, upper] to [-1, 1] and then to the arctanh space
        mid_point = (self.upper_bound + self.lower_bound) * 0.5
        half_range = (self.upper_bound - self.lower_bound) * 0.5
        arctanh_samples = np.arctanh(
            (samples - mid_point) / half_range * 0.9999)
        var_samples = tensor2variable(torch.from_numpy(arctanh_samples),
                                      device=device,
                                      requires_grad=True)

        # set the lower and upper bound for searching 'c' in the OM attack
        const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const
        c_upper_bound = [1e10] * batch_size
        c_lower_bound = np.zeros(batch_size)

        # convert the true labels to one-hot encoding
        temp_one_hot_matrix = np.eye(10)
        labels_in_one_hot = []
        for i in range(batch_size):
            current_label = temp_one_hot_matrix[true_label[i]]
            labels_in_one_hot.append(current_label)
        labels_in_one_hot = tensor2variable(torch.FloatTensor(
            np.array(labels_in_one_hot)),
                                            device=device)

        best_l2 = [1e10] * batch_size
        best_perturbation = np.zeros(var_samples.size())
        current_prediction_class = [-1] * batch_size

        def un_targeted_attack_achieved(pre_softmax, true_class):
            pre_softmax[true_class] += self.kappa
            return np.argmax(pre_softmax) != true_class

        self.model.eval()
        # Outer loop: binary search for the constant c
        for search_for_c in range(self.binary_search_steps):

            modifier = torch.zeros(var_samples.size()).float()
            modifier = tensor2variable(modifier,
                                       device=device,
                                       requires_grad=True)
            optimizer = torch.optim.Adam([modifier], lr=self.learning_rate)
            var_const = tensor2variable(torch.FloatTensor(const_origin),
                                        device=device)

            print("\tbinary search step {}:".format(search_for_c))
            for iteration_times in range(self.max_iterations):
                # invert the tanh transform back to [0, 1]
                perturbed_img = torch.tanh(var_samples +
                                           modifier) * half_range + mid_point
                perturbed_img = torch.clamp(perturbed_img, min=0.0, max=1.0)

                perturbed_img_plus_noises = perturbed_img[
                    None, :, :, :, :] + noise_vector[:, None, :, :, :]
                perturbed_img_plus_noises = torch.clamp(
                    perturbed_img_plus_noises, min=0.0, max=1.0)
                # shape: noise_count x batch_size x channel x width x height

                # minimize |r| + c * loss_f(x+r,l)
                l2dist = torch.sum(
                    (perturbed_img -
                     (torch.tanh(var_samples) * half_range + mid_point))**2,
                    [1, 2, 3])

                loss = l2dist.clone()

                # add the noise_count loss terms one by one
                for i in range(self.noise_count):
                    prediction = self.model(perturbed_img_plus_noises[i])
                    c_loss = torch.max(
                        (prediction * labels_in_one_hot).sum(1) -
                        (prediction - 1e10 * labels_in_one_hot).max(1)[0],
                        torch.ones(batch_size, device=device) * self.kappa *
                        -1)
                    loss += var_const * c_loss

                loss = loss.sum()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                for i, (dist, score, img) in enumerate(
                        zip(l2dist.data.cpu().numpy(),
                            prediction.data.cpu().numpy(),
                            perturbed_img.data.cpu().numpy())):
                    if dist < best_l2[i] and un_targeted_attack_achieved(
                            score, true_label[i]):
                        best_l2[i] = dist
                        current_prediction_class[i] = np.argmax(score)
                        best_perturbation[i] = img

            # update the best constant c for each sample in the batch
            for i in range(batch_size):
                if current_prediction_class[i] != true_label[i] and current_prediction_class[i] != -1:
                    c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                else:
                    c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                    else:
                        const_origin[i] *= 10

        return np.array(best_perturbation)
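The np.linalg.qr call above turns the Gaussian noise columns into orthonormal vectors; a quick standalone check of that property:

import numpy as np

noise_raw = np.random.normal(scale=0.1, size=(784, 5)).astype(np.float32)
q, _ = np.linalg.qr(noise_raw)  # reduced QR: q has orthonormal columns
print(q.shape)                  # (784, 5)
print(np.allclose(q.T @ q, np.eye(5), atol=1e-5))  # True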
Example #10
    def perturbation_single(self, sample, ys_target, device):
        """

        :param sample:
        :param ys_target:
        :param device:
        :return:
        """
        copy_sample = np.copy(sample)
        var_sample = tensor2variable(torch.from_numpy(copy_sample),
                                     device=device,
                                     requires_grad=True)
        var_target = tensor2variable(torch.LongTensor(ys_target),
                                     device=device)

        increasing = self.theta > 0

        num_features = int(np.prod(copy_sample.shape[1:]))
        shape = var_sample.size()

        # perturb two pixels in one iteration, thus max_iters is divided by 2.0
        max_iters = int(np.ceil(num_features * self.gamma / 2.0))

        # masked search domain, if the pixel has already reached the top or bottom, we don't bother to modify it.
        if increasing:
            search_domain = torch.lt(var_sample, 0.99).to(device)
        else:
            search_domain = torch.gt(var_sample, 0.01).to(device)
        search_domain = search_domain.view(num_features)

        self.model.eval().to(device)
        output = self.model(var_sample)
        current = torch.max(output.data, 1)[1].cpu().numpy()

        iter_count = 0
        while (iter_count < max_iters) and (current[0] != ys_target[0]) and (
                search_domain.sum() != 0):
            # calculate Jacobian matrix of forward derivative
            jacobian = self.compute_jacobian(input=var_sample, device=device)
            # get the saliency map and calculate the two pixels that have the greatest influence
            p1, p2 = self.saliency_map(jacobian, var_target, increasing,
                                       search_domain, num_features, device)
            # apply modifications in-place, outside the autograd graph
            with torch.no_grad():
                var_sample_flatten = var_sample.view(-1, num_features)
                var_sample_flatten[0, p1] += self.theta
                var_sample_flatten[0, p2] += self.theta

            new_sample = torch.clamp(var_sample_flatten, min=0.0, max=1.0)
            new_sample = new_sample.view(shape)
            search_domain[p1] = 0
            search_domain[p2] = 0
            var_sample = tensor2variable(new_sample.clone().detach(),
                                         device=device,
                                         requires_grad=True)

            output = self.model(var_sample)
            current = torch.max(output.data, 1)[1].cpu().numpy()
            iter_count += 1

        adv_samples = var_sample.data.cpu().numpy()
        return adv_samples
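The compute_jacobian and saliency_map helpers are not shown on this page. Below is a hedged sketch of a Jacobian helper consistent with the usage above (one backward pass per class; an assumption, not the repository's own code), assuming 10 classes:

import torch

def compute_jacobian(model, x, num_classes=10):
    # row c holds d logit_c / d input, flattened over all input features
    x = x.clone().detach().requires_grad_(True)
    output = model(x)
    jacobian = torch.zeros(num_classes, x.numel())
    for c in range(num_classes):
        grad = torch.autograd.grad(output[0, c], x, retain_graph=True)[0]
        jacobian[c] = grad.reshape(-1)
    return jacobian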
Example #11
    def perturbation_single(self, sample, device):
        """

        :param sample:
        :param device:
        :return:
        """
        assert sample.shape[0] == 1, 'only perturbing one sample'
        copy_sample = np.copy(sample)
        var_sample = tensor2variable(torch.from_numpy(copy_sample),
                                     device=device,
                                     requires_grad=True).float()

        self.model.eval()
        prediction = self.model(var_sample)
        original = torch.max(prediction, 1)[1]
        current = original

        # indices of the prediction scores in descending order
        I = np.argsort(prediction.data.cpu().numpy() * -1)
        perturbation_r_tot = np.zeros(copy_sample.shape, dtype=np.float32)
        iteration = 0
        while (original == current) and (iteration < self.max_iterations):

            # prediction for the adversarial example at the i-th iteration
            zero_gradients(var_sample)
            self.model.eval()
            f_kx = self.model(var_sample)
            current = torch.max(f_kx, 1)[1]
            # gradient of the original example
            f_kx[0, I[0, 0]].backward(retain_graph=True)
            grad_original = np.copy(var_sample.grad.data.cpu().numpy())

            # calculate the w_k and f_k for every class label
            closest_dist = 1e10
            for k in range(1, 10):
                # gradient of adversarial example for k-th label
                zero_gradients(var_sample)
                f_kx[0, I[0, k]].backward(retain_graph=True)
                grad_current = var_sample.grad.data.cpu().numpy().copy()
                # update w_k and f_k
                w_k = grad_current - grad_original
                f_k = (f_kx[0, I[0, k]] -
                       f_kx[0, I[0, 0]]).detach().data.cpu().numpy()
                # find the closest distance and the corresponding w_k
                dist_k = np.abs(f_k) / (np.linalg.norm(w_k.flatten()) + 1e-15)
                if dist_k < closest_dist:
                    closest_dist = dist_k
                    closest_w = w_k

            # accumulation of perturbation
            r_i = (closest_dist + 1e-4) * closest_w / np.linalg.norm(closest_w)
            perturbation_r_tot = perturbation_r_tot + r_i

            tmp_sample = np.clip(
                (1 + self.overshoot) * perturbation_r_tot + sample, 0.0, 1.0)
            var_sample = tensor2variable(torch.from_numpy(tmp_sample),
                                         device=device,
                                         requires_grad=True)

            iteration += 1

        adv = np.clip(sample + (1 + self.overshoot) * perturbation_r_tot, 0.0,
                      1.0)
        return adv, perturbation_r_tot, iteration
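zero_gradients used to be importable from torch.autograd.gradcheck but was removed in newer PyTorch releases; a minimal drop-in replacement:

def zero_gradients(x):
    # reset the accumulated gradient on a leaf tensor, if any
    if x.grad is not None:
        x.grad.detach_()
        x.grad.zero_()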
Example #12
    def perturbation(self, samples, ys_targets, batch_size, device):
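        """
        Craft targeted adversarial examples with the elastic-net (EAD) attack:
        SGD on a slack variable with ISTA/FISTA-style updates, binary-searching
        the trade-off constant c.

        :param samples: clean input samples in [0, 1]
        :param ys_targets: target class labels for the attack
        :param batch_size: number of samples in the batch
        :param device: device on which to run the model
        :return: the best adversarial examples found
        """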

        assert len(samples) == batch_size, \
            "the number of samples does not equal batch_size"
        copy_sample = np.copy(samples)

        # help function
        def attack_achieved(pre_softmax, target_class):
            pre_softmax[target_class] -= self.kappa
            return np.argmax(pre_softmax) == target_class

        # help function: Iterative Shrinkage-Thresholding Algorithm (ISTA)
        def ISTA(new, old):
            with torch.no_grad():
                diff = new - old
                var_beta = torch.FloatTensor(
                    np.ones(shape=diff.shape, dtype=float) *
                    self.beta).to(device)
                # soft-thresholding: shrink each perturbation component by beta; components below beta are zeroed
                cropped_diff = torch.max(
                    torch.abs(diff) - var_beta,
                    torch.zeros(diff.shape,
                                device=device)) * diff.sign().to(device)
                ista_new = old + cropped_diff
                return torch.clamp(input=ista_new, min=0.0, max=1.0)

        # wrap the clean samples (already in [0, 1]) as a differentiable variable
        var_samples = tensor2variable(torch.from_numpy(samples),
                                      device=device,
                                      requires_grad=True)

        # set the lower and upper bound for searching 'c' const in EAD attack
        const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const
        c_upper_bound = [1e10] * batch_size
        c_lower_bound = np.zeros(batch_size)

        # convert targets to one-hot encoding
        temp_one_hot_matrix = np.eye(10)
        targets_one_hot = []
        for i in range(batch_size):
            current_target = temp_one_hot_matrix[ys_targets[i]]
            targets_one_hot.append(current_target)
        targets_one_hot = torch.FloatTensor(
            np.array(targets_one_hot)).to(device)

        # initialize
        best_elastic = [1e10] * batch_size
        best_perturbation = np.zeros(var_samples.size())
        current_prediction_class = [-1] * batch_size

        flag = [False] * batch_size

        self.model.eval()
        # Outer loop: binary search for the constant c
        for search_for_c in range(self.binary_search_steps):

            slack = tensor2variable(
                torch.from_numpy(copy_sample),
                device=device,
                requires_grad=True)  # The slack variable (y) of x
            optimizer_y = torch.optim.SGD([slack], lr=self.learning_rate)
            # save the previous image for the momentum (FISTA-style) update
            old_image = slack.clone()
            var_const = tensor2variable(torch.FloatTensor(const_origin),
                                        device=device)
            print("\tbinary search step {}:".format(search_for_c))

            for iteration_times in range(self.max_iterations):
                # optimize the slack variable
                output_y = self.model(slack).to(device)
                l2dist_y = torch.sum((slack - var_samples)**2, [1, 2, 3])
                kappa_t = torch.FloatTensor([self.kappa] *
                                            batch_size).to(device)
                target_loss_y = torch.max(
                    (output_y - 1e10 * targets_one_hot).max(1)[0] -
                    (output_y * targets_one_hot).sum(1), -1 * kappa_t)
                c_loss_y = var_const * target_loss_y
                loss_y = l2dist_y.sum() + c_loss_y.sum()

                optimizer_y.zero_grad()
                loss_y.backward()
                optimizer_y.step()

                # convert to new image and save the previous version
                new_image = ISTA(slack, var_samples)
                slack.data = new_image.data + ((iteration_times /
                                                (iteration_times + 3.0)) *
                                               (new_image - old_image)).data
                old_image = new_image.clone()

                # calculate the loss for decision
                output = self.model(new_image)
                l1dist = torch.sum(torch.abs(new_image - var_samples),
                                   [1, 2, 3])
                l2dist = torch.sum((new_image - var_samples)**2, [1, 2, 3])
                target_loss = torch.max(
                    (output - 1e10 * targets_one_hot).max(1)[0] -
                    (output * targets_one_hot).sum(1), -1 * kappa_t)

                if self.EN:
                    decision_loss = self.beta * l1dist + l2dist + var_const * target_loss
                else:
                    decision_loss = self.beta * l1dist + var_const * target_loss

                # Update best results
                for i, (dist, score, img) in enumerate(
                        zip(decision_loss.data.cpu().numpy(),
                            output.data.cpu().numpy(),
                            new_image.data.cpu().numpy())):
                    if dist < best_elastic[i] and attack_achieved(
                            score, ys_targets[i]):
                        best_elastic[i] = dist
                        current_prediction_class[i] = np.argmax(score)
                        best_perturbation[i] = img
                        flag[i] = True

            # update the best constant c for each sample in the batch
            for i in range(batch_size):
                if current_prediction_class[i] == ys_targets[i] and current_prediction_class[i] != -1:
                    c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                else:
                    c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])
                    if c_upper_bound[i] < 1e10:
                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
                    else:
                        const_origin[i] *= 10

        cnt = sum(flag)
        print("Success: {}".format(cnt))

        return np.array(best_perturbation)
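The ISTA helper above is a soft-thresholding (shrinkage) operator; a standalone numeric sketch with made-up perturbation components:

import torch

beta = 0.1
diff = torch.tensor([0.25, -0.30, 0.05])
shrunk = torch.clamp(diff.abs() - beta, min=0.0) * diff.sign()
print(shrunk)  # tensor([ 0.1500, -0.2000,  0.0000]); entries below beta vanish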