Code example #1
    def testBackwardRgbColor(self, stride, height, width, datatype):
        rtol, atol = 1e-3, 1e-6

        # Parameters
        nSamples = 8
        nrows = int(math.ceil(height /
                              stride[Direction.VERTICAL]))  #.astype(int)
        ncols = int(math.ceil(width /
                              stride[Direction.HORIZONTAL]))  #.astype(int)
        nDecs = stride[0] * stride[1]  # math.prod(stride)
        nComponents = 3  # RGB

        # Source (nSamples x nComponents x (Stride[0]xnRows) x (Stride[1]xnCols))
        X = torch.rand(nSamples,
                       nComponents,
                       height,
                       width,
                       dtype=datatype,
                       requires_grad=True)
        # nSamples x nRows x nCols x nDecs
        dLdZr = torch.rand(nSamples, nrows, ncols, nDecs, dtype=datatype)
        dLdZg = torch.rand(nSamples, nrows, ncols, nDecs, dtype=datatype)
        dLdZb = torch.rand(nSamples, nrows, ncols, nDecs, dtype=datatype)

        # Expected values
        Ar = permuteIdctCoefs_(dLdZr, stride)
        Ag = permuteIdctCoefs_(dLdZg, stride)
        Ab = permuteIdctCoefs_(dLdZb, stride)
        Yr = dct.idct_2d(Ar, norm='ortho')
        Yg = dct.idct_2d(Ag, norm='ortho')
        Yb = dct.idct_2d(Ab, norm='ortho')
        expctddLdX = torch.cat(
            (Yr.reshape(nSamples, 1, height, width),
             Yg.reshape(nSamples, 1, height, width),
             Yb.reshape(nSamples, 1, height, width)),
            dim=1)

        # Instantiation of target class
        layer = NsoltBlockDct2dLayer(decimation_factor=stride,
                                     number_of_components=nComponents,
                                     name='E0')

        # Actual values
        Zr, Zg, Zb = layer.forward(X)
        Zr.backward(dLdZr, retain_graph=True)
        Zg.backward(dLdZg, retain_graph=True)
        Zb.backward(dLdZb, retain_graph=False)
        actualdLdX = X.grad

        # Evaluation
        self.assertEqual(actualdLdX.dtype, datatype)
        self.assertTrue(
            torch.allclose(actualdLdX, expctddLdX, rtol=rtol, atol=atol))
        self.assertTrue(Zr.requires_grad)
        self.assertTrue(Zg.requires_grad)
        self.assertTrue(Zb.requires_grad)
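Several snippets on this page call permuteIdctCoefs_ without showing its definition. The following is a minimal sketch of the shape handling such a helper needs, assuming it only regroups each flattened nDecs coefficient vector into a stride[0] x stride[1] block for dct.idct_2d; the actual NSOLT helper may additionally reorder coefficients within each block.

import torch

def permute_idct_coefs_sketch(x, block_size):
    # x: nSamples x nRows x nCols x nDecs, with nDecs == block_size[0] * block_size[1].
    # Yield one block_size[0] x block_size[1] block per (sample, row, col) triple,
    # the layout dct.idct_2d expects for a blockwise 2-D inverse DCT.
    return x.reshape(-1, block_size[0], block_size[1])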
Code example #2
    def testPredictRgbColor(self, stride, height, width, datatype):
        rtol, atol = 1e-5, 1e-8

        # Parameters
        nSamples = 8
        nrows = int(math.ceil(height / stride[Direction.VERTICAL]))
        ncols = int(math.ceil(width / stride[Direction.HORIZONTAL]))
        nDecs = stride[0] * stride[1]  # math.prod(stride)
        nComponents = 3  # RGB
        # nSamples x nRows x nCols x nDecs
        Xr = torch.rand(nSamples,
                        nrows,
                        ncols,
                        nDecs,
                        dtype=datatype,
                        requires_grad=True)
        Xg = torch.rand(nSamples,
                        nrows,
                        ncols,
                        nDecs,
                        dtype=datatype,
                        requires_grad=True)
        Xb = torch.rand(nSamples,
                        nrows,
                        ncols,
                        nDecs,
                        dtype=datatype,
                        requires_grad=True)

        # Expected values
        Ar = permuteIdctCoefs_(Xr, stride)
        Ag = permuteIdctCoefs_(Xg, stride)
        Ab = permuteIdctCoefs_(Xb, stride)
        Yr = dct.idct_2d(Ar, norm='ortho')
        Yg = dct.idct_2d(Ag, norm='ortho')
        Yb = dct.idct_2d(Ab, norm='ortho')
        expctdZ = torch.cat(
            (Yr.reshape(nSamples, 1, height, width),
             Yg.reshape(nSamples, 1, height, width),
             Yb.reshape(nSamples, 1, height, width)),
            dim=1)

        # Instantiation of target class
        layer = NsoltBlockIdct2dLayer(decimation_factor=stride,
                                      number_of_components=nComponents,
                                      name='E0~')

        # Actual values
        with torch.no_grad():
            actualZ = layer.forward(Xr, Xg, Xb)

        # Evaluation
        self.assertEqual(actualZ.dtype, datatype)
        self.assertTrue(torch.allclose(actualZ, expctdZ, rtol=rtol, atol=atol))
        self.assertFalse(actualZ.requires_grad)
Code example #3
    def forward(self, x):
        X = dct.dct_2d(x)
        mask = torch.zeros_like(X)
        mask[:, :, 0:self.fre, 0:self.fre] = 1
        out = dct.idct_2d(X * mask)
        out = self.model(out)
        return out
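Example #3 shows only the forward method. Below is a self-contained sketch of the same idea with an assumed constructor (fre and model are names carried over from the snippet): keep only the top-left fre x fre block of DCT coefficients, a frequency-domain low-pass filter, before running the wrapped network.

import torch
import torch.nn as nn
import torch_dct as dct

class DctLowPassNet(nn.Module):
    def __init__(self, model: nn.Module, fre: int):
        super().__init__()
        self.model = model  # wrapped backbone network
        self.fre = fre      # number of low-frequency rows/columns to keep

    def forward(self, x):
        X = dct.dct_2d(x)
        mask = torch.zeros_like(X)
        mask[:, :, :self.fre, :self.fre] = 1  # keep only the low-frequency band
        return self.model(dct.idct_2d(X * mask))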
Code example #4
def test_idct_2d():
    for N1 in [2, 5, 32]:
        for N2 in [2, 5, 32]:
            x = np.random.normal(size=(1, N1, N2))
            X = dct.dct_2d(torch.tensor(x))
            y = dct.idct_2d(X).numpy()
            assert np.abs(x - y).max() < EPS, x
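EPS in the test above is defined elsewhere in the test module. A standalone version of the same round-trip check with an assumed tolerance: idct_2d inverts dct_2d exactly, so the reconstruction error should sit at floating-point level.

import numpy as np
import torch
import torch_dct as dct

EPS = 1e-10  # assumed tolerance; the original test defines EPS elsewhere

x = np.random.normal(size=(1, 8, 16))
y = dct.idct_2d(dct.dct_2d(torch.tensor(x))).numpy()
assert np.abs(x - y).max() < EPS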
Code example #5
File: viewmaker.py  Project: ranliu98/viewmaker
    def forward(self, x):
        if self.downsample_to:
            # Downsample.
            x_orig = x
            x = torch.nn.functional.interpolate(
                x, size=(self.downsample_to, self.downsample_to), mode='bilinear')
        y = x
        
        if self.frequency_domain:
            # Input to viewmaker is in frequency domain, outputs frequency domain perturbation.
            # Uses the Discrete Cosine Transform.
            # shape still [batch_size, C, W, H]
            y = dct.dct_2d(y)

        y_pixels, features = self.basic_net(y, self.num_res_blocks, bound_multiplier=1)
        delta = self.get_delta(y_pixels)
        if self.frequency_domain:
            # Compute inverse DCT from frequency domain to time domain.
            delta = dct.idct_2d(delta)
        if self.downsample_to:
            # Upsample.
            x = x_orig
            delta = torch.nn.functional.interpolate(delta, size=x_orig.shape[-2:], mode='bilinear')

        # Additive perturbation
        result = x + delta
        if self.clamp:
            result = torch.clamp(result, 0, 1.0)

        return result
Code example #6
    def testForwardGrayScale(self, stride, height, width, datatype):
        rtol, atol = 1e-5, 1e-8

        # Parameters
        nSamples = 8
        nrows = int(math.ceil(height / stride[Direction.VERTICAL]))
        ncols = int(math.ceil(width / stride[Direction.HORIZONTAL]))
        nDecs = stride[0] * stride[1]  # math.prod(stride)
        nComponents = 1
        # nSamples x nRows x nCols x nDecs
        X = torch.rand(nSamples,
                       nrows,
                       ncols,
                       nDecs,
                       dtype=datatype,
                       requires_grad=True)

        # Expected values
        A = permuteIdctCoefs_(X, stride)
        Y = dct.idct_2d(A, norm='ortho')
        expctdZ = Y.reshape(nSamples, nComponents, height, width)

        # Instantiation of target class
        layer = NsoltBlockIdct2dLayer(decimation_factor=stride, name='E0~')

        # Actual values
        actualZ = layer.forward(X)

        # Evaluation
        self.assertEqual(actualZ.dtype, datatype)
        self.assertTrue(torch.allclose(actualZ, expctdZ, rtol=rtol, atol=atol))
        self.assertTrue(actualZ.requires_grad)
Code example #7
def dct_cutoff_low(x, bandwidth):
    if len(x.size()) == 2:
        x.unsqueeze_(0)

    mask = torch.ones_like(x)
    mask[:, :bandwidth, :bandwidth] = 0
    return torch_dct.idct_2d(torch_dct.dct_2d(x, norm='ortho') * mask, norm='ortho').squeeze_()
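A hypothetical usage of dct_cutoff_low: zeroing the lowest 8 x 8 band of DCT coefficients keeps only an image's high-frequency content (edges and texture). The clone() matters because the helper's in-place unsqueeze_ mutates 2-D inputs.

import torch

img = torch.rand(32, 32)  # single-channel image
high_freq = dct_cutoff_low(img.clone(), bandwidth=8)  # clone(): the helper modifies 2-D inputs in place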
Code example #8
    def __init__(self, originals: ep.Tensor, random_noise: str = "normal", basis_type: str = "dct", **kwargs: Any):
        """
        Args:
            random_noise (str, optional): When the basis is created, noise is added. This noise
                                          can be normal or uniform. Defaults to "normal".
            basis_type (str, optional): Type of the basis: DCT, Random, Genetic. Defaults to "dct".
            device (int, optional): [description]. Defaults to -1.
            args, kwargs: The remaining args and kwargs carry the basis parameters:
                    * Random: No parameters
                    * DCT:
                            * function (tanh / constant / linear): function applied on the dct
                            * beta
                            * gamma
                            * frequence_range: tuple of 2 floats
                            * dct_type: 8x8 or full
        """
        self._originals = originals
        if isinstance(self._originals.raw, torch.Tensor):
            self._f_dct2 = lambda a: torch_dct.dct_2d(a)
            self._f_idct2 = lambda a: torch_dct.idct_2d(a)
        elif isinstance(self._originals.raw, np.ndarray):
            from scipy import fft
            self._f_dct2 = lambda a: fft.dct(fft.dct(a, axis=2, norm='ortho'), axis=3, norm='ortho')
            self._f_idct2 = lambda a: fft.idct(fft.idct(a, axis=2, norm='ortho'), axis=3, norm='ortho')

        self.basis_type = basis_type
        self._function_generation = getattr(self, "_get_vector_" + self.basis_type)
        self._load_params(**kwargs)

        assert random_noise in ["normal", "uniform"]
        self.random_noise = random_noise
Code example #9
File: utils.py  Project: amodas/hold-me-tight
def generate_subspace_list(subspace_dim, dim, subspace_step, channels):
    subspace_list = []
    idx_i = 0
    idx_j = 0
    while (idx_i + subspace_dim - 1 <= dim - 1) and (idx_j + subspace_dim - 1
                                                     <= dim - 1):

        S = torch.zeros((subspace_dim, subspace_dim, dim, dim),
                        dtype=torch.float32).to(DEVICE)
        for i in range(subspace_dim):
            for j in range(subspace_dim):
                dirac = torch.zeros((dim, dim),
                                    dtype=torch.float32,
                                    device=DEVICE)
                dirac[idx_i + i, idx_j + j] = 1.
                S[i, j] = torch_dct.idct_2d(dirac, norm='ortho')

        Sp = S.view(subspace_dim * subspace_dim, dim * dim)
        if channels > 1:
            Sp = kron(torch.eye(channels, dtype=torch.float32, device=DEVICE),
                      Sp)

        Sp = Sp.t()

        Sp = Sp.to('cpu')
        subspace_list.append(Sp)

        idx_i += subspace_step
        idx_j += subspace_step

    return subspace_list
Code example #10
def dct_low_pass(x, bandwidth):
    if len(x.size()) == 2:
        x.unsqueeze_(0)

    mask = torch.zeros_like(x)
    mask[:, :bandwidth, :bandwidth] = 1
    return torch_dct.idct_2d(torch_dct.dct_2d(x, norm='ortho') * mask, norm='ortho').squeeze_()
Code example #11
def dtw_loss(originals,
             deltas,
             targets,
             criterion,
             attentions=None,
             is_cuda=False,
             test=False):
    loss = 0
    preds = []
    for i, o in enumerate(originals):

        length = o.shape[1]
        org = torch.from_numpy(o).T.unsqueeze(0)
        targ = torch.from_numpy(targets[i]).T.unsqueeze(0)

        if length > deltas[i].shape[1]:
            m = torch.nn.ZeroPad2d((0, length - deltas[i].shape[1], 0, 0))
            delt = dct.idct_2d(m(deltas[i]).T.unsqueeze(0))
        else:
            delt = dct.idct_2d(deltas[i, :, :length].T.unsqueeze(0))

        if attentions is not None:
            delt = torch.mul(delt, attentions[i].T.unsqueeze(0))

        out = org + delt

        if is_cuda:
            out = out.cuda()
            targ = targ.cuda()

        crit = criterion(
            out, targ) - 1 / 2 * (criterion(out, out) + criterion(targ, targ))
        loss += crit

        if test:
            preds.append(out[0].detach().numpy().T)

    if test:
        return loss, preds
    else:
        return loss
Code example #12
    def decode(self, dct_vectors, dim=None):
        """
        input: dct_vectors, tensor of shape [N, dct_dim]
        output: mask_rc, reconstructed masks of shape [N, mask_size, mask_size]
        """
        device = dct_vectors.device
        if dim is None:
            dct_vector_coords = self.dct_vector_coords[:self.vec_dim]
        else:
            dct_vector_coords = self.dct_vector_coords[:dim]
            dct_vectors = dct_vectors[:, :dim]

        N = dct_vectors.shape[0]
        dct_trans = torch.zeros([N, self.mask_size, self.mask_size], dtype=dct_vectors.dtype).to(device)
        xs, ys = dct_vector_coords[:, 0], dct_vector_coords[:, 1]
        dct_trans[:, xs, ys] = dct_vectors
        mask_rc = torch_dct.idct_2d(dct_trans, norm='ortho')  # [N, mask_size, mask_size]
        return mask_rc
Code example #13
    def forward(self, *args):
        block_size = self.decimation_factor
        for iComponent in range(self.num_inputs):
            X = args[iComponent]
            nsamples = X.size(0)
            nrows = X.size(1)
            ncols = X.size(2)
            # Permute IDCT coefficients
            V = permuteIdctCoefs_(X, block_size)
            # 2D IDCT
            Y = dct.idct_2d(V, norm='ortho')
            # Reshape and return
            height = nrows * block_size[Direction.VERTICAL]
            width = ncols * block_size[Direction.HORIZONTAL]
            if iComponent < 1:
                Z = Y.reshape(nsamples, 1, height, width)
            else:
                Z = torch.cat((Z, Y.reshape(nsamples, 1, height, width)),
                              dim=1)
        return Z
Code example #14
    def testBackwardGrayScale(self, stride, height, width, datatype):
        rtol, atol = 1e-3, 1e-6

        # Parameters
        nSamples = 8
        nrows = int(math.ceil(height /
                              stride[Direction.VERTICAL]))  #.astype(int)
        ncols = int(math.ceil(width /
                              stride[Direction.HORIZONTAL]))  #.astype(int)
        nDecs = stride[0] * stride[1]  # math.prod(stride)
        nComponents = 1

        # Source (nSamples x nComponents x (Stride[0]xnRows) x (Stride[1]xnCols))
        X = torch.rand(nSamples,
                       nComponents,
                       height,
                       width,
                       dtype=datatype,
                       requires_grad=True)
        # nSamples x nRows x nCols x nDecs
        dLdZ = torch.rand(nSamples, nrows, ncols, nDecs, dtype=datatype)

        # Expected values
        A = permuteIdctCoefs_(dLdZ, stride)
        Y = dct.idct_2d(A, norm='ortho')
        expctddLdX = Y.reshape(nSamples, nComponents, height, width)

        # Instantiation of target class
        layer = NsoltBlockDct2dLayer(decimation_factor=stride, name='E0')

        # Actual values
        Z = layer.forward(X)
        Z.backward(dLdZ)
        actualdLdX = X.grad

        # Evaluation
        self.assertEqual(actualdLdX.dtype, datatype)
        self.assertTrue(
            torch.allclose(actualdLdX, expctddLdX, rtol=rtol, atol=atol))
        self.assertTrue(Z.requires_grad)
Code example #15
File: DeepS3PR_Demo.py  Project: gxd123/DeepS3PR
    def Psi(x):
        # Apply an inverse 2-D DCT to x.
        X = dct.idct_2d(x, norm='ortho')
        return X
Code example #16
    def forward(self, x):
        y = dct.dct_2d(x, norm='ortho')
        y = y[..., :self.h, :self.w]
        y = dct.idct_2d(y, norm='ortho')

        return y
Code example #17
def to_spatial(x):
    return dct.idct_2d(x)
Code example #18
def foolbox_attack(filter=None,
                   filter_preserve='low',
                   free_parm='eps',
                   plot_num=None):
    # get model.
    model = get_model()
    model = nn.DataParallel(model).to(device)
    model = model.eval()

    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    if plot_num:
        free_parm = ''
        val_loader = get_val_loader(plot_num)
    else:
        # Load images.
        val_loader = get_val_loader(args.attack_batch_size)

    if 'eps' in free_parm:
        epsilons = [0.001, 0.003, 0.005, 0.008, 0.01, 0.1]
    else:
        epsilons = [0.01]
    if 'step' in free_parm:
        steps = [1, 5, 10, 30, 40, 50]
    else:
        steps = [args.iteration]

    for step in steps:
        # Adversarial attack.
        if args.attack_type == 'LinfPGD':
            attack = LinfPGD(steps=step)
        elif args.attack_type == 'FGSM':
            attack = FGSM()

        clean_acc = 0.0

        for i, data in enumerate(val_loader, 0):

            # Samples (attack_batch_size * attack_epochs) images for adversarial attack.
            if i >= args.attack_epochs:
                break

            images, labels = data[0].to(device), data[1].to(device)
            if step == steps[0]:
                clean_acc += (get_acc(
                    fmodel, images, labels
                )) / args.attack_epochs  # accumulate for attack epochs.

            _images, _labels = ep.astensors(images, labels)
            raw_advs, clipped_advs, success = attack(fmodel,
                                                     _images,
                                                     _labels,
                                                     epsilons=epsilons)

            if plot_num:
                grad = torch.from_numpy(
                    raw_advs[0].numpy()).to(device) - images
                grad = grad.clone().detach_()
                return grad

            if filter:
                robust_accuracy = torch.empty(len(epsilons))
                for eps_id in range(len(epsilons)):
                    grad = torch.from_numpy(
                        raw_advs[eps_id].numpy()).to(device) - images
                    grad = grad.clone().detach_()
                    freq = dct.dct_2d(grad)
                    if filter_preserve == 'low':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, :filter, :filter] = 1
                    elif filter_preserve == 'high':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, filter:, filter:] = 1
                    masked_freq = torch.mul(freq, mask)
                    new_grad = dct.idct_2d(masked_freq)
                    x_adv = torch.clamp(images + new_grad, 0, 1).detach_()

                    robust_accuracy[eps_id] = (get_acc(fmodel, x_adv, labels))
            else:
                robust_accuracy = 1 - success.float32().mean(axis=-1)
            if i == 0:
                robust_acc = robust_accuracy / args.attack_epochs
            else:
                robust_acc += robust_accuracy / args.attack_epochs

        if step == steps[0]:
            print("sample size is : ",
                  args.attack_batch_size * args.attack_epochs)
            print(f"clean accuracy:  {clean_acc * 100:.1f} %")
            print(
                f"Model {args.model} robust accuracy for {args.attack_type} perturbations with"
            )
        for eps, acc in zip(epsilons, robust_acc):
            print(
                f"  Step {step}, Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %"
            )
        print('  -------------------')
Code example #19
def main(video_file, pose_dict, model):

    is_cuda = torch.cuda.is_available()

    # ============== 3D pose estimation ============== #
    poses = main_VIBE(video_file, model)
    # with open('PoseCorrection/Results/poses_vibe.pickle', 'rb') as f:
    #     poses = pickle.load(f)

    # ============== Skeleton uniformization ============== #
    poses_uniform = centralize_normalize_rotate_poses(poses, pose_dict)
    joints = list(range(15)) + [19, 21, 22, 24]
    poses_reshaped = poses_uniform[:, :, joints]
    poses_reshaped = poses_reshaped.reshape(
        -1, poses_reshaped.shape[1] * poses_reshaped.shape[2]).T

    frames = poses_reshaped.shape[1]

    # ============== Input ============== #
    dct_n = 25
    if frames >= dct_n:
        inputs = dct.dct_2d(poses_reshaped)[:, :dct_n]
    else:
        inputs = dct.dct_2d(
            torch.nn.ZeroPad2d((0, dct_n - frames, 0, 0))(poses_reshaped))

    if is_cuda:
        inputs = inputs.cuda()

    # ============== Action recognition ============== #
    model_class = GCN_class()
    model_class.load_state_dict(
        torch.load('PoseCorrection/Data/model_class.pt'))

    if is_cuda:
        model_class.cuda()

    model_class.eval()
    with torch.no_grad():
        _, label = torch.max(model_class(inputs).data, 1)

    # ============== Motion correction ============== #
    model_corr = GCN_corr()
    model_corr.load_state_dict(torch.load('PoseCorrection/Data/model_corr.pt'))

    if is_cuda:
        model_corr.cuda()

    with torch.no_grad():
        model_corr.eval()
        deltas_dct, att = model_corr(inputs)

        if frames > dct_n:
            m = torch.nn.ZeroPad2d((0, frames - dct_n, 0, 0))
            deltas = dct.idct_2d(m(deltas_dct).transpose(1, 2))
        else:
            deltas = dct.idct_2d(deltas_dct[:, :frames].transpose(1, 2))

        poses_corrected = poses_reshaped + deltas.squeeze().squeeze().T

    # ============== Action recognition ============== #
    with torch.no_grad():
        _, label_corr = torch.max(model_class(inputs + deltas_dct).data, 1)

    return poses_reshaped, poses_corrected, label, label_corr
Code example #20
def SimBA(max_iters=3 * 32 * 32,
          freq_dims=4,
          stride=1,
          epsilon=0.2,
          targeted=False,
          pixel_attack=False,
          image_size=32):
    all_queries = torch.empty(0)
    all_l2 = torch.empty(0)
    all_idx = torch.empty(0)
    success = 0
    total = 1e-10
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if batch_idx % 10 != 0:
                continue
            total += len(targets)
            inputs, targets = inputs.to(device), targets.to(device)
            images = inputs.clone()
            # target or untarget attack
            if targeted:
                labels = torch.randint_like(targets, 10)
                while labels.eq(targets).sum() > 0:
                    labels[labels.eq(targets)] = torch.randint_like(
                        targets[labels.eq(targets)], 10)
            else:
                labels = targets.clone()

            adv = inputs.clone()
            # the original points will be queried, so it is 1
            queries = torch.ones(len(targets))

            if pixel_attack:
                indices = torch.randperm(3 * freq_dims * freq_dims)[:max_iters]
            else:
                indices = simbaf.block_order(image_size,
                                             3,
                                             initial_size=freq_dims,
                                             stride=stride)[:max_iters]
            for k in range(max_iters):
                dim = indices[k]
                c, w, h = simbaf.location(
                    image_size, dim)  # return the location of the dim
                outputs, net_idx = net(adv)
                loss = CWloss(outputs, labels)
                outputs = F.softmax(outputs, dim=1)

                if targeted:
                    remaining = loss <= 0
                    PASS = loss > 0
                else:
                    remaining = loss > 0
                    PASS = loss <= 0

                if PASS.sum() > 0:
                    all_queries = torch.cat([all_queries, queries[PASS]])
                    l2 = (adv[PASS] - images[PASS]).view(PASS.sum(),
                                                         -1).norm(2, 1)
                    all_l2 = torch.cat([all_l2, l2.cpu()])
                    all_idx = torch.cat(
                        [all_idx, net_idx.repeat(PASS.sum()).float()])
                    success += float(PASS.sum())
                    adv = adv[remaining].clone()
                    images = images[remaining].clone()
                    labels = labels[remaining].clone()
                    outputs = outputs[remaining].clone()
                    queries = queries[remaining].clone()

                # check if all images are misclassified and stop early
                if remaining.sum() == 0:
                    break

                diff = torch.zeros_like(images)
                diff[:, c, w, h] = 1  # bs * c * w * h
                if not pixel_attack:
                    diff = dct.idct_2d(diff).clone()
                diff = diff / diff.view(diff.shape[0], -1).norm(2, 1).view(
                    -1, 1, 1, 1) * epsilon

                left_adv = (adv - diff).clamp(0, 1)
                left_outputs, _ = net(left_adv)
                left_outputs = F.softmax(left_outputs, dim=1)
                idx = left_outputs[range(len(labels)), labels] < outputs[range(
                    len(labels)), labels]
                if targeted:
                    idx = ~idx

                adv[idx] = left_adv[idx].clone()
                # only increase query count further by 1 for images
                # that did not improve in adversarial loss
                queries += 1
                queries[~idx] += 1

                right_adv = (adv + diff).clamp(0, 1)
                right_outputs, _ = net(right_adv)
                right_outputs = F.softmax(right_outputs, dim=1)
                idx2 = right_outputs[range(len(
                    labels)), labels] < outputs[range(len(labels)), labels]
                if targeted:
                    idx2 = ~idx2
                idx2[idx] = 0  # these points should not be queried or updated
                adv[idx2] = right_adv[idx2].clone()

            progress_bar(
                batch_idx / 10,
                len(testloader) / 10,
                'queries: %.2f | l2: %.2f | success %.2f%%' %
                (all_queries.mean(), all_l2.mean(),
                 100. * success / float(total)))

    state = {
        'all_queries': all_queries,
        'all_l2': all_l2,
        'success': success,
        'total': total
    }
    if not os.path.isdir('./checkpoint'):
        os.mkdir('./checkpoint')
    ckpname = ('./checkpoint/' + args.name + '_simba.pth')
    torch.save(state, ckpname)
Code example #21
def optimize(ux0, uy0, im1, im2, maxIter, lambda_r, to1, theta, device):
    eps = 0.0000001
    Ix, Iy = centralFiniteDifference(im1)
    It = im1 - im2

    a11 = Ix * Ix
    a12 = Ix * Iy
    a22 = Iy * Iy

    t1 = Ix * (It - Ix * ux0 - Iy * uy0)
    t2 = Iy * (It - Ix * ux0 - Iy * uy0)

    h, w = im1.size()

    vx = torch.zeros((h, w)).to(device).double()
    vy = torch.zeros((h, w)).to(device).double()
    bx = torch.zeros((h, w)).to(device).double()
    by = torch.zeros((h, w)).to(device).double()
    ux = torch.zeros((h, w)).to(device).double()
    uy = torch.zeros((h, w)).to(device).double()

    X, Y = torch.meshgrid([torch.arange(0, h), torch.arange(0, w)])
    # G = 2 * (torch.cos(PI * X / w + PI * Y / h) - 2)
    # G = G.to(device).double()

    X, Y = torch.meshgrid(torch.linspace(0, h - 1, h),
                          torch.linspace(0, w - 1, w))
    X, Y = X.cuda(), Y.cuda()
    G = torch.cos(math.pi * X / h) + torch.cos(math.pi * Y / w) - 2
    # G = G.unsqueeze(0).repeat(N, 1, 1, 1)

    for i in range(maxIter):
        tempx = ux
        tempy = uy

        h1 = theta * (vx - bx) - t1
        h2 = theta * (vy - by) - t2

        ux = ((a22 + theta) * h1 - a12 * h2) / ((a11 + theta) *
                                                (a22 + theta) - a12 * a12)
        uy = ((a11 + theta) * h2 - a12 * h1) / ((a11 + theta) *
                                                (a22 + theta) - a12 * a12)

        # vx = (idct2(dct2(theta * (ux + bx)) / (theta + lambda_r * G * G)))
        # vy = (idct2(dct2(theta * (uy + by)) / (theta + lambda_r * G * G)))

        vx = (tdct.idct_2d(
            tdct.dct_2d(theta * (ux + bx)) / (theta + lambda_r * G * G)))
        vy = (tdct.idct_2d(
            tdct.dct_2d(theta * (uy + by)) / (theta + lambda_r * G * G)))

        bx = bx + ux - vx
        by = by + uy - vy

        # t1 = Ix * (It - Ix * ux - Iy * uy)
        # t2 = Iy * (It - Ix * ux - Iy * uy)

        stopx = torch.sum(
            torch.abs(ux - tempx)) / (torch.sum(torch.abs(tempx)) + eps)
        stopy = torch.sum(
            torch.abs(uy - tempy)) / (torch.sum(torch.abs(tempy)) + eps)
        # print(i, stopx, stopy)
        if stopx < to1 and stopy < to1:
            print('iterate {} times, stop due to converge to tolerance'.format(
                i))
            break

    if i == maxIter - 1:
        print('iterate {} times, stop due to reach max iteration'.format(i))
    return ux, uy
Code example #22
def dct_flip(x):
    return torch_dct.idct_2d(torch.flip(torch_dct.dct_2d(x, norm='ortho'), [-2, -1]), norm='ortho')
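A quick sanity check for dct_flip (a hedged usage sketch, not from the original project): flipping the DCT spectrum along both axes exchanges low- and high-frequency content while preserving the tensor's shape.

import torch

img = torch.rand(1, 3, 32, 32)
flipped = dct_flip(img)
assert flipped.shape == img.shape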
Code example #23
def test(init=False):
    global u, T, num_queries, attack_success, num_sample, total_norm, avg_norm, avg_queries
    print('Init:', init)
    for batch_idx, (images, lables_true) in enumerate(testloader):
        if (init and batch_idx >= 200):
            break
        if (not init and batch_idx < 200):
            continue
        images, lables_true = images.to(device), lables_true.to(device)
        outputs = net(images)
        _, predicted = outputs.max(1)
        lables = torch.randint(10, [1]).to(device)
        while (lables == predicted or lables == lables_true):
            lables = torch.randint(10, [1]).to(device)
        num_queries += 1
        num_sample += 1
        trail = 0  # number of trials for this sample
        if not init:
            u_bar = u + (3 * torch.log(T.sum()) / 2 / (T + 0.01))**0.5
        adv = images.clone()  #adv images
        while trail <= (fre / n - 1):
            trail += 1
            trail_queries = 0  # number of queries for this trial
            if init:
                idx = torch.randperm(fre)[:n]
            else:
                _, idx = torch.topk(u_bar, fre)
                idx = idx[(trail - 1) * n:trail * n]

            stay = 0  # if stay > 100 then change the selected frequencies
            loss = cri(outputs, lables) - lam * (adv - images).norm()

            for i in range(steps):
                noise = torch.randn_like(images)
                mask = torch.zeros_like(images)
                mask = add_mask(idx, mask)
                g = dct.idct_2d(noise * mask)
                g = g / g.norm() * stepsize  #the gradient
                outputs_p = net((adv + g).clamp(0, 1))  #positive direction
                num_queries += 1
                trail_queries += 1
                loss_p = cri(outputs_p,
                             lables) - lam * (adv + g - images).norm()
                outputs_n = net((adv - g).clamp(0, 1))  #negative direction
                num_queries += 1
                trail_queries += 1
                loss_n = cri(outputs_n,
                             lables) - lam * (adv - g - images).norm()
                #print(loss, loss_p, loss_n)
                if (loss_n > loss or loss_p > loss):
                    stay = 0
                    if loss_p >= loss_n:
                        loss = loss_p
                        adv = (adv + g).clamp(0, 1)
                        _, predicted = outputs_p.max(1)
                        if predicted == lables:
                            attack_success += 1
                            break
                    elif loss_p < loss_n:
                        loss = loss_n
                        adv = (adv - g).clamp(0, 1)
                        _, predicted = outputs_n.max(1)
                        if predicted == lables:
                            attack_success += 1
                            break
                else:
                    stay += 1
                    if stay > 100:
                        i = steps - 1
                        break

            reward = torch.tensor((1 - trail_queries / 600.) * H).clamp(0., H)
            if (i < 499 and trail > 1):
                reward += .8
                reward.clamp_(0., H)
            u[idx] = (u[idx] * T[idx] + reward) / (T[idx] + 1)
            #print(u, trail_queries, reward)
            T[idx] += 1
            if i < 499:
                total_norm += (adv - images).norm()
                avg_queries.append(1. * num_queries / num_sample)
                avg_norm.append(1. * total_norm / num_sample)
                progress_bar(
                    batch_idx, len(testloader),
                    'avg_queries: %.2f | success: %.2f%% | avg_norm: %.2f' %
                    (1. * num_queries / num_sample, 100. * attack_success /
                     num_sample, 1. * total_norm / num_sample))
                break