Пример #1
0
 def load_seed_data(self) -> dict[str, torch.Tensor]:
     """Load the cached seed data for this seed, generating it if absent.

     Returns:
         dict[str, torch.Tensor]: contains 'input' (float tensor) and
             'label' (long tensor).
     """
     seed_path = os.path.join(self.folder_path, f'seed_{self.seed_num}.npy')
     # Removed the dead `seed_data = {}` initialization: the value was
     # always overwritten by the conditional below.
     # NOTE(review): allow_pickle=True executes arbitrary code if the .npy
     # file is untrusted — fine for this self-written cache, but keep it local.
     seed_data: dict[str, torch.Tensor] = (
         np.load(seed_path, allow_pickle=True).item()
         if os.path.exists(seed_path) else self.save_seed_data())
     seed_data['input'] = to_tensor(seed_data['input'])
     seed_data['label'] = to_tensor(seed_data['label'], dtype=torch.long)
     return seed_data
Пример #2
0
 def load_seed_data(self) -> dict[str, torch.Tensor]:
     """Load the cached seed data for this seed, generating it if absent.

     Returns:
         dict[str, torch.Tensor]: contains 'input' (float tensor) and
             'label' (long tensor).
     """
     seed_path = f'{env["result_dir"]}{self.dataset.name}/{self.name}_{self.seed_num}.npy'
     # Removed the dead `seed_data = {}` initialization: the value was
     # always overwritten by the conditional below.
     seed_data: dict[str, torch.Tensor] = (
         np.load(seed_path, allow_pickle=True).item()
         if os.path.exists(seed_path) else self.save_seed_data())
     seed_data['input'] = to_tensor(seed_data['input'])
     seed_data['label'] = to_tensor(seed_data['label'], dtype=torch.long)
     return seed_data
Пример #3
0
 def load(self, path: str = None):
     """Load the recovered mark/mask for the target class from an ``.npz``.

     Args:
         path (str): path to the ``.npz`` result file. Defaults to
             ``<folder_path>/<filename>.npz``.
     """
     if path is None:
         # os.path.join (instead of raw concatenation) so the default path
         # is correct whether or not folder_path carries a trailing
         # separator — consistent with the sibling load() implementations.
         path = os.path.join(self.folder_path, self.get_filename() + '.npz')
     _dict = np.load(path)
     self.attack.mark.mark = to_tensor(_dict['mark_list'][self.target_class])
     self.attack.mark.alpha_mask = to_tensor(_dict['mask_list'][self.target_class])
     # Recovered marks cover the full mark area.
     self.attack.mark.mask = torch.ones_like(self.attack.mark.mark, dtype=torch.bool)
     # Pin the mark: disable random placement and zero both offsets.
     self.attack.mark.random_pos = False
     self.attack.mark.height_offset = 0
     self.attack.mark.width_offset = 0
Пример #4
0
 def get_data(self, data: tuple[torch.Tensor], v_noise: float = None, mode='train'):
     """Build (noisy input, reconstruction target) pairs for the autoencoder.

     Args:
         data: batch where ``data[0]`` is the input image batch.
         v_noise (float): gaussian noise std; defaults to ``self.v_noise``.
         mode (str): 'train' adds clamped gaussian noise to the input;
             any other mode uses the (detached) input as its own target.

     Returns:
         tuple[torch.Tensor, torch.Tensor]: (model input, target).
     """
     if v_noise is None:
         v_noise = self.v_noise
     _input = data[0]
     # Build fresh locals instead of assigning into `data`: item assignment
     # raises TypeError when `data` is a tuple (as the annotation declares)
     # and silently mutated the caller's batch when it was a list.
     if mode == 'train':
         # future warning: to_tensor, to_valid_img
         noise: torch.Tensor = torch.normal(mean=0.0, std=v_noise, size=_input.shape)
         model_input = (_input + noise).clamp(0.0, 1.0)
         target = _input.detach()
     else:
         model_input = _input.detach()
         target = _input.clone().detach()
     return to_tensor(model_input), to_tensor(target)
Пример #5
0
    def load(self, path: str = None):
        """Load the best recovered mark/mask for the target class.

        Args:
            path (str): path to the ``_best.npy`` result file. Defaults to
                ``<folder_path>/<filename>_best.npy``.
        """
        if path is None:
            # os.path.join instead of string concatenation so the default
            # path works whether or not folder_path ends with a separator,
            # matching the other load() implementations in this project.
            path = os.path.join(
                self.folder_path,
                self.get_filename(target_class=self.target_class) +
                '_best.npy')
        _dict = np.load(path, allow_pickle=True).item()
        self.attack.mark.mark = to_tensor(_dict[self.target_class]['mark'])
        self.attack.mark.alpha_mask = to_tensor(
            _dict[self.target_class]['mask'])
        # Recovered marks cover the full mark area.
        self.attack.mark.mask = torch.ones_like(self.attack.mark.mark,
                                                dtype=torch.bool)
        # Pin the mark: disable random placement and zero both offsets.
        self.attack.mark.random_pos = False
        self.attack.mark.height_offset = 0
        self.attack.mark.width_offset = 0
Пример #6
0
 def load(self, path: str = None):
     """Restore the attack mark/mask from a saved ``.npz`` defense result."""
     if path is None:
         path = os.path.join(self.folder_path, self.get_filename() + '.npz')
     result = np.load(path)
     # Alias the mark object once; every assignment below targets it.
     mark = self.attack.mark
     mark.mark = to_tensor(result['mark_list'][self.target_class])
     mark.alpha_mask = to_tensor(result['mask_list'][self.target_class])
     mark.mask = torch.ones_like(mark.mark, dtype=torch.bool)
     # Pin the mark to a fixed position with zero offsets.
     mark.random_pos = False
     mark.height_offset = 0
     mark.width_offset = 0
     print('defense results loaded from: ', path)
Пример #7
0
    def preprocess_mark(self, data: dict[str, tuple[torch.Tensor,
                                                    torch.Tensor]]):
        """Optimize the trigger mark against ``self.loss_mse`` with Adam.

        The mark is parameterized through the project's ``tanh_func`` helper
        applied to a free tensor ``atanh_mark`` (presumably a tanh squashing
        that keeps mark values bounded — TODO confirm against its definition),
        so Adam optimizes the unconstrained parameter.

        Args:
            data: dict with an 'other' entry of (inputs, labels);
                only the inputs are used here.
        """
        other_x, _ = data['other']
        other_set = TensorDataset(other_x)
        other_loader = self.dataset.get_dataloader(mode='train',
                                                   dataset=other_set,
                                                   num_workers=0)

        # Free parameter; multiplied by the mask so only in-mask pixels
        # start non-zero.
        atanh_mark = torch.randn_like(self.mark.mark) * self.mark.mask
        atanh_mark.requires_grad_()
        self.mark.mark = tanh_func(atanh_mark)
        optimizer = optim.Adam([atanh_mark], lr=self.preprocess_lr)
        optimizer.zero_grad()

        losses = AverageMeter('Loss', ':.4e')
        for _epoch in range(self.preprocess_epoch):
            loader = other_loader
            for (batch_x, ) in loader:
                poison_x = self.mark.add_mark(to_tensor(batch_x))
                loss = self.loss_mse(poison_x)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                # Re-derive the mark from the updated parameter so the next
                # batch (and the loss graph) uses the new trigger.
                self.mark.mark = tanh_func(atanh_mark)
                losses.update(loss.item(), n=len(batch_x))
        # Freeze the result: stop tracking gradients on both tensors.
        atanh_mark.requires_grad = False
        self.mark.mark.detach_()
Пример #8
0
    def __init__(self,
                 name: str = None,
                 model: Union[type[_Model], _Model] = _Model,
                 dataset: Dataset = None,
                 num_classes: int = None,
                 folder_path: str = None,
                 official: bool = False,
                 pretrain: bool = False,
                 randomized_smooth: bool = False,
                 rs_sigma: float = 0.01,
                 rs_n: int = 100,
                 suffix: str = '',
                 **kwargs):
        """Wrap a model class or instance with dataset-aware bookkeeping.

        Args:
            name (str): model name.
            model: a ``_Model`` subclass (instantiated here with ``**kwargs``)
                or an already-constructed instance.
            dataset (Dataset): optional dataset supplying defaults for
                ``num_classes`` and ``loss_weights``.
            num_classes (int): overrides ``dataset.num_classes`` when given.
            folder_path (str): checkpoint directory; created if missing.
            official (bool): load 'official' weights after construction.
            pretrain (bool): load default pretrained weights after construction.
            randomized_smooth / rs_sigma / rs_n: randomized-smoothing options.
            suffix (str): filename suffix recorded in ``param_list``.
            **kwargs: forwarded to the model constructor; an explicit
                'loss_weights' entry (even ``None``) overrides the dataset's.
        """
        self.param_list: dict[str, list[str]] = {}
        self.param_list['model'] = ['folder_path']
        if suffix:
            self.param_list['model'].append('suffix')
        if randomized_smooth:
            self.param_list['model'].extend(
                ['randomized_smooth', 'rs_sigma', 'rs_n'])
        self.name: str = name
        self.dataset = dataset
        self.suffix = suffix
        self.randomized_smooth: bool = randomized_smooth
        self.rs_sigma: float = rs_sigma
        self.rs_n: int = rs_n

        self.folder_path = folder_path
        if folder_path is not None:
            self.folder_path = os.path.normpath(folder_path)
            # exist_ok=True removes the TOCTOU race the previous
            # exists()-then-makedirs() pair had.
            os.makedirs(self.folder_path, exist_ok=True)

        # ------------Auto-------------- #
        # kwargs.get replaces the awkward inline conditional; presence of the
        # key (not its truthiness) decides precedence over the dataset below.
        loss_weights: np.ndarray = kwargs.get('loss_weights')
        if dataset:
            if not isinstance(dataset, Dataset):
                raise TypeError(f'{type(dataset)=}    {dataset=}')
            if num_classes is None:
                num_classes = dataset.num_classes
            if 'loss_weights' not in kwargs:
                loss_weights = dataset.loss_weights
        self.num_classes = num_classes  # number of classes
        self.loss_weights = loss_weights  # TODO: what device shall we save loss_weights? numpy, torch, or torch.cuda.

        # ------------------------------ #
        self.criterion = self.define_criterion(weight=to_tensor(loss_weights))
        if isinstance(model, type):
            self._model = model(num_classes=num_classes, **kwargs)
        else:
            self._model = model
        self.model = self.get_parallel_model(self._model)
        # Freeze all parameters by default; training code re-activates.
        self.activate_params([])
        if official:
            self.load('official')
        if pretrain:
            self.load()
        self.eval()
        if env['num_gpus']:
            self.cuda()
Пример #9
0
 def load(self, path: str = None):
     """Restore the best recovered mark/mask for the target class."""
     if path is None:
         filename = self.get_filename(target_class=self.target_class)
         path = os.path.join(self.folder_path, filename + '_best.npy')
     result: dict[str, dict[str, torch.Tensor]] = np.load(
         path, allow_pickle=True).item()
     entry = result[self.target_class]
     # Alias the mark object once; every assignment below targets it.
     mark = self.attack.mark
     mark.mark = to_tensor(entry['mark'])
     mark.alpha_mask = to_tensor(entry['mask'])
     mark.mask = torch.ones_like(mark.mark, dtype=torch.bool)
     # Pin the mark to a fixed position with zero offsets.
     mark.random_pos = False
     mark.height_offset = 0
     mark.width_offset = 0
     print('defense results loaded from: ', path)
Пример #10
0
    def load(self, path: str = None):
        """Load the mark for the attack's target class and install an
        additive ``add_mark_fn``.

        Args:
            path (str): path to the ``.npz`` result file. Defaults to
                ``<folder_path>/<filename>.npz``.
        """
        if path is None:
            # os.path.join (instead of raw concatenation) so the default
            # path is correct whether or not folder_path carries a trailing
            # separator — consistent with the sibling load() implementations.
            path = os.path.join(self.folder_path, self.get_filename() + '.npz')
        _dict = np.load(path, allow_pickle=True)
        self.attack.mark.mark = to_tensor(_dict['mark_list'][self.attack.target_class])
        # Pin the mark: disable random placement and zero both offsets.
        self.attack.mark.random_pos = False
        self.attack.mark.height_offset = 0
        self.attack.mark.width_offset = 0

        def add_mark_fn(_input, **kwargs):
            # Purely additive trigger: no mask / alpha blending involved.
            return _input + self.attack.mark.mark.to(_input.device)
        self.attack.mark.add_mark_fn = add_mark_fn
Пример #11
0
 def get_data(self, data: tuple[torch.Tensor, torch.Tensor], org: bool = False, keep_org: bool = True, poison_label=True, **kwargs) -> tuple[torch.Tensor, torch.Tensor]:
     """Fetch a batch and apply the down-/up-sampling transform to inputs.

     Each image is shrunk by ``self.resize_ratio`` and resized back to its
     original size (presumably to degrade fine-grained trigger patterns —
     the defense intent; confirm against the class docs).

     Args:
         data: raw (input, label) batch.
         org (bool): use the clean batch from the model instead of the
             attack-poisoned one.
         keep_org (bool): forwarded to ``attack.get_data``.
         poison_label: forwarded to ``attack.get_data``.

     Returns:
         tuple[torch.Tensor, torch.Tensor]: (resized inputs, labels).
     """
     if org:
         _input, _label = self.model.get_data(data)
     else:
         _input, _label = self.attack.get_data(data=data, keep_org=keep_org, poison_label=poison_label, **kwargs)
     h, w = _input.shape[-2], _input.shape[-1]
     _input_list = []
     for single_input in _input:
         image = to_pil_image(single_input)
         # Image.LANCZOS replaces Image.ANTIALIAS, which was deprecated in
         # Pillow 9.1 and removed in Pillow 10; they name the same filter.
         image = F.resize(image, (int(h * self.resize_ratio), int(w * self.resize_ratio)), Image.LANCZOS)
         image = F.resize(image, (h, w))
         _input_list.append(to_tensor(image))
     return torch.stack(_input_list), _label
Пример #12
0
 def get_loss_weights(self, file_path: str = None, verbose: bool = None) -> np.ndarray:
     """Return per-class loss weights, computing and caching them on disk.

     Weights are ``total_count / class_count`` over the training labels,
     so rarer classes get larger weights.

     Args:
         file_path (str): cache location; defaults to
             ``<folder_path>/loss_weights.npy``.
         verbose (bool): print a progress message when computing.

     Returns:
         np.ndarray: one weight per class.

     Raises:
         ValueError: if the number of observed classes does not match
             ``self.num_classes``.
     """
     file_path = file_path if file_path is not None else os.path.join(self.folder_path, 'loss_weights.npy')
     if os.path.exists(file_path):
         # Return the cached ndarray directly: the previous
         # to_tensor(..., dtype='float') call contradicted the declared
         # np.ndarray return type and differed from the computing branch.
         return np.load(file_path)
     if verbose:
         print('Calculating Loss Weights')
     dataset = self.get_full_dataset('train', transform=None)
     _, targets = dataset_to_list(dataset, label_only=True)
     loss_weights = np.bincount(targets)     # TODO: linting problem
     # Raise instead of assert: asserts are stripped under `python -O`.
     if len(loss_weights) != self.num_classes:
         raise ValueError(f'{len(loss_weights)=} != {self.num_classes=}')
     loss_weights: np.ndarray = loss_weights.sum() / loss_weights     # TODO: linting problem
     np.save(file_path, loss_weights)
     print('Loss Weights Saved at ', file_path)
     return loss_weights
Пример #13
0
    def preprocess_mark(self, data: dict[str, tuple[torch.Tensor,
                                                    torch.Tensor]]):
        """Optimize the trigger mark against ``self.loss_mse`` with Adam.

        The mark is parameterized through the project's ``tanh_func`` helper
        applied to a free tensor ``atanh_mark`` (presumably a tanh squashing
        that keeps mark values bounded — TODO confirm against its definition),
        so Adam optimizes the unconstrained parameter.

        Args:
            data: dict with an 'other' entry of (inputs, labels);
                only the inputs are used here.
        """
        # The large block of commented-out timing/tqdm logging code that
        # used to live in this method was removed as dead code.
        other_x, _ = data['other']
        other_set = TensorDataset(other_x)
        other_loader = self.dataset.get_dataloader(mode='train',
                                                   dataset=other_set,
                                                   num_workers=0)

        # Free parameter; multiplied by the mask so only in-mask pixels
        # start non-zero.
        atanh_mark = torch.randn_like(self.mark.mark) * self.mark.mask
        atanh_mark.requires_grad_()
        self.mark.mark = tanh_func(atanh_mark)
        optimizer = optim.Adam([atanh_mark], lr=self.preprocess_lr)
        optimizer.zero_grad()

        losses = AverageMeter('Loss', ':.4e')
        for _epoch in range(self.preprocess_epoch):
            loader = other_loader
            for (batch_x, ) in loader:
                poison_x = self.mark.add_mark(to_tensor(batch_x))
                loss = self.loss_mse(poison_x)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                # Re-derive the mark from the updated parameter so the next
                # batch uses the new trigger.
                self.mark.mark = tanh_func(atanh_mark)
                losses.update(loss.item(), n=len(batch_x))
        # Freeze the result: stop tracking gradients on both tensors.
        atanh_mark.requires_grad = False
        self.mark.mark.detach_()
Пример #14
0
    def trigger_detect(self, _input: torch.Tensor):
        """Detect and confirm square backdoor triggers in a batch of images.

        For each image, ``self.sample_num`` random patches of ``self.size``
        are painted over with the image's dominant color; a position is a
        potential trigger when blocking it changes the model's prediction.
        Each candidate is then re-planted as a mark and kept when the
        accuracy from ``self.confirm_backdoor()`` exceeds
        ``self.threshold_t``.

        Args:
            _input (torch.Tensor): (N, C, H, W)

        Returns:
            result_list: (N) bool tensor — per-image detection flag.
            mask_list: (N, H, W) float tensor — confirmed trigger masks
                (all-zero rows where nothing was confirmed).
        """
        # get dominant color
        dom_c_list = []
        for img in _input:
            dom_c: torch.Tensor = self.get_dominant_colour(img)  # (C)
            dom_c_list.append(dom_c)
        dom_c = torch.stack(dom_c_list).unsqueeze(-1).unsqueeze(
            -1)  # (N, C, 1, 1)

        # generate random numbers
        height, width = _input.shape[-2:]
        pos_height: torch.Tensor = torch.randint(
            low=0,
            high=height - self.size[0],
            size=[len(_input), self.sample_num])  # (N, sample_num)
        pos_width: torch.Tensor = torch.randint(
            low=0,
            high=width - self.size[1],
            size=[len(_input), self.sample_num])  # (N, sample_num)
        pos_list: torch.Tensor = torch.stack(
            [pos_height, pos_width]).transpose(0, -1)  # (N, sample_num, 2)
        # block potential triggers on _input
        block_input = _input.unsqueeze(1).repeat(1, self.sample_num, 1, 1,
                                                 1)  # (N, sample_num, C, H, W)
        for i in range(len(_input)):
            for j in range(self.sample_num):
                x = pos_list[i][j][0]
                y = pos_list[i][j][1]
                # paint the candidate patch with the image's dominant color
                block_input[i, j, :, x:x + self.size[0],
                            y:y + self.size[1]] = dom_c[i]
        # get potential triggers
        _input = to_tensor(_input)
        block_input = to_tensor(block_input)
        org_class = self.model.get_class(_input).unsqueeze(1).repeat(
            1, self.sample_num)  # (N, sample_num)
        block_class_list = []
        for i in range(self.sample_num):
            block_class = self.model.get_class(
                block_input[:, i])  # (N)
            block_class_list.append(block_class)
        block_class = torch.stack(block_class_list, dim=1)
        # True where blocking did NOT change the prediction,
        # i.e. NOT a potential trigger position.
        potential_idx: torch.Tensor = org_class.eq(
            block_class).detach().cpu()  # (N, sample_num)

        # confirm triggers
        result_list = torch.zeros(len(_input), dtype=torch.bool)
        mask_shape = [_input.shape[0], _input.shape[-2], _input.shape[-1]]
        mask_list = torch.zeros(mask_shape,
                                dtype=torch.float)  # (N, height, width)
        mark_class = self.attack.mark
        for i in range(len(_input)):
            print(f'input {i:3d}')
            # candidates: positions where the prediction changed
            pos_pairs = pos_list[i][~potential_idx[i]]  # (*, 2)
            if len(pos_pairs) == 0:
                continue
            for j, pos in enumerate(pos_pairs):
                # re-plant the candidate patch as the attack mark
                self.attack.mark.height_offset = pos[0]
                self.attack.mark.width_offset = pos[1]
                mark_class.org_mark = _input[i, :,
                                             pos[0]:pos[0] + self.size[0],
                                             pos[1]:pos[1] + self.size[1]]
                mark_class.org_mask = torch.ones(self.size, dtype=torch.bool)
                mark_class.org_alpha_mask = torch.ones(self.size,
                                                       dtype=torch.float)
                mark_class.mark, mark_class.mask, mark_class.alpha_mask = mark_class.mask_mark(
                    height_offset=pos[0], width_offset=pos[1])
                target_acc = self.confirm_backdoor()
                output_str = f'    {j:3d}  Acc: {target_acc:5.2f}'
                if not self.attack.mark.random_pos:
                    # overlap with the ground-truth mask, for logging only
                    overlap = jaccard_idx(mark_class.mask.detach().cpu(),
                                          self.real_mask.detach().cpu(),
                                          select_num=self.size[0] *
                                          self.size[1])
                    output_str += f'  Jaccard Idx: {overlap:5.3f}'
                print(output_str)
                if target_acc > self.threshold_t:
                    result_list[i] = True
                    mask_list[i] = mark_class.mask
        return result_list, mask_list
Пример #15
0
 def nc_filter_img(self) -> torch.Tensor:
     """Return an all-ones float mask covering the whole image plane."""
     height, width = self.dataset.n_dim
     ones_mask = torch.ones(height, width, dtype=torch.float)
     return to_tensor(ones_mask, non_blocking=False)
Пример #16
0
 def filter_img(self):
     """Return a float mask that is 1 on the fixed patch [2:7, 2:7], else 0."""
     height, width = self.dataset.n_dim
     patch_mask = torch.zeros(height, width, dtype=torch.float)
     patch_mask[2:7, 2:7] = 1
     return to_tensor(patch_mask, non_blocking=False)
Пример #17
0
    def save_img(self, img_path: str):
        """Write the current mark (weighted by its mask) to an image file."""
        if self.random_pos:
            img = self.org_mark * self.org_mask
        else:
            img = self.mark * self.mask
        save_tensor_as_img(img_path, img)

    def load_npz(self, npz_path: str):
        """Load mark tensors from an ``.npz`` file.

        The path is tried as given first, then relative to ``dir_path``.

        Raises:
            FileNotFoundError: when neither location exists.
        """
        if not os.path.exists(npz_path):
            candidate = os.path.join(dir_path, npz_path)
            if not os.path.exists(candidate):
                raise FileNotFoundError(candidate.removeprefix(dir_path))
            npz_path = candidate
        _dict = np.load(npz_path)
        if not self.mark_distributed:
            self.org_mark = torch.as_tensor(_dict['org_mark'])
            self.org_mask = torch.as_tensor(_dict['org_mask'])
            self.org_alpha_mask = torch.as_tensor(_dict['org_alpha_mask'])
        if not self.random_pos:
            self.mark = to_tensor(_dict['mark'])
            self.mask = to_tensor(_dict['mask'])
            self.alpha_mask = to_tensor(_dict['alpha_mask'])

    def save_npz(self, npz_path: str):
        _dict = {}
        if not self.mark_distributed:
            _dict |= {
                'org_mark': to_numpy(self.org_mark),
                'org_mask': to_numpy(self.org_mask),
                'org_alpha_mask': to_numpy(self.org_alpha_mask)
            }
        if not self.random_pos:
            _dict |= {
                'mark': to_numpy(self.mark),
                'mask': to_numpy(self.mask),
Пример #18
0
 def get_data(data: tuple[torch.Tensor, torch.Tensor],
              **kwargs) -> tuple[torch.Tensor, torch.Tensor]:
     """Convert an (input, label) pair to tensors; labels become long."""
     _input, _label = data[0], data[1]
     return to_tensor(_input), to_tensor(_label, dtype='long')