def output_available_models(class_dict: dict[str, type[Model]] = {},
                            indent: int = 0) -> None:
    names_dict = get_available_models(class_dict)
    for k in sorted(names_dict.keys()):
        prints('{yellow}{k}{reset}'.format(k=k, **ansi), indent=indent)
        prints(names_dict[k], indent=indent + 10)
        print()

def output_info(self, *args, mode: str = 'start',
                _iter: int = 0, iteration: int = 0,
                output: Iterable[str] = None, indent: int = None,
                **kwargs):
    r"""Output information.

    Args:
        mode (str): The output mode
            (e.g., ``'start', 'end', 'middle', 'memory'``).
            Should be legal strings in :meth:`get_output_int()`.
            Defaults to ``'start'``.
        _iter (int): Current iteration. Defaults to ``0``.
        iteration (int): Total iteration. Defaults to ``0``.
        output (~collections.abc.Iterable[str]): Output items.
            Defaults to :attr:`self.output`.
        indent (int): The space indent for the entire string.
            Defaults to :attr:`self.indent`.
        *args: Any positional argument (unused).
        **kwargs: Any keyword argument (unused).
    """
    output = output if output is not None else self.output
    indent = indent if indent is not None else self.indent
    if mode in ['start', 'end']:
        prints(f'{self.name} Optimize {mode}', indent=indent)
    elif mode in ['middle']:
        prints(self.output_iter(name=self.name, _iter=_iter,
                                iteration=iteration),
               indent=indent + 4)
    if 'memory' in output:
        output_memory(indent=indent + 4)

def update_input(self, current_idx: torch.Tensor,
                 adv_input: torch.Tensor,
                 org_input: torch.Tensor,
                 noise: torch.Tensor,
                 pgd_alpha: float | torch.Tensor,
                 pgd_eps: float | torch.Tensor,
                 add_noise_fn: Callable[..., torch.Tensor],
                 clip_min: float | torch.Tensor,
                 clip_max: float | torch.Tensor,
                 loss_fn: Callable[[torch.Tensor], torch.Tensor],
                 output: list[str],
                 *args, loss_kwargs: dict[str, torch.Tensor] = {},
                 **kwargs):
    current_loss_kwargs = {k: v[current_idx]
                           for k, v in loss_kwargs.items()}
    grad = self.calc_grad(loss_fn, adv_input[current_idx],
                          loss_kwargs=current_loss_kwargs)
    if self.grad_method != 'white' and 'middle' in output:
        real_grad = self.whitebox_grad(loss_fn, adv_input[current_idx],
                                       loss_kwargs=current_loss_kwargs)
        prints('cos<real, est> = ',
               F.cosine_similarity(grad.sign().flatten(),
                                   real_grad.sign().flatten()),
               indent=self.indent + 2)
    if self.universal:
        grad = grad.mean(dim=0)
    noise[current_idx] = noise[current_idx] - pgd_alpha * torch.sign(grad)
    noise[current_idx] = self.projector(noise[current_idx], pgd_eps,
                                        norm=self.norm)
    adv_input[current_idx] = add_noise_fn(x=org_input[current_idx],
                                          noise=noise[current_idx],
                                          universal=self.universal,
                                          clip_min=clip_min,
                                          clip_max=clip_max)
    noise[current_idx] = self.valid_noise(adv_input[current_idx],
                                          org_input[current_idx])

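# update_input() above delegates the eps-ball projection to
# self.projector, which is not shown here.  A minimal sketch of such a
# projector, assuming the common convention that an L-inf budget clamps
# elementwise while a finite p-norm budget rescales each sample onto the
# p-ball (batched noise of shape (N, C, H, W) is assumed):
def projector_sketch(noise: torch.Tensor, eps: float,
                     norm: float = float('inf')) -> torch.Tensor:
    if norm == float('inf'):
        return noise.clamp(-eps, eps)
    length = noise.flatten(start_dim=1).norm(p=norm, dim=1)  # per sample
    factor = (eps / length).clamp(max=1.0)                   # shrink only
    return noise * factor.view(-1, *[1] * (noise.dim() - 1))
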
def save(self, file_path: str = None, folder_path: str = None,
         suffix: str = None, component: str = '',
         verbose: bool = False, indent: int = 0, **kwargs):
    with torch.no_grad():
        if file_path is None:
            folder_path = folder_path if folder_path is not None \
                else self.folder_path
            suffix = suffix if suffix is not None else self.suffix
            file_path = os.path.normpath(os.path.join(
                folder_path, f'{self.name}{suffix}.pth'))
        else:
            folder_path = os.path.dirname(file_path)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        # TODO: type annotation might change? dict[str, torch.Tensor]
        module = self._model
        if component == 'features':
            module = self._model.features
        elif component == 'classifier':
            module = self._model.classifier
        else:
            assert component == '', f'{component=}'
        _dict: OrderedDict[str, torch.Tensor] = module.state_dict(
            prefix=component)
        torch.save(_dict, file_path, **kwargs)
        if verbose:
            prints(f'Model {self.name} saved at: {file_path}',
                   indent=indent)

def load(self, file_path: str = None, folder_path: str = None,
         suffix: str = None,
         map_location: Union[str, Callable, torch.device, dict] = 'default',
         component: str = '', strict: bool = True,
         verbose: bool = False, indent: int = 0, **kwargs):
    map_location = map_location if map_location != 'default' \
        else env['device']
    if file_path is None:
        folder_path = folder_path if folder_path is not None \
            else self.folder_path
        suffix = suffix if suffix is not None else self.suffix
        file_path = os.path.normpath(os.path.join(
            folder_path, f'{self.name}{suffix}.pth'))
    if file_path == 'official':    # TODO
        _dict = self.get_official_weights(map_location=map_location)
        last_bias_value = next(reversed(_dict.values()))    # TODO: make sure
        if self.num_classes != len(last_bias_value) \
                and component != 'features':
            strict = False
            _dict.popitem()
            _dict.popitem()
    else:
        try:
            # TODO: type annotation might change? dict[str, torch.Tensor]
            _dict: OrderedDict[str, torch.Tensor] = torch.load(
                file_path, map_location=map_location, **kwargs)
        except Exception as e:
            print(f'{file_path=}')
            raise e
    module = self._model
    if component == 'features':
        module = self._model.features
        _dict = OrderedDict([(key.removeprefix('features.'), value)
                             for key, value in _dict.items()])
    elif component == 'classifier':
        module = self._model.classifier
        _dict = OrderedDict([(key.removeprefix('classifier.'), value)
                             for key, value in _dict.items()])
    else:
        assert component == '', f'{component=}'
    module.load_state_dict(_dict, strict=strict)
    if verbose:
        prints(f'Model {self.name} loaded from: {file_path}',
               indent=indent)

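# The 'official' branch above pops the last two state_dict entries (the
# final layer's weight and bias) and loads with strict=False when the
# class count differs.  A minimal standalone sketch of that pattern,
# assuming a plain nn.Module (function name is illustrative):
def load_except_last_layer_sketch(module: nn.Module,
                                  state_dict: 'OrderedDict[str, torch.Tensor]'):
    state_dict = OrderedDict(state_dict)  # copy before mutating
    state_dict.popitem()                  # final bias (last inserted)
    state_dict.popitem()                  # final weight
    # remaining keys must still match; only the last layer may be missing
    module.load_state_dict(state_dict, strict=False)
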
def validate_fn(self,
                get_data_fn: Callable[..., tuple[torch.Tensor, torch.Tensor]] = None,
                loss_fn: Callable[..., torch.Tensor] = None,
                main_tag: str = 'valid', indent: int = 0,
                **kwargs) -> tuple[float, float]:
    _, clean_acc = self.combined_model._validate(
        print_prefix='Validate Clean', main_tag='valid clean',
        get_data_fn=None, indent=indent, **kwargs)
    _, target_acc = self.combined_model._validate(
        print_prefix='Validate Trigger Tgt',
        main_tag='valid trigger target',
        get_data_fn=self.get_data, keep_org=False,
        poison_label=True, indent=indent, **kwargs)
    self.combined_model._validate(
        print_prefix='Validate Trigger Org', main_tag='',
        get_data_fn=self.get_data, keep_org=False,
        poison_label=False, indent=indent, **kwargs)
    prints(f'Validate Confidence: {self.validate_confidence():.3f}',
           indent=indent)
    prints(f'Neuron Jaccard Idx: {self.check_neuron_jaccard():.3f}',
           indent=indent)
    if self.clean_acc - clean_acc > 3 and self.clean_acc > 40:
        # TODO: better not hardcoded
        target_acc = 0.0
    return clean_acc, target_acc

def output_info(self, mode='start', _iter=0, iteration=0, **kwargs):
    if mode in ['start', 'end']:
        prints(f'{self.name} Optimize {mode}', indent=self.indent)
    elif mode in ['middle']:
        self.output_iter(name=self.name, _iter=_iter,
                         iteration=iteration, indent=self.indent + 4)
    if 'memory' in self.output:
        output_memory(indent=self.indent + 4)

def validate_fn(self,
                get_data_fn: Callable[..., tuple[torch.Tensor, torch.Tensor]] = None,
                loss_fn: Callable[..., torch.Tensor] = None,
                main_tag: str = 'valid', indent: int = 0,
                **kwargs) -> tuple[float, float]:
    _, clean_acc = self.model._validate(
        print_prefix='Validate Clean', main_tag='valid clean',
        get_data_fn=None, indent=indent, **kwargs)
    target_acc = 100.0
    for i, (mark, target_class) in enumerate(self.mark_list):
        _, acc = self.model._validate(
            print_prefix=f'Validate Trigger {i} target {target_class} ',
            main_tag='', get_data_fn=self.get_poison_data,
            mark=mark, target_class=target_class,
            indent=indent, **kwargs)
        target_acc = min(acc, target_acc)
    prints(f'Validate Confidence: {self.validate_confidence():.3f}',
           indent=indent)
    prints(f'Neuron Jaccard Idx: {self.check_neuron_jaccard():.3f}',
           indent=indent)
    if self.clean_acc - clean_acc > 3 and self.clean_acc > 40:
        # TODO: better not hardcoded
        target_acc = 0.0
    return clean_acc, target_acc

def output_info(self, org_input: torch.Tensor, noise: torch.Tensor,
                target: torch.Tensor,
                loss_fn: Callable[[torch.Tensor], torch.Tensor] = None,
                **kwargs):
    super().output_info(org_input=org_input, noise=noise,
                        loss_fn=loss_fn, **kwargs)
    # prints('Original class     : ', _label, indent=self.indent)
    # prints('Original confidence: ', _confidence, indent=self.indent)
    _confidence = self.model.get_target_prob(org_input + noise, target)
    prints('Target class     : ', target.detach().cpu().tolist(),
           indent=self.indent)
    prints('Target confidence: ', _confidence.detach().cpu().tolist(),
           indent=self.indent)

def summary(self, indent: int = 0):
    r"""Output information of :attr:`self`.

    Args:
        indent (int): The space indent for the entire string.
            Defaults to ``0``.
    """
    prints(self, indent=indent)

def summary(self, depth: int = 2, verbose: bool = True,
            indent: int = 0, **kwargs):
    prints('{blue_light}{0:<20s}{reset} Parameters: '.format(
        self.name, **ansi), indent=indent)
    for key, value in self.param_list.items():
        prints('{green}{0:<20s}{reset}'.format(key, **ansi),
               indent=indent + 10)
        prints({v: getattr(self, v) for v in value}, indent=indent + 10)
        prints('-' * 20, indent=indent + 10)
    self.output_layer_information(self._model, depth=depth,
                                  verbose=verbose, indent=indent + 10,
                                  **kwargs)
    prints('-' * 20, indent=indent + 10)

def output_info(self, real_params: torch.Tensor,
                loss_fn: Callable[..., torch.Tensor] = None, **kwargs):
    super().output_info(**kwargs)
    with torch.no_grad():
        loss = float(loss_fn(*real_params))
    prints(f'loss: {loss:.5f}', indent=self.indent)

def output_info(self, org_input: torch.Tensor, noise: torch.Tensor,
                *args,
                loss_fn: Callable[[torch.Tensor], torch.Tensor] = None,
                loss_kwargs: dict[str, torch.Tensor] = {}, **kwargs):
    super().output_info(*args, **kwargs)
    loss = float(loss_fn(org_input + noise, **loss_kwargs))
    norm = noise.norm(p=self.norm)
    prints(f'L-{self.norm} norm: {norm} loss: {loss:.5f}',
           indent=self.indent)

def joint_train(self, epoch: int = 0,
                optimizer: optim.Optimizer = None,
                lr_scheduler: optim.lr_scheduler._LRScheduler = None,
                poison_loader=None, discrim_loader=None,
                save=False, **kwargs):
    in_dim = self.model._model.classifier[0].in_features
    D = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(in_dim, 256)),
        ('bn1', nn.BatchNorm1d(256)),
        ('relu1', nn.LeakyReLU()),
        ('fc2', nn.Linear(256, 128)),
        ('bn2', nn.BatchNorm1d(128)),
        ('relu2', nn.ReLU()),
        ('fc3', nn.Linear(128, 2)),
    ]))
    if env['num_gpus']:
        D.cuda()
    optim_params: list[nn.Parameter] = []
    for param_group in optimizer.param_groups:
        optim_params.extend(param_group['params'])
    optimizer.zero_grad()

    best_acc = 0.0
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    for _epoch in range(epoch):
        self.discrim_train(epoch=100, D=D, discrim_loader=discrim_loader)
        self.model.train()
        self.model.activate_params(optim_params)
        for data in poison_loader:
            optimizer.zero_grad()
            _input, _label_f, _label_d = self.bypass_get_data(data)
            out_f = self.model(_input)
            loss_f = self.model.criterion(out_f, _label_f)
            out_d = D(self.model.get_final_fm(_input))
            loss_d = self.model.criterion(out_d, _label_d)
            loss = loss_f - self.lambd * loss_d
            loss.backward()
            optimizer.step()
        optimizer.zero_grad()
        if lr_scheduler:
            lr_scheduler.step()
        self.model.activate_params([])
        self.model.eval()
        _, cur_acc = self.validate_fn(get_data_fn=self.bypass_get_data)
        if cur_acc >= best_acc:
            prints('best result update!', indent=0)
            prints(f'Current Acc: {cur_acc:.3f} '
                   f'Previous Best Acc: {best_acc:.3f}', indent=0)
            best_acc = cur_acc
            if save:
                self.save()
        print('-' * 50)

def optimize(self, _input: torch.Tensor, noise: torch.Tensor = None,
             pgd_alpha: float = None, pgd_eps: float = None,
             iteration: int = None,
             loss_fn: Callable[[torch.Tensor], torch.Tensor] = None,
             output: Union[int, list[str]] = None,
             add_noise_fn=None, random_init: bool = False,
             **kwargs) -> tuple[torch.Tensor, int | None]:
    # ------------------ Parameter Initialization ------------------ #
    pgd_alpha = pgd_alpha if pgd_alpha is not None else self.pgd_alpha
    pgd_eps = pgd_eps if pgd_eps is not None else self.pgd_eps
    iteration = iteration if iteration is not None else self.iteration
    loss_fn = loss_fn if loss_fn is not None else self.loss_fn
    add_noise_fn = add_noise_fn if add_noise_fn is not None else add_noise
    if random_init:
        noise = pgd_alpha * (torch.rand_like(_input) * 2 - 1)
    else:
        noise = noise if noise is not None else torch.zeros_like(
            _input[0] if self.universal else _input)
    output = self.get_output(output)
    # --------------------------------------------------------------- #
    if 'start' in output:
        self.output_info(_input=_input, noise=noise, mode='start',
                         loss_fn=loss_fn, **kwargs)
    if iteration == 0 or pgd_alpha == 0.0 or pgd_eps == 0.0:
        return _input, None
    X = add_noise_fn(_input=_input, noise=noise, batch=self.universal)
    # --------------------------------------------------------------- #
    for _iter in range(iteration):
        if self.early_stop_check(X=X, loss_fn=loss_fn, **kwargs):
            if 'end' in output:
                self.output_info(_input=_input, noise=noise, mode='end',
                                 loss_fn=loss_fn, **kwargs)
            return X.detach(), _iter + 1
        if self.grad_method == 'hess' and _iter % self.hess_p == 0:
            self.hess = self.calc_hess(loss_fn, X, sigma=self.sigma,
                                       hess_b=self.hess_b,
                                       hess_lambda=self.hess_lambda)
            self.hess /= self.hess.norm(p=2)
        grad = self.calc_grad(loss_fn, X)
        if self.grad_method != 'white' and 'middle' in output:
            real_grad = self.whitebox_grad(loss_fn, X)
            prints('cos<real, est> = ',
                   cos_sim(grad.sign(), real_grad.sign()),
                   indent=self.indent + 2)
        if self.universal:
            grad = grad.mean(dim=0)
        noise.data = (noise - pgd_alpha * torch.sign(grad)).data
        noise.data = self.projector(noise, pgd_eps, norm=self.norm).data
        X = add_noise_fn(_input=_input, noise=noise, batch=self.universal)
        if self.universal:
            noise.data = (X - _input).mode(dim=0)[0].data
        else:
            noise.data = (X - _input).data
        if 'middle' in output:
            self.output_info(_input=_input, noise=noise, mode='middle',
                             _iter=_iter, iteration=iteration,
                             loss_fn=loss_fn, **kwargs)
    if 'end' in output:
        self.output_info(_input=_input, noise=noise, mode='end',
                         loss_fn=loss_fn, **kwargs)
    return X.detach(), None

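# A condensed, self-contained sketch of the loop above for the plain
# (non-universal, white-box, L-inf) case.  loss_fn, eps, alpha and steps
# are placeholders, not this class's defaults; like optimize(), the
# sketch *descends* loss_fn along the gradient sign:
def pgd_loop_sketch(loss_fn: Callable[[torch.Tensor], torch.Tensor],
                    _input: torch.Tensor, eps: float = 8 / 255,
                    alpha: float = 2 / 255,
                    steps: int = 7) -> torch.Tensor:
    noise = torch.zeros_like(_input)
    for _ in range(steps):
        adv = (_input + noise).requires_grad_(True)
        grad = torch.autograd.grad(loss_fn(adv), adv)[0]
        with torch.no_grad():
            # signed-gradient step, then L-inf eps-ball projection
            noise = (noise - alpha * grad.sign()).clamp(-eps, eps)
    return (_input + noise).clamp(0.0, 1.0).detach()
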
def inference(self, _input: torch.Tensor, target: torch.Tensor):
    # ------------------------------- Init ------------------------------- #
    torch.manual_seed(env['seed'])
    if 'start' in self.output:
        self.attack.output_info(
            _input=_input, noise=torch.zeros_like(_input), target=target,
            loss_fn=lambda _X: self.model.loss(_X, target))
    self.attack_grad_list: list[torch.Tensor] = []
    # ------------------------ Attacker Seq ------------------------------ #
    # attacker cluster sequences: (iter, query_num + 1, C, H, W)
    seq = self.get_seq(_input, target)
    # defender cluster center estimate
    # seq_centers: (iter, 1, C, H, W)    seq_bias: (iter)
    seq_centers, seq_bias = self.get_center_bias(seq)
    # seq_centers = seq[:, 0]  # debug
    if 'start' in self.output:
        mean_error = (seq_centers[:, 0] - seq[:, 0]).abs().flatten(
            start_dim=1).amax(dim=1)
        print('Mean Shift Distance: '.ljust(25)
              + f'avg {mean_error.mean():<10.5f} '
              f'min {mean_error.min():<10.5f} '
              f'max {mean_error.max():<10.5f}')
        print('Bias Estimation: '.ljust(25)
              + f'avg {seq_bias.mean():<10.5f} '
              f'min {seq_bias.min():<10.5f} '
              f'max {seq_bias.max():<10.5f}')
    # candidate_centers = self.get_candidate_centers(seq, seq_centers, seq_bias)  # abandoned
    # candidate_centers = seq_centers
    detect_result = self.get_detect_result(seq_centers, target=target)
    attack_result = self.model(seq[:, 0]).argmax(dim=1).detach().cpu()

    attack_succ = self.attack.iteration
    detect_succ = self.attack.iteration
    detect_true = True
    for i in range(self.attack.iteration - 1):
        if attack_result[i] == target \
                and attack_result[min(i + 1, self.attack.iteration - 2)] == target \
                and attack_succ == self.attack.iteration:
            attack_succ = i
        if detect_true \
                and detect_result[i] == detect_result[min(i + 1, self.attack.iteration - 2)] \
                and detect_succ == self.attack.iteration:
            if detect_result[i] == target:
                detect_succ = i
            else:
                detect_true = False
    if 'end' in self.output:
        # print('candidate centers: ', [len(i) for i in candidate_centers])
        print('Detect Iter: ', detect_succ)
        prints(detect_result.tolist(), indent=12)
        print('Attack Iter: ', attack_succ)
        prints(attack_result.tolist(), indent=12)
        print()
    result = ['draw'] * (self.attack.iteration - 1)
    if attack_succ < detect_succ:
        for i in range(attack_succ, self.attack.iteration - 1):
            result[i] = 'lose'
    elif attack_succ > detect_succ:
        for i in range(detect_succ, self.attack.iteration - 1):
            result[i] = 'win'
    # attack_succ == detect_succ leaves the tail as 'draw'
    return result, detect_result, attack_result, attack_succ, detect_succ

def output_info(self, _input: torch.Tensor, noise: torch.Tensor,
                target: torch.Tensor, **kwargs):
    super(PGD, self).output_info(_input, noise, **kwargs)
    # prints('Original class     : ', to_list(_label), indent=self.indent)
    # prints('Original confidence: ', to_list(_confidence), indent=self.indent)
    with torch.no_grad():
        _prob: torch.Tensor = self.model._model.softmax(
            self.model._model.classifier(_input + noise))
        _confidence = _prob.gather(dim=1,
                                   index=target.unsqueeze(1)).flatten()
    prints('Target class     : ', to_list(target), indent=self.indent)
    prints('Target confidence: ', to_list(_confidence),
           indent=self.indent)

def output_info(self, _input: torch.Tensor, noise: torch.Tensor,
                target: torch.Tensor, **kwargs):
    super().output_info(_input, noise, **kwargs)
    # prints('Original class     : ', to_list(_label), indent=self.indent)
    # prints('Original confidence: ', to_list(_confidence), indent=self.indent)
    with torch.no_grad():
        _confidence = self.model.get_target_prob(_input + noise, target)
    prints('Target class     : ', to_list(target), indent=self.indent)
    prints('Target confidence: ', to_list(_confidence),
           indent=self.indent)

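# get_target_prob() used above is not shown; a minimal sketch of the
# softmax-gather it is assumed to perform, mirroring the explicit
# computation in the previous variant (signature is illustrative):
def get_target_prob_sketch(logits: torch.Tensor,
                           target: torch.Tensor) -> torch.Tensor:
    prob = logits.softmax(dim=1)                  # (N, num_classes)
    return prob.gather(dim=1, index=target.unsqueeze(1)).flatten()  # (N,)
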
def validate_target(self, indent: int = 0,
                    verbose=True) -> tuple[float, float]:
    self.model.eval()
    _output = self.model(self.temp_input)
    asr, _ = self.model.accuracy(_output, self.temp_label, topk=(1, 5))
    conf = float(self.model.get_target_prob(self.temp_input,
                                            self.temp_label).mean())
    target_loss = self.model.loss(self.temp_input, self.temp_label)
    if verbose:
        prints(f'Validate Target: Loss: {target_loss:10.4f} '
               f'Confidence: {conf:10.4f} ASR: {asr:7.3f}',
               indent=indent)
    return asr, conf

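# model.accuracy() used above is not shown; a common top-k
# implementation it is assumed to resemble (returns percentages,
# one per requested k):
def topk_accuracy_sketch(output: torch.Tensor, target: torch.Tensor,
                         topk: tuple[int, ...] = (1, 5)) -> list[float]:
    _, pred = output.topk(max(topk), dim=1)    # (N, maxk) class indices
    correct = pred.eq(target.unsqueeze(1))     # bool (N, maxk)
    return [correct[:, :k].any(dim=1).float().mean().mul(100.0).item()
            for k in topk]
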
def output_info(self, _input: torch.Tensor, noise: torch.Tensor,
                loss_fn=None, **kwargs):
    super().output_info(**kwargs)
    with torch.no_grad():
        loss = float(loss_fn(_input + noise))
    norm = noise.norm(p=self.norm)
    prints(f'L-{self.norm} norm: {norm} loss: {loss:.5f}',
           indent=self.indent)

def summary(self, indent: int = 0):
    prints('{blue_light}{0:<30s}{reset} Parameters: '.format(
        self.name, **ansi), indent=indent)
    prints(self.__class__.__name__, indent=indent)
    for key in self.param_list:
        value = getattr(self, key)
        if value:
            prints('{green}{0:<10s}{reset}'.format(key, **ansi),
                   indent=indent + 10)
            prints(value, indent=indent + 10)
            prints('-' * 20, indent=indent + 10)

def summary(self, indent: int = 0):
    prints('{blue_light}{0:<30s}{reset} Parameters: '.format(
        self.name, **ansi), indent=indent)
    prints(self.__class__.__name__, indent=indent)
    for key, value in self.param_list.items():
        if value:
            prints('{green}{0:<20s}{reset}'.format(key, **ansi),
                   indent=indent + 10)
            prints({v: getattr(self, v) for v in value},
                   indent=indent + 10)
            prints('-' * 20, indent=indent + 10)

def summary(self, indent: int = None):
    indent = indent if indent is not None else self.indent
    prints('{blue_light}{0:<30s}{reset} Parameters: '.format(
        self.name, **ansi), indent=indent)
    prints('{yellow}{0}{reset}'.format(self.__class__.__name__, **ansi),
           indent=indent)
    for key in self.param_list['trainer']:
        value = getattr(self, key)
        if value:
            prints('{green}{0:<10s}{reset}'.format(key, **ansi),
                   indent=indent + 10)
            if isinstance(value, dict):
                value = {k: str(v).split('\n')[0]
                         for k, v in value.items()}
            prints(value, indent=indent + 10)
            prints('-' * 20, indent=indent + 10)

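# The summary methods above format headers through an `ansi` mapping of
# terminal escape codes consumed via str.format(**ansi).  A minimal
# sketch of such a mapping (the codes below are standard ANSI; the real
# table may contain more entries or disable colors on dumb terminals):
ansi_sketch = {
    'green': '\033[32m', 'yellow': '\033[33m',
    'blue_light': '\033[94m', 'reset': '\033[0m',
    'upline': '\033[1A', 'clear_line': '\033[2K',
}
# e.g. print('{green}{0:<10s}{reset}'.format('epochs', **ansi_sketch))
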
def get_mark_loss_list(self, verbose: bool = True,
                       **kwargs) -> tuple[torch.Tensor, list[float], list[float]]:
    r"""Get the list of mark, loss and asr of the recovered trigger
    for each class.

    Args:
        verbose (bool): Whether to output the jaccard index for each
            trigger. It's also passed to :meth:`optimize_mark()`.
        **kwargs: Keyword arguments passed to :meth:`optimize_mark()`.

    Returns:
        (torch.Tensor, list[float], list[float]):
            list of mark, loss, asr with length ``num_classes``.
    """
    mark_list: list[torch.Tensor] = []
    loss_list: list[float] = []
    asr_list: list[float] = []
    # todo: parallel to avoid for loop
    file_path = os.path.normpath(os.path.join(
        self.folder_path, self.get_filename() + '.npz'))
    for label in range(self.model.num_classes):
        print('Class: ', output_iter(label, self.model.num_classes))
        mark, loss = self.optimize_mark(label, verbose=verbose, **kwargs)
        if verbose:
            asr, _ = self.attack.validate_fn(indent=4)
            if not self.mark_random_pos:
                select_num = self.attack.mark.mark_height \
                    * self.attack.mark.mark_width
                overlap = mask_jaccard(self.attack.mark.get_mask(),
                                       self.real_mask,
                                       select_num=select_num)
                prints(f'Jaccard index: {overlap:.3f}', indent=4)
        else:
            asr, _ = self.model._validate(
                get_data_fn=self.attack.get_data, keep_org=False,
                poison_label=True, verbose=False)
        mark_list.append(mark)
        loss_list.append(loss)
        asr_list.append(asr)
        np.savez(file_path,
                 mark_list=np.stack([mark.detach().cpu().numpy()
                                     for mark in mark_list]),
                 loss_list=np.array(loss_list))
        print()
    print('Defense results saved at: ' + file_path)
    mark_list_tensor = torch.stack(mark_list)
    return mark_list_tensor, loss_list, asr_list

def output_layer_information(layer: nn.Module, depth: int = 0,
                             verbose: bool = True, indent: int = 0,
                             tree_length: int = None):
    tree_length = tree_length if tree_length is not None \
        else 10 * (depth + 1)
    if depth > 0:
        for name, module in layer.named_children():
            _str = '{blue_light}{0}{reset}'.format(name, **ansi)
            if verbose:
                _str = _str.ljust(tree_length - indent
                                  + len(ansi['blue_light'])
                                  + len(ansi['reset']))
                item = str(module).split('\n')[0]
                if item[-1] == '(':
                    item = item[:-1]
                _str += item
            prints(_str, indent=indent)
            Model.output_layer_information(module, depth=depth - 1,
                                           indent=indent + 10,
                                           verbose=verbose,
                                           tree_length=tree_length)

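# A standalone sketch of the same depth-limited traversal built only on
# torch.nn, for reference outside the Model class (plain print instead
# of prints/ansi; name and indent step are illustrative):
def print_module_tree_sketch(layer: nn.Module, depth: int = 2,
                             indent: int = 0):
    if depth > 0:
        for name, module in layer.named_children():
            # first line of repr, without the trailing '('
            header = str(module).split('\n')[0].removesuffix('(')
            print(' ' * indent + f'{name}: {header}')
            print_module_tree_sketch(module, depth=depth - 1,
                                     indent=indent + 10)
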
def validate_target(self, indent: int = 0,
                    verbose=True) -> tuple[float, float]:
    self.model.eval()
    _output = self.model(self.temp_input)
    target_acc, _ = self.model.accuracy(_output, self.temp_label,
                                        topk=(1, 5))
    target_conf = float(self.model.get_target_prob(
        self.temp_input, self.temp_label).mean())
    target_loss = self.model.loss(self.temp_input, self.temp_label)
    if verbose:
        prints(f'Validate Target: Loss: {target_loss:10.4f} '
               f'Confidence: {target_conf:10.4f} '
               f'Accuracy: {target_acc:7.3f}', indent=indent)
    # todo: Return value
    return target_conf, target_acc

def download_and_extract_archive(self, mode: str):
    file_name = f'{self.name}_{mode}{self.ext[mode]}'
    file_path = os.path.normpath(os.path.join(self.folder_path,
                                              file_name))
    md5 = self.md5.get(mode)
    if not check_integrity(file_path, md5=md5):
        prints('{yellow}Downloading Dataset{reset} '.format(**ansi),
               f'{self.name} {mode:5s}: {file_path}', indent=10)
        download_file_from_google_drive(file_id=self.url[mode],
                                        root=self.folder_path,
                                        filename=file_name, md5=md5)
        print('{upline}{clear_line}'.format(**ansi))
    else:
        prints('{yellow}File Already Exists{reset}: '.format(**ansi),
               file_path, indent=10)
    extract_archive(from_path=file_path, to_path=self.folder_path)

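# A minimal sketch of the same check-download-extract flow built only on
# torchvision's public utilities, for a plain URL rather than a Google
# Drive file id (function name is illustrative):
import os
from torchvision.datasets.utils import (check_integrity,
                                        download_and_extract_archive)

def fetch_archive_sketch(url: str, folder_path: str,
                         file_name: str, md5: str = None):
    file_path = os.path.join(folder_path, file_name)
    if not check_integrity(file_path, md5=md5):
        # downloads, verifies md5, and extracts in one call
        download_and_extract_archive(url, download_root=folder_path,
                                     filename=file_name, md5=md5)
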
def get_mark_loss_list(self) -> tuple[torch.Tensor, list[float], list[float]]:
    print('sample neurons')
    all_ps = self.sample_neuron(self.seed_data['input'])
    print('find min max')
    self.neuron_dict = self.find_min_max(all_ps, self.seed_data['label'])
    format_str = self.serialize_format(layer='20s', neuron='5d',
                                       value='10.3f')
    # output neuron dict information
    for label in range(self.model.num_classes):
        print('Class: ', output_iter(label, self.model.num_classes))
        for _dict in reversed(self.neuron_dict[label]):
            prints(format_str.format(**_dict), indent=4)
    print()
    print('optimize marks')
    return super().get_mark_loss_list(verbose=False)

def optimize_mark(self, label: int,
                  **kwargs) -> tuple[torch.Tensor, float]:
    format_dict = dict(layer='20s', neuron='5d', value='10.3f',
                       loss='10.3f', asr='8.3f', norm='8.3f')
    if not self.attack.mark.mark_random_pos:
        format_dict['jaccard'] = '5.3f'
        select_num = self.attack.mark.mark_height \
            * self.attack.mark.mark_width
    format_str = self.serialize_format(**format_dict)

    mark_best: torch.Tensor = torch.ones_like(self.attack.mark.mark)
    loss_best: float = 1e7
    asr_best: float = 0.0
    dict_best = {}
    for _dict in reversed(self.neuron_dict[label]):
        mark, loss = super().optimize_mark(label, loader=self.loader,
                                           verbose=False, **_dict)
        _dict['mark'] = mark.detach().cpu().clone().numpy()
        asr, _ = self.model._validate(get_data_fn=self.attack.get_data,
                                      keep_org=False, verbose=False)
        norm = float(mark[-1].flatten().norm(p=1))
        str_dict = dict(loss=loss, asr=asr, norm=norm, **_dict)
        if not self.attack.mark.mark_random_pos:
            overlap = mask_jaccard(self.attack.mark.get_mask(),
                                   self.real_mask,
                                   select_num=select_num)
            str_dict['jaccard'] = overlap
        prints(format_str.format(**str_dict), indent=4)
        if asr > asr_best:
            asr_best = asr
            mark_best = mark
            loss_best = loss
            dict_best = str_dict
    format_str = self.serialize_format(color='yellow', **format_dict)
    print()
    prints(format_str.format(**dict_best), indent=4)
    self.attack.mark.mark = mark_best
    return mark_best, loss_best

def download(self, mode: str, url: str, file_path: str = None,
             folder_path: str = None, file_name: str = None,
             file_ext: str = 'zip') -> str:
    if file_path is None:
        if folder_path is None:
            folder_path = self.folder_path
        if file_name is None:
            file_name = f'{self.name}_{mode}.{file_ext}'
        file_path = os.path.join(folder_path, file_name)
    if not os.path.exists(file_path):
        print(f'Downloading Dataset {self.name} {mode:5s}: {file_path}')
        download_url_to_file(url, file_path)
        print('{upline}{clear_line}'.format(**ansi), end='')
    else:
        prints('File Already Exists: ', file_path, indent=10)
    return file_path