def __init__(self, args):
    super().__init__(args)

    self.in_channels = 1 if self.dataset == 'mnist' else 3
    self.num_classes = {
        'cifar10': 10,
        'cifar100': 100,
        'caltech-101': 101,
        'caltech-256': 256,
        'imagenet1000': 1000
    }[self.dataset]

    # dataset, passport, and (optional) backdoor trigger data
    self.train_data, self.valid_data = prepare_dataset(self.args)
    self.wm_data = None

    if self.use_trigger_as_passport:
        self.passport_data = prepare_wm('data/trigger_set/pics', crop=self.imgcrop)
    else:
        self.passport_data = self.valid_data

    if self.train_backdoor:
        self.wm_data = prepare_wm('data/trigger_set/pics', crop=self.imgcrop)

    self.construct_model()

    optimizer = optim.SGD(self.model.parameters(),
                          lr=self.lr,
                          momentum=0.9,
                          weight_decay=0.0001)

    # if no milestone steps are specified, fall back to a constant learning rate
    if len(self.lr_config[self.lr_config['type']]) != 0:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   self.lr_config[self.lr_config['type']],
                                                   self.lr_config['gamma'])
    else:
        scheduler = None

    self.trainer = TrainerPrivate(self.model, optimizer, scheduler, self.device)

    if self.is_tl:
        self.finetune_load()
    else:
        self.makedirs_or_load()
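# Note on `self.lr_config` (illustrative, not taken from the project's config files):
# the scheduler branch above expects a dict of the form
#     {'type': <key>, <key>: [milestone epochs], 'gamma': <decay factor>}
# e.g. {'type': 'milestones', 'milestones': [100, 150], 'gamma': 0.1},
# where lr_config[lr_config['type']] supplies the MultiStepLR milestones and an
# empty milestone list disables the scheduler.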
def pruning(self):
    device = self.device

    logdir = f'/data-x/g12/zhangjie/DeepIPR/ours/{self.arch}_{self.dataset}_v3_{self.tag}/1'
    prun_dir = logdir + '/prun'
    if not os.path.exists(prun_dir):
        os.mkdir(prun_dir)
    # keep a copy of the pruning script alongside the results
    shutil.copy('prun.py', str(prun_dir) + "/prun.py")

    title = ''
    txt_pth = os.path.join(prun_dir, 'log_prun.txt')
    logger_prun = Logger(txt_pth, title=title)
    logger_prun.set_names(['Model for Releasing', 'Model for Verification', 'Trigger', 'Signature'])

    self.train_data, self.valid_data = prepare_dataset(self.args)

    sd = torch.load(logdir + '/models/best.pth')
    print(logdir + '/models/best.pth')
    self.model.load_state_dict(sd)
    self.model.to(self.device)

    test(self.model, device, self.valid_data, msg='Ori_pub Result', ind=0)
    test(self.model, device, self.valid_data, msg='Pri_pub Result', ind=1)

    for perc in [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
        # reload the unpruned checkpoint before applying each pruning rate
        sd = torch.load(logdir + '/models/best.pth')
        self.model.load_state_dict(sd)
        self.model.to(self.device)
        pruning_resnet(self.model, perc)

        res = {}
        res_wm = {}
        self.wm_data = prepare_wm('data/trigger_set/pics')

        res['perc'] = perc
        res['pub_ori'] = test(self.model, device, self.valid_data,
                              msg='pruning %s percent Public Result' % perc, ind=0)
        res['pri_ori'] = test(self.model, device, self.valid_data,
                              msg='pruning %s percent Private Result' % perc, ind=1)
        _, res['pri_sign_acc'] = test_signature(self.model)
        res_wm['pri_ori'] = test(self.model, device, self.wm_data,
                                 msg='pruning %s percent Pri_Trigger Result' % perc, ind=1)

        pub_acc = res['pub_ori']['acc']
        pri_acc = res['pri_ori']['acc']
        pri_acc_wm = res_wm['pri_ori']['acc']
        pri_sign_acc = res['pri_sign_acc'] * 100
        logger_prun.append([pub_acc, pri_acc, pri_acc_wm, pri_sign_acc])

    # parse the log file back into one list of values per column
    with open(txt_pth, "r") as f:
        header = f.readline()
        names = header.rstrip().split('\t')  # split the header row into column names
        numbers = {name: [] for name in names}
        for line in f:
            numbers_clm = line.rstrip().split('\t')
            for clm in range(len(numbers_clm)):
                numbers[names[clm]].append(numbers_clm[clm])

    # plot each logged accuracy column against the pruning rate
    plt.figure()
    names_legend = []
    for name in names:
        num_float = [float(num) for num in numbers[name]]
        names_legend.append(name)
        x = np.arange(len(numbers[name])) * 10  # pruning rates: 0%, 10%, ..., 100%
        plt.plot(x, num_float)

    plt.legend(names_legend, fontsize=15)
    plt.grid(True)
    plt.ylabel('Accuracy (%)', fontsize=20)
    plt.xlabel('Pruning rate (%)', fontsize=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.tight_layout()

    save_name = prun_dir + '/prun.eps'
    # plt.savefig(save_name)
    savefig(save_name)
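# `pruning_resnet` is imported from elsewhere in the repo. For reference, a minimal
# global magnitude-pruning sketch is shown below; the name `pruning_resnet_sketch`
# and its details are assumptions for illustration, not the project's actual helper.
def pruning_resnet_sketch(model, perc):
    import numpy as np
    import torch

    if perc == 0:
        return
    # gather all weight magnitudes to compute a single global threshold
    all_weights = np.concatenate(
        [p.detach().abs().cpu().numpy().ravel() for p in model.parameters()])
    threshold = np.percentile(all_weights, perc)
    with torch.no_grad():
        for p in model.parameters():
            # zero out weights whose magnitude falls below the global threshold
            p.mul_((p.abs() >= threshold).float())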