Example #1
    def test_op(self, basic):
        labelset, img_path, centerset, bboxset = get_pic_info(
            self.pic_path, self.pic_info)
        set = RPC(labelset, img_path, centerset, bboxset, self.batch_size,
                  self.pic_size)
        dataset = torch.utils.data.DataLoader(dataset=set,
                                              batch_size=self.batch_size,
                                              shuffle=False)

        with torch.no_grad():
            basic_uns = basic.unsqueeze(0).to(self.device)
            basic_logit = self.target_model(basic_uns)
            basic_logit = F.softmax(basic_logit, dim=1)[0]

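            # Negative entropy of the softmax prediction on the prior image: sum(p * log p)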
            loss = torch.sum(torch.mul(basic_logit, torch.log(basic_logit)))
            for id, (image, label, center, bbox, path,
                     _) in enumerate(dataset):
                basic_uns = basic.unsqueeze(0).to(self.device)
                image = Variable(image).to(self.device)
                y = Variable(label)

                extract_list = ["layer4"]
                extract_result = FeatureExtractor(self.target_model,
                                                  extract_list)
                basic_feature = extract_result(basic_uns)[0]
                image_feature = extract_result(image)[0]
                basic_gram = self.gram_matrix(basic_feature[0])
                img_gram = self.gram_matrix(image_feature[0])
                loss3 = F.mse_loss(basic_gram, img_gram)
                for i in range(1, len(image)):
                    img_gram = self.gram_matrix(image_feature[i])
                    loss3 += F.mse_loss(basic_gram, img_gram)
                loss3 = torch.log(loss3)
                loss += loss3
        return loss / len(labelset)
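
Both this example and Example #3 call a gram_matrix helper on the extracted layer4 features; its definition is not shown here. A minimal sketch, written as a standalone function and assuming the standard style-transfer Gram matrix over a single (C, H, W) feature map (the normalisation is a common choice, not necessarily the original's):

import torch

def gram_matrix(feature):
    # feature: one feature map of shape (C, H, W)
    c, h, w = feature.size()
    flat = feature.view(c, h * w)       # flatten the spatial dimensions
    gram = torch.mm(flat, flat.t())     # (C, C) channel-correlation matrix
    return gram / (c * h * w)           # normalise by the number of elements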
Example #2
	def test_op(self, patch_cpu):
		test_pic_info = "../new_class_info/test/correct_test.txt"
		labelset, img_path, centerset, bboxset = get_pic_info(self.pic_path, test_pic_info)
		length = len(labelset)
		if length == 0:
			return 0.0, 0.0, 0.0
		set = RPC(labelset, img_path, centerset, bboxset, self.batch_size, self.image_size)
		test_dataset = torch.utils.data.DataLoader(dataset=set, batch_size=self.batch_size, shuffle=True)
		#print("lr = " + str(learning_rate))
		
		ori_correct_count = 0
		target_correct_count = 0
		top3 = 0.0
		top5 = 0.0
		for id,(image, label, center, bbox, path, _) in enumerate(test_dataset):
			
			all_offset = center
			y = Variable(label)
			real_image = Variable(image).to(self.device)
			offset_x, offset_y = all_offset[0][0], all_offset[1][0]
			
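			# Build one full-size patch canvas and binary mask per image, placed at that image's centre offset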
			all_patch, all_mask = self.pad_transform(patch_cpu, self.image_size, self.patch_size, offset_x, offset_y)
			all_patch = all_patch.unsqueeze(0)
			all_mask = all_mask.unsqueeze(0)
			for i in range (1, len(real_image)):
				offset_x, offset_y = all_offset[0][i], all_offset[1][i]
				patch, mask = self.pad_transform(patch_cpu, self.image_size, self.patch_size, offset_x, offset_y)
				patch = patch.unsqueeze(0)
				mask = mask.unsqueeze(0)
				all_patch = torch.cat([all_patch, patch], 0)
				all_mask = torch.cat([all_mask, mask], 0)
			
			patch, mask = all_patch.to(self.device), all_mask.to(self.device)
			
			adv_image = torch.mul((1 - mask), real_image) + torch.mul(mask, patch)
			
			adv_image = adv_image.to(self.cpu_device)
			with torch.no_grad():
				ori_correct, top_3, top_5 = self.target_model.test(adv_image, y)

			top3 += top_3
			top5 += top_5
			ori_correct_count += ori_correct
		ori_accu = float(ori_correct_count) / float(len(labelset))
		top3_acc = top3 / float(len(labelset))
		top5_acc = top5 / float(len(labelset))
		return ori_accu, top3_acc, top5_acc
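
This example and Example #4 rely on self.pad_transform(patch, image_size, patch_size, offset_x, offset_y) to paste the small patch into a full-size canvas and return the matching binary mask; the helper itself is not shown. A minimal sketch, assuming the offsets give the patch's top-left corner and lie inside the image:

import torch

def pad_transform(patch, image_size, patch_size, offset_x, offset_y):
    # patch: (3, patch_size, patch_size) tensor in [0, 1].
    # Returns (canvas, mask): the patch pasted into a zero canvas of size
    # (3, image_size, image_size), and a mask that is 1 over the patch area.
    canvas = torch.zeros(3, image_size, image_size)
    mask = torch.zeros(3, image_size, image_size)
    x0, y0 = int(offset_x), int(offset_y)
    canvas[:, y0:y0 + patch_size, x0:x0 + patch_size] = patch
    mask[:, y0:y0 + patch_size, x0:x0 + patch_size] = 1.0
    return canvas, mask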
Example #3
    def train_op(self):
        self.build_dir()

        basic = Image.open(self.basic_path, 'r')
        self.x, self.y = basic.size
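        # Hard-coded crop box in the prior image: top-left corner (x3, x4), size (x5, x6)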
        x3, x4, x5, x6 = 1192.58, 782.87, 206.76, 214.61
        x3 = int(x3)
        x4 = int(x4)
        x5 = int(x5) + 1
        x6 = int(x6) + 1
        i = 200
        image = basic.crop((x3 - i, x4 - i, x3 + x5 + i, x4 + x6 + 100))
        new_x, new_y = image.size
        image = transforms.ToTensor()(image)
        x3 = (self.x - new_x) // 2
        x4 = (self.y - new_y) // 2
        pad = nn.ConstantPad2d((x3, x3, x4, x4), 1)
        basic = pad(image)
        preprocess = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((self.pic_size, self.pic_size)),
            transforms.ToTensor(),
        ])
        basic = preprocess(basic)
        basic.requires_grad_(True)

        optimizer = torch.optim.Adam([{
            'params': basic
        }],
                                     lr=self.learning_rate,
                                     weight_decay=1e-4,
                                     amsgrad=True)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[2, 5, 8, 15, 25], gamma=self.gamma)

        counter = 0
        best_acc = float('inf')
        current_acc = 0.0

        labelset, img_path, centerset, bboxset = get_pic_info(
            self.pic_path, self.pic_info)
        length = len(labelset)
        if length == 0:
            return
        set = RPC(labelset,
                  img_path,
                  centerset,
                  bboxset,
                  self.batch_size,
                  self.pic_size,
                  trans=transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                             std=[0.5, 0.5, 0.5]))
        dataset = torch.utils.data.DataLoader(dataset=set,
                                              batch_size=self.batch_size,
                                              shuffle=True)

        start_time = time.time()
        x = []
        attack_loss = []
        x_acc = []
        ori_accuracy = []
        target_accuracy = []
        pro_mean = []
        top_5 = []
        for epoch in range(self.epoch):
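            # Note: in PyTorch >= 1.1 the scheduler is normally stepped after optimizer.step(),
            # typically once at the end of each epoch; calling it here advances the LR first.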
            scheduler.step()
            batch_iteration = len(labelset) / self.batch_size
            for id, (image, label, center, bbox, path,
                     _) in enumerate(dataset):
                basic_uns = basic.unsqueeze(0).to(self.device)
                image = Variable(image).to(self.device)
                y = Variable(label)

                basic_logit = self.target_model(basic_uns)
                basic_logit = F.softmax(basic_logit, dim=1)[0]

                loss1 = torch.sum(
                    torch.mul(basic_logit, torch.log(basic_logit)))

                extract_list = ["layer4"]
                extract_result = FeatureExtractor(self.target_model,
                                                  extract_list)
                basic_feature = extract_result(basic_uns)[0]
                image_feature = extract_result(image)[0]
                basic_gram = self.gram_matrix(basic_feature[0])
                img_gram = self.gram_matrix(image_feature[0])
                loss3 = F.mse_loss(basic_gram, img_gram)
                for i in range(1, len(image)):
                    img_gram = self.gram_matrix(image_feature[i])
                    loss3 += F.mse_loss(basic_gram, img_gram)
                loss3 = torch.log(loss3)
                loss = self.epsilon * loss1 + loss3

                print('loss1:%.4f,loss3:%.4f\nloss:%.4f' %
                      (loss1, loss3, loss))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                basic.data.clamp_(-1, 1)

                if counter % (length // (self.batch_size)) == 0:
                    l = self.test_op(basic).item()  # convert to a Python float for logging/plotting
                    pl.figure()
                    x.append(counter)
                    attack_loss.append(l)
                    pl.plot(x, attack_loss)
                    pl.savefig(self.output_dir + 'loss/' + self.title +
                               '_loss.png')
                    pl.close('all')

                    t = time.time() - start_time
                    print("Epoch: [%2d] [%4d/%4d] time: %2dh:%2dm:%2ds" %
                          (epoch, id, batch_iteration, t // 3600,
                           (t % 3600) // 60, t % 60))
                    print("loss: %.4f" % l)
                    print("learning_rate: %.8f" % scheduler.get_lr()[0])

                    self.save_pic(basic.cpu().detach(), counter)
                    save_obj(
                        basic.cpu().detach().numpy(),
                        self.output_dir + 'patch_pkl/' + self.title + '_' +
                        str(counter) + '.pkl')
                    if l < best_acc:
                        best_acc = l
                    print("current loss: %.4f, best loss: %.4f" %
                          (l, best_acc))
                    print('***************************')

                counter += 1
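
This example and Example #1 instantiate FeatureExtractor(self.target_model, ["layer4"]) and call it to obtain intermediate activations; that class is defined elsewhere. A minimal sketch, assuming a torchvision-style ResNet iterated through its named children (the flattening before the "fc" head is an assumption):

import torch.nn as nn

class FeatureExtractor(nn.Module):
    # Runs a forward pass and collects the outputs of the requested submodules.
    def __init__(self, model, extract_list):
        super().__init__()
        self.model = model
        self.extract_list = extract_list

    def forward(self, x):
        outputs = []
        for name, module in self.model.named_children():
            if name == "fc":                  # flatten before the linear head
                x = x.view(x.size(0), -1)
            x = module(x)
            if name in self.extract_list:
                outputs.append(x)
        return outputs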
Example #4
	def train_op(self):
		self.build_dir()
		# load fused prior
		patch_cpu = Image.open('../134_0_0.jpg', 'r')
		patch_cpu = patch_cpu.resize((512, 512))
		patch_cpu = patch_cpu.crop((0, 0, 32, 32))
		patch_cpu = transforms.ToTensor()(patch_cpu)
		patch_cpu.requires_grad_(True)
		
		optimizer = torch.optim.Adam([{'params': patch_cpu}], lr=self.learning_rate, weight_decay=1e-4, amsgrad=True)
		scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2,4,8,15], gamma=self.gamma)
		
		counter = 0
		best_acc = 1.0
		current_acc = 0.0
		pic_info = "../new_class_info/train/correct_train.txt"

		labelset, img_path, centerset, bboxset = get_pic_info(self.pic_path, pic_info)
		length = len(labelset)
		if length == 0:
			return
		set = RPC(labelset, img_path, centerset, bboxset, self.batch_size, self.image_size)
		dataset = torch.utils.data.DataLoader(dataset=set, batch_size=self.batch_size, shuffle=False)

		start_time = time.time()
		x = []
		attack_loss = []
		x_acc = []
		ori_accuracy = []
		target_accuracy = []
		pro_mean = []
		top_5 = []
		for epoch in range(self.epoch):
			scheduler.step()
			batch_iteration = len(labelset) / self.batch_size
			for id,(image, label, center, bbox, path, _) in enumerate(dataset):
				
				all_offset = center
				
				real_image = Variable(image).to(self.device)
				y = Variable(label)
					
				offset_x, offset_y = all_offset[0][0], all_offset[1][0]
				
				all_patch, all_mask = self.pad_transform(patch_cpu, self.image_size, self.patch_size, offset_x, offset_y)
				all_patch = all_patch.unsqueeze(0)
				all_mask = all_mask.unsqueeze(0)
				for i in range (1, len(real_image)):
					offset_x, offset_y = all_offset[0][i], all_offset[1][i]
					patch, mask = self.pad_transform(patch_cpu, self.image_size, self.patch_size, offset_x, offset_y)
					patch = patch.unsqueeze(0)
					mask = mask.unsqueeze(0)
					all_patch = torch.cat([all_patch, patch], 0)
					all_mask = torch.cat([all_mask, mask], 0)
				
				patch, mask = all_patch.to(self.device), all_mask.to(self.device)
				
				
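				# Composite the adversarial image: original pixels where mask == 0, patch pixels where mask == 1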
				adv_image = torch.mul((1 - mask), real_image) + torch.mul(mask, patch)

				adv = adv_image.to(self.cpu_device)
				loss = self.target_model.run_on_opencv_image(adv, y)
		
				optimizer.zero_grad()
				loss.backward()
				optimizer.step()
				patch_cpu.data.clamp_(0, 1)
				
				if counter % (length // self.batch_size) == 0:
					pl.figure()
					x.append(counter)
					attack_loss.append(loss.item())
					pl.plot(x,attack_loss)
					pl.savefig(self.output_dir + 'loss/' + self.title + '_loss.png')
					pl.figure()
					
					t = time.time() - start_time
					print("Epoch: [%2d] [%4d/%4d] time: %2dh:%2dm:%2ds" % (epoch, id, batch_iteration, t // 3600, (t % 3600) // 60, t % 60))
					print("[Validation].......")
					print("learning_rate: %.8f" % scheduler.get_lr()[0])

					errAE = loss

					ori_acc, top3, top5 = self.test_op(patch_cpu)
					
					self.save_patched_pic(ori_acc, counter, path, adv_image, 'all', True)
					
					x_acc.append(counter)

					ori_accuracy.append(ori_acc)
					pl.plot(x_acc, ori_accuracy)
					pl.savefig(self.output_dir + 'accuracy/' + self.title + '_top1.png')
					
					pl.figure()
					
					pro_mean.append(top3)
					pl.plot(x_acc, pro_mean)
					pl.savefig(self.output_dir + 'accuracy/' + self.title + '_top3.png')
					pl.close('all')
					
					pl.figure()
					
					top_5.append(top5)
					pl.plot(x_acc, top_5)
					pl.savefig(self.output_dir + 'accuracy/' + self.title + '_top5.png')
					
					print("top1: %4.4f" % ori_acc)
					print("top3: %4.4f" % top3)
					print("top5: %4.4f" % top5)
					if ori_acc < best_acc:
						best_acc = ori_acc
						self.save_patch(patch_cpu.cpu().detach(), ori_acc, counter, 'all')
						save_obj(patch_cpu.cpu().detach().numpy(), self.output_dir + 'patch_pkl/' + self.title + '.pkl')
					print("current acc: %.4f, best acc: %.4f" % (ori_acc, best_acc))
					print('***************************')			
				counter += 1
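
Both training loops persist the learned tensor with save_obj(...), which is also defined elsewhere. A minimal pickle-based sketch of such a helper (the exact serialisation format used by the original is an assumption):

import pickle

def save_obj(obj, path):
    # Serialise any picklable object (here a NumPy array) to disk.
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

def load_obj(path):
    with open(path, 'rb') as f:
        return pickle.load(f)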