def get_model(args, number):
    """Build a dpp_vgg16 model and load its saved checkpoint weights.

    Args:
        args: namespace with a ``model`` attribute; only 'dpp_vgg16' is supported.
        number: checkpoint index used to form ``checkpoint_model_<number>.tar``.

    Returns:
        The VGG16 model wrapped in ``nn.DataParallel`` and moved to CUDA,
        with the checkpoint's ``state_dict`` loaded.

    Raises:
        TypeError: if ``args.model`` is not a supported model name.
    """
    model = None
    if args.model == 'dpp_vgg16':
        model = VGG('VGG16', 0)
        model = nn.DataParallel(model)
        model.cuda()
        save_name = 'checkpoint_model_' + str(number) + '.tar'
        model_load = torch.load('save_vgg16_cifar10_best/' + save_name)
        model.load_state_dict(model_load['state_dict'])
    if model is None:
        # Keep TypeError for backward compatibility with existing callers,
        # but say what actually went wrong instead of print + bare raise.
        raise TypeError(
            "Unsupported model %r; expected 'dpp_vgg16'"
            % getattr(args, 'model', None))
    return model
class EmotionDetector:
    """Classify facial emotion on face images with a pretrained VGG19/ResNet18.

    The network expects 48x48 grayscale faces; each face is evaluated over ten
    44x44 crops (TenCrop) and the crop logits are averaged. Predictions whose
    max softmax score does not exceed ``reliability`` are reported as 'UNK'.
    """

    def __init__(self, model='VGG19', main_dir=main_dir_path,
                 face_detector='undefined', use_cuda=False, reliability=0.8):
        self.main_dir = main_dir
        self.face_detector = face_detector
        self.use_cuda = use_cuda
        self.reliability = reliability
        self.cut_size = 44
        self.transform_test = transforms.Compose([
            transforms.TenCrop(self.cut_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
        ])
        self.class_names = [
            'Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'
        ]
        if model == 'VGG19':
            self.net = VGG('VGG19')
        elif model == 'Resnet18':
            self.net = ResNet18()
        else:
            # Fail fast: the original fell through and later crashed with
            # AttributeError on self.net.
            raise ValueError(
                "Unsupported model %r; expected 'VGG19' or 'Resnet18'" % model)
        self.checkpoint = torch.load(os.path.join(
            self.main_dir + 'pretrained_model/' + model,
            'PrivateTest_model.t7'), map_location='cpu')
        self.net.load_state_dict(self.checkpoint['net'])
        if self.use_cuda:
            self.net.cuda()
        self.net.eval()

    def rgb2gray(self, rgb):
        """Convert an RGB image (H, W, 3+) to grayscale (ITU-R 601 weights)."""
        return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])

    def _classify_face(self, face_img):
        """Run the ten-crop emotion classifier on one RGB face image.

        Returns:
            (score, label): 1-D softmax score tensor over ``class_names`` and
            the winning class name, or 'UNK' when the maximum score does not
            exceed ``self.reliability``.
        """
        gray = resize(self.rgb2gray(face_img), (48, 48),
                      mode='symmetric').astype(np.uint8)
        img = gray[:, :, np.newaxis]
        img = np.concatenate((img, img, img), axis=2)
        img = Image.fromarray(img)
        inputs = self.transform_test(img)
        ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        if self.use_cuda:
            inputs = inputs.cuda()
        # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
        with torch.no_grad():
            outputs = self.net(inputs)
        outputs_avg = outputs.view(ncrops, -1).mean(0)  # avg over crops
        score = F.softmax(outputs_avg, dim=0)
        _, predicted = torch.max(outputs_avg.data, 0)
        if torch.max(score) > self.reliability:
            label = self.class_names[int(predicted.cpu().numpy())]
        else:
            label = 'UNK'
        return score, label

    def detect_emotion_single_face(self, raw_img):
        '''Detect facial emotion for an image of a single face.'''
        return self._classify_face(raw_img)

    def detect_emotion_multiple_face(self, raw_img):
        '''Detect facial emotion for an image with multiple faces.

        Returns (bounding_boxes, scores, predicteds).
        '''
        if not isinstance(self.face_detector, MTCNN):
            # The original only printed here and then crashed with NameError
            # on bounding_boxes; raise explicitly instead.
            #TODO: change to add more facedetection model to do experiments)
            raise ValueError('No MTCNN face dectector found.')
        bounding_boxes, _, _ = self.face_detector.align(raw_img)
        scores = []
        predicteds = []
        for facebox in bounding_boxes:
            face_img = raw_img[int(facebox[1]):int(facebox[3]),
                               int(facebox[0]):int(facebox[2])]
            score, label = self._classify_face(face_img)
            scores.append(score)
            predicteds.append(label)
        return bounding_boxes, scores, predicteds

    def detect_emotion_from_faceboxes(self, faceboxes):
        '''Detect facial emotion for pre-cropped face images.

        NOTE(review): the original body referenced an undefined ``face_img``
        and always raised NameError; each element of ``faceboxes`` is assumed
        to be an RGB face crop — confirm against callers.
        '''
        scores = []
        predicteds = []
        for facebox in faceboxes:
            score, label = self._classify_face(facebox)
            scores.append(score)
            predicteds.append(label)
        return scores, predicteds
class DPP(object):
    """Training/evaluation harness for VGG16 / dpp_vgg16 CIFAR models."""

    def __init__(self, args):
        self.criterion = nn.CrossEntropyLoss().cuda()
        self.lr = args.lr
        # Base rate kept separately so the LR schedule does not compound
        # (self.lr is mutated by adjust_learning_rate).
        self.base_lr = args.lr
        self.epochs = args.epochs
        self.save_dir = './' + args.save_dir  # later change
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir, exist_ok=True)
        if args.model == 'vgg16':
            self.model = VGG('VGG16', 0)
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=self.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
            self.model = torch.nn.DataParallel(self.model)
            self.model.cuda()
        elif args.model == 'dpp_vgg16':
            self.model = integrated_kernel(args)
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=self.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
            #Parallel
        num_params = sum(p.numel() for p in self.model.parameters()
                         if p.requires_grad)
        print('The number of parametrs of models is', num_params)
        if args.save_load:
            location = args.save_location
            print("locaton", location)
            checkpoint = torch.load(location)
            self.model.load_state_dict(checkpoint['state_dict'])

    def train(self, train_loader, test_loader, graph):
        """Train for ``self.epochs`` epochs, log to ``graph`` and checkpoint bests."""
        best_prec = 0
        losses = AverageMeter()
        top1 = AverageMeter()
        for epoch in range(self.epochs):
            # Re-enter train mode every epoch: self.test() below switches the
            # model to eval mode, which previously stuck for later epochs.
            self.model.train()
            #self.adjust_learning_rate(epoch)
            for k, (inputs, target) in enumerate(train_loader):
                # `async` became a reserved word (SyntaxError) in Python 3.7;
                # PyTorch renamed the keyword to `non_blocking`.
                target = target.cuda(non_blocking=True)
                input_var = inputs.cuda()
                target_var = target
                output = self.model(input_var)
                loss = self.criterion(output, target_var)
                #Compute gradient and Do SGD step
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                #Measure accuracy and record loss
                prec1 = self.accuracy(output.data, target)[0]
                losses.update(loss.item(), inputs.size(0))
                top1.update(prec1.item(), inputs.size(0))
            graph.train_loss(losses.avg, epoch, 'train_loss')
            graph.train_acc(top1.avg, epoch, 'train_acc')
            prec = self.test(test_loader, epoch, graph)
            if prec > best_prec:
                print("Acc", prec)
                best_prec = prec
                self.save_checkpoint(
                    {
                        'best_prec1': best_prec,
                        'state_dict': self.model.state_dict(),
                    },
                    filename=os.path.join(self.save_dir,
                                          'checkpoint_{}.tar'.format(epoch)))

    def test(self, test_loader, epoch, test_graph):
        """Evaluate on ``test_loader``; returns the average top-1 accuracy."""
        self.model.eval()
        losses = AverageMeter()
        top1 = AverageMeter()
        # no_grad: evaluation must not build autograd graphs.
        with torch.no_grad():
            for k, (inputs, target) in enumerate(test_loader):
                target = target.cuda()
                inputs = inputs.cuda()
                output = self.model(inputs)
                loss = self.criterion(output, target)
                #Measure accuracy and record loss
                prec1 = self.accuracy(output.data, target)[0]
                losses.update(loss.item(), inputs.size(0))
                top1.update(prec1.item(), inputs.size(0))
        test_graph.test_loss(losses.avg, epoch, 'test_loss')
        test_graph.test_acc(top1.avg, epoch, 'test_acc')
        return top1.avg

    def accuracy(self, output, target, topk=(1, )):
        """Compute precision@k for each k in ``topk``.

        Returns a list of 0-dim tensors, each the percentage of samples whose
        true label is within the top-k predictions.
        """
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape, not view: the slice is non-contiguous in newer PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

    def adjust_learning_rate(self, epoch):
        """Decay LR by 10x every 90 epochs, derived from the initial rate.

        Computing from ``base_lr`` keeps repeated calls idempotent; the
        original multiplied the already-decayed ``self.lr``.
        """
        self.lr = self.base_lr * (0.1 ** (epoch // 90))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.lr

    def save_checkpoint(self, state, filename='checkpoint.pth.tar'):
        """Serialize a checkpoint dict to ``filename`` via torch.save."""
        torch.save(state, filename)
# FGSM adversarial-attack evaluation script for a saved VGG16 CIFAR model.
#
# Fix: the original used torch, nn, Variable and vutils without importing
# them (only F/optim/cudnn were imported), so it died with NameError.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision.utils as vutils
import attacks
import numpy as np
import example
from models.vgg import VGG
from models.lenet import LeNet

use_cuda = torch.cuda.is_available()
attacker = attacks.FGSM()
# attacker.load('saved/VGG16_attacker_0.005.pth')
model = VGG('VGG16')
model.cuda()
model = torch.nn.DataParallel(model, device_ids=list(range(
    torch.cuda.device_count())))
model.load_state_dict(torch.load('saved/VGG16.pth'))
criterion = nn.CrossEntropyLoss()
trainloader, testloader = example.load_cifar()
for inputs, labels in testloader:
    # inputs need gradients so FGSM can perturb along the loss gradient.
    inputs = Variable((inputs.cuda() if use_cuda else inputs),
                      requires_grad=True)
    labels = Variable((labels.cuda() if use_cuda else labels),
                      requires_grad=False)
    adv_inputs, i, j = attacker.attack(inputs, labels, model)
    # NOTE(review): written inside the loop, so the file is overwritten every
    # batch — confirm whether only the final batch image is intended.
    vutils.save_image(inputs.data, 'images/VGG_unperturbed.png')