Example #1
def test(args):
    device = torch.device(('cuda:%d' %
                           args.gpu) if torch.cuda.is_available() else 'cpu')
    BACKBONE = Backbone([args.input_size, args.input_size], args.num_layers,
                        args.mode)
    BACKBONE.load_state_dict(torch.load(args.ckpt_path))
    BACKBONE.to(device)
    BACKBONE.eval()
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    print('Start test at', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # accuracy
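    # each line of pair_file: <image1 path> <image2 path> <label>, label 1 for same identity, 0 otherwise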
    with open(args.pair_file, 'r') as f:
        pairs = f.readlines()
    sims = []
    labels = []
    for pair_id, pair in tqdm.tqdm(enumerate(pairs)):
        # print('processing %d/%d...' % (pair_id, len(pairs)), end='\r')
        splits = pair.split()
        feat1 = get_feature(os.path.join(args.data_root, splits[0]), transform,
                            BACKBONE, device)
        feat2 = get_feature(os.path.join(args.data_root, splits[1]), transform,
                            BACKBONE, device)
        label = int(splits[2])
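        # cosine similarity between the two feature vectors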
        sim = np.dot(feat1,
                     feat2) / (np.linalg.norm(feat1) * np.linalg.norm(feat2))
        sims.append(sim)
        labels.append(label)
    acc, th = cal_accuracy(np.array(sims), np.array(labels))
    print('acc=%f with threshold=%f' % (acc, th))
    print('Finish test at', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
Example #2
def train(args):
    DEVICE = torch.device(("cuda:%d"%args.gpu[0]) if torch.cuda.is_available() else "cpu")
    writer = SummaryWriter(args.log_root)
    train_transform = transforms.Compose([
        transforms.Resize([int(128 * args.input_size / 112), int(128 * args.input_size / 112)]),
        transforms.RandomCrop([args.input_size, args.input_size]),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[args.rgb_mean, args.rgb_mean, args.rgb_mean],
                             std=[args.rgb_std, args.rgb_std, args.rgb_std])
    ])
    train_dataset = datasets.ImageFolder(args.data_root, train_transform)
    weights = make_weights_for_balanced_classes(train_dataset.imgs, len(train_dataset.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
    # use the class-balanced sampler built above (a sampler is mutually exclusive with shuffle=True)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, sampler=sampler, drop_last=True)
    NUM_CLASS = len(train_loader.dataset.classes)

    BACKBONE = Backbone([args.input_size, args.input_size], args.num_layers, args.mode)
    HEAD = ArcFace(args.emb_dims, NUM_CLASS, device_id=args.gpu)
    LOSS = FocalLoss()
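    # separate BatchNorm parameters so that weight decay is applied only to the non-BN parameters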
    backbone_paras_only_bn, backbone_paras_wo_bn = separate_irse_bn_paras(BACKBONE)
    _, head_paras_wo_bn = separate_irse_bn_paras(HEAD)
    optimizer = optim.SGD([{'params': backbone_paras_wo_bn+head_paras_wo_bn, 'weight_decay': args.weight_decay},
                        {'params': backbone_paras_only_bn}], lr=args.lr, momentum=args.momentum)
    # optimizer = optim.AdamW([{'params': backbone_paras_wo_bn+head_paras_wo_bn, 'weight_decay': args.weight_decay},
    #                     {'params': backbone_paras_only_bn}], lr=args.lr, momentum=args.momentum)
    
    if args.load_ckpt:
        BACKBONE.load_state_dict(torch.load(os.path.join(args.load_ckpt, 'backbone_epoch{}.pth'.format(args.load_epoch))))
        HEAD.load_state_dict(torch.load(os.path.join(args.load_ckpt, 'head_epoch{}.pth'.format(args.load_epoch))))
        print('Checkpoint loaded')
    
    start_epoch = args.load_epoch if args.load_ckpt else 0

    BACKBONE = nn.DataParallel(BACKBONE, device_ids=args.gpu)
    BACKBONE = BACKBONE.to(DEVICE)

    display_frequency = len(train_loader) // 100
    NUM_EPOCH_WARM_UP = args.num_epoch // 25
    NUM_BATCH_WARM_UP = len(train_loader) * NUM_EPOCH_WARM_UP
    batch = 0
    print('Start training at %s!' % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    for epoch in range(start_epoch, args.num_epoch):
        if epoch in args.stages:
            for params in optimizer.param_groups:
                params['lr'] /= 10.
        BACKBONE.train()
        HEAD.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        for inputs, labels in train_loader:
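            # linear learning-rate warm-up over the first NUM_BATCH_WARM_UP batches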
            if (epoch+1 <= NUM_EPOCH_WARM_UP) and (batch+1 <= NUM_BATCH_WARM_UP):
                for params in optimizer.param_groups:
                    params['lr'] = (batch+1) * args.lr / NUM_BATCH_WARM_UP
            inputs = inputs.to(DEVICE)
            labels = labels.to(DEVICE).long()
            features = BACKBONE(inputs)
            outputs = HEAD(features, labels)
            loss = LOSS(outputs, labels)
            prec1, prec5 = accuracy(outputs.data, labels, topk=(1,5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.data.item(), inputs.size(0))
            top5.update(prec5.data.item(), inputs.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch += 1
            if batch % display_frequency == 0:
                print('%s Epoch %d/%d Batch %d/%d: train loss %f, train prec@1 %f, train prec@5 %f' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    epoch, args.num_epoch, batch, len(train_loader)*args.num_epoch, losses.avg, top1.avg, top5.avg))
        writer.add_scalar('Train_Loss', losses.avg, epoch+1)
        writer.add_scalar('Train_Top1_Accuracy', top1.avg, epoch+1)
        writer.add_scalar('Train_Top5_Accuracy', top5.avg, epoch+1)
        torch.save(BACKBONE.module.state_dict(), os.path.join(args.ckpt_root, 'backbone_epoch%d.pth'%(epoch+1)))
        torch.save(HEAD.state_dict(), os.path.join(args.ckpt_root, 'head_epoch%d.pth'%(epoch+1)))
Example #3
class face_learner(object):
    def __init__(self, conf, inference=False):
        print(conf)
        # self.loader, self.class_num = construct_msr_dataset(conf)
        self.loader, self.class_num = get_train_loader(conf)
        self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode)
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = QAMFace(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)
            self.focalLoss = FocalLoss()

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            self.optimizer = optim.SGD(
                [{
                    'params': paras_wo_bn + [self.head.kernel],
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                lr=conf.lr,
                momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 1000
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 2
        else:
            self.threshold = conf.threshold

        # Multi-GPU training
        self.model = torch.nn.DataParallel(self.model)
        self.model.to(conf.device)
        self.head = torch.nn.DataParallel(self.head)
        self.head = self.head.to(conf.device)

    def save_state(self,
                   conf,
                   accuracy,
                   to_save_folder=False,
                   extra=None,
                   model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path
        torch.save(
            self.model.state_dict(),
            save_path / ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
        if not model_only:
            torch.save(
                self.head.state_dict(),
                save_path / ('head_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
            torch.save(
                self.optimizer.state_dict(),
                save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))

    def load_state(self,
                   conf,
                   fixed_str,
                   from_save_folder=False,
                   model_only=False):
        print('resume model from ' + fixed_str)
        if from_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path
        self.model.load_state_dict(
            torch.load(save_path / 'model_{}'.format(fixed_str)))
        if not model_only:
            self.head.load_state_dict(
                torch.load(save_path / 'head_{}'.format(fixed_str)))
            self.optimizer.load_state_dict(
                torch.load(save_path / 'optimizer_{}'.format(fixed_str)))

    def board_val(self,
                  db_name,
                  accuracy,
                  best_threshold=0,
                  roc_curve_tensor=0):
        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy,
                               self.step)

    def train(self, conf, epochs):
        self.model.train()
        running_loss = 0.
        for e in range(epochs):
            print('epoch {} started'.format(e))
            # manually decay lr
            if e in self.milestones:
                self.schedule_lr()
            for imgs, labels in tqdm(iter(self.loader)):
                imgs = (imgs[:, (2, 1, 0)].to(conf.device) * 255)  # reverse channel order (RGB) and rescale to [0, 255]
                labels = labels.to(conf.device)
                self.optimizer.zero_grad()
                embeddings = self.model(imgs)
                thetas = self.head(embeddings, labels)

                loss = self.focalLoss(thetas, labels)
                loss.backward()
                running_loss += loss.item() / conf.batch_size
                self.optimizer.step()

                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                if self.step % self.evaluate_every == 0 and self.step != 0:
                    self.model.eval()
                    for bmk in [
                            'agedb_30', 'lfw', 'calfw', 'cfp_ff', 'cfp_fp',
                            'cplfw', 'vgg2_fp'
                    ]:
                        acc = eval_emore_bmk(conf, self.model, bmk)
                        self.board_val(bmk, acc)

                    self.model.train()
                if self.step % self.save_every == 0 and self.step != 0:
                    self.save_state(conf, acc)

                self.step += 1

        self.save_state(conf, acc, to_save_folder=True, extra='final')

    def myValidation(self, conf):
        self.model.eval()

        for bmk in [
                'agedb_30', 'lfw', 'calfw', 'cfp_ff', 'cfp_fp', 'cplfw',
                'vgg2_fp'
        ]:
            eval_emore_bmk(conf, self.model, bmk)

    def schedule_lr(self):
        for params in self.optimizer.param_groups:
            params['lr'] /= 10
        print(self.optimizer)
Example #4
class face_learner(object):
    def __init__(self, conf, inference=False):
        print(conf)
        self.lr=conf.lr
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            ##############################  ir_se_50  ##############################
            if conf.struct == 'ir_se_50':
                self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
                print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
            ##############################  resnet101  #############################
            if conf.struct == 'ir_se_101':
                self.model = resnet101().to(conf.device)
                print('resnet101 model generated')
        
        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)        

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            
            ##############################  ir_se_50  ##############################
            if conf.struct == 'ir_se_50':
                self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
                self.head_race = Arcface(embedding_size=conf.embedding_size, classnum=4).to(conf.device)
            ##############################  resnet101  #############################
            if conf.struct == 'ir_se_101':
                self.head = ArcMarginModel(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
                # the race head classifies the 4 race groups, matching the ir_se_50 branch above
                self.head_race = ArcMarginModel(embedding_size=conf.embedding_size, classnum=4).to(conf.device)
            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            
            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')    
            print('len of loader:',len(self.loader)) 
            self.board_loss_every = len(self.loader)//min(len(self.loader),100)
            self.evaluate_every = len(self.loader)//1
            self.save_every = len(self.loader)//1
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(conf.val_folder)
        else:
            #self.threshold = conf.threshold
            pass
    



    def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path
        torch.save(
            self.model.state_dict(), save_path /
            ('model_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
        if not model_only:
            torch.save(
                self.head.state_dict(), save_path /
                ('head_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
            torch.save(
                self.head_race.state_dict(), save_path /
                ('head__race{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
            torch.save(
                self.optimizer.state_dict(), save_path /
                ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
    
    def load_state(self, model, head=None, head_race=None, optimizer=None):
        self.model.load_state_dict(torch.load(model), strict=False)
        if head is not None:
            self.head.load_state_dict(torch.load(head))
        if head_race is not None:
            self.head_race.load_state_dict(torch.load(head_race))
        if optimizer is not None:
            self.optimizer.load_state_dict(torch.load(optimizer))

    def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor,tpr_val):
        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy, self.step)
        self.writer.add_scalar('{}_best_threshold'.format(db_name), best_threshold, self.step)
        self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor, self.step)
        
        self.writer.add_scalar('{}_tpr@fpr0.001'.format(db_name), tpr_val, self.step)
#         self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step)
#         self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step)
#         self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step)
        
    def evaluate(self, conf, carray, issame, nrof_folds = 5, tta = False):
        self.model.eval()
        idx = 0
        embeddings = np.zeros([len(carray), conf.embedding_size])
        with torch.no_grad():
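            # embed carray in batches; with TTA the original and h-flipped embeddings are summed and L2-normalised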
            while idx + conf.batch_size <= len(carray):
                batch = torch.tensor(carray[idx:idx + conf.batch_size])
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device))
                    embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch)
                else:
                    embeddings[idx:idx + conf.batch_size] = self.model(batch.to(conf.device)).cpu()
                idx += conf.batch_size
            if idx < len(carray):
                batch = torch.tensor(carray[idx:])            
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device))
                    embeddings[idx:] = l2_norm(emb_batch)
                else:
                    embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
        tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
        try:
            # TPR at FPR ≈ 0.001 (first point with 0.0008 < FPR < 0.0012)
            tpr_val = tpr[np.less(fpr, 0.0012) & np.greater(fpr, 0.0008)][0]
        except IndexError:
            tpr_val = 0
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor, tpr_val
    
    def find_lr(self,
                conf,
                init_value=1e-8,
                final_value=10.,
                beta=0.98,
                bloding_scale=3.,
                num=None):
        if not num:
            num = len(self.loader)
        mult = (final_value / init_value)**(1 / num)
        lr = init_value
        for params in self.optimizer.param_groups:
            params['lr'] = lr
        self.model.train()
        avg_loss = 0.
        best_loss = 0.
        batch_num = 0
        losses = []
        log_lrs = []
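        # LR range test: increase the learning rate exponentially each batch and record the smoothed loss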
        for i, (imgs, labels) in tqdm(enumerate(self.loader), total=num):

            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            batch_num += 1          

            self.optimizer.zero_grad()

            embeddings = self.model(imgs)
            thetas = self.head(embeddings, labels)
            loss = conf.ce_loss(thetas, labels)          
          
            #Compute the smoothed loss
            avg_loss = beta * avg_loss + (1 - beta) * loss.item()
            self.writer.add_scalar('avg_loss', avg_loss, batch_num)
            smoothed_loss = avg_loss / (1 - beta**batch_num)
            self.writer.add_scalar('smoothed_loss', smoothed_loss,batch_num)
            #Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > bloding_scale * best_loss:
                print('exited with best_loss at {}'.format(best_loss))
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses
            #Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            #Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            self.writer.add_scalar('log_lr', math.log10(lr), batch_num)
            #Do the SGD step
            #Update the lr for the next step

            loss.backward()
            self.optimizer.step()

            lr *= mult
            for params in self.optimizer.param_groups:
                params['lr'] = lr
            if batch_num > num:
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses    

    def train(self, conf, epochs):
        self.model = self.model.to(conf.device)
        self.head = self.head.to(conf.device)
        self.head_race = self.head_race.to(conf.device)
        self.model.train()
        self.head.train()
        self.head_race.train()
        running_loss = 0.
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == 8:  # switch to training head_race only
                #self.init_lr()
                conf.loss0 = False
                conf.loss1 = True
                conf.loss2 = True
                conf.model = False
                conf.head = False
                conf.head_race = True
                print(conf)
            if e == 16:#10:
                #self.init_lr()
                self.schedule_lr()
                conf.loss0 = True
                conf.loss1 = True
                conf.loss2 = True
                conf.model = True
                conf.head = True
                conf.head_race = True
                print(conf)
            if e == 28:#22
                self.schedule_lr()
            if e == 32:
                self.schedule_lr()      
            if e == 35:
                self.schedule_lr()      
            
            # freeze or unfreeze each module according to the current stage flags
            requires_grad(self.head, conf.head)
            requires_grad(self.head_race, conf.head_race)
            requires_grad(self.model, conf.model)
            for imgs, labels  in tqdm(iter(self.loader)):
                imgs = imgs.to(conf.device)
                labels = labels.to(conf.device)
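                # derive the 4-way race label from the identity label using the cumulative class counts in conf.race_num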
                labels_race = torch.zeros_like(labels)
                race0_index = labels.lt(sum(conf.race_num[:1]))
                race1_index = labels.lt(sum(conf.race_num[:2])) & labels.ge(sum(conf.race_num[:1]))
                race2_index = labels.lt(sum(conf.race_num[:3])) & labels.ge(sum(conf.race_num[:2]))
                race3_index = labels.ge(sum(conf.race_num[:3]))
                labels_race[race0_index] = 0
                labels_race[race1_index] = 1
                labels_race[race2_index] = 2
                labels_race[race3_index] = 3

                self.optimizer.zero_grad()
                embeddings = self.model(imgs)
                thetas, w = self.head(embeddings, labels)
                thetas_race, w_race = self.head_race(embeddings, labels_race)
                loss = 0
                loss0 = conf.ce_loss(thetas, labels) 
                loss1 = conf.ce_loss(thetas_race, labels_race)
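                # loss2: 4 x num_classes correlation matrix between race-head and identity-head weights;
                # the BCE below pushes each identity weight to correlate only with its own race prototype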
                loss2 = torch.mm(w_race.t(),w).to(conf.device)
                
                target =  torch.zeros_like(loss2).to(conf.device)
                
                target[0][:sum(conf.race_num[:1])] = 1
                target[1][sum(conf.race_num[:1]):sum(conf.race_num[:2])] = 1
                target[2][sum(conf.race_num[:2]):sum(conf.race_num[:3])] = 1
                target[3][sum(conf.race_num[:3]):] = 1
                
                weight = torch.zeros_like(loss2).to(conf.device)
                for i in range(4):
                    weight[i,:] = sum(conf.race_num)/conf.race_num[i] 
                #loss2 = torch.nn.functional.mse_loss(loss2 , target)
                
                loss2 = F.binary_cross_entropy(torch.sigmoid(loss2),target,weight)
                if conf.loss0:
                    loss += 2 * loss0
                if conf.loss1:
                    loss += loss1
                if conf.loss2:
                    loss += loss2
                #loss = loss0 + loss1 + loss2
                loss.backward()
                running_loss += loss.item()
                self.optimizer.step()
                
                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.
                
                if self.step % self.evaluate_every == 0 and self.step != 0:
                    accuracy=None
                    accuracy, best_threshold, roc_curve_tensor ,tpr_val= self.evaluate(conf, self.agedb_30, self.agedb_30_issame)
                    self.board_val('agedb_30', accuracy, best_threshold, roc_curve_tensor,tpr_val)
                    accuracy, best_threshold, roc_curve_tensor,tpr_val = self.evaluate(conf, self.lfw, self.lfw_issame)
                    self.board_val('lfw', accuracy, best_threshold, roc_curve_tensor,tpr_val)
                    accuracy, best_threshold, roc_curve_tensor,tpr_val = self.evaluate(conf, self.cfp_fp, self.cfp_fp_issame)
                    self.board_val('cfp_fp', accuracy, best_threshold, roc_curve_tensor,tpr_val)
                    self.model.train()
                    
                if self.step % self.save_every == 0 and self.step != 0:
                    
                    self.save_state(conf, accuracy)
                self.step += 1
                
        self.save_state(conf, accuracy, to_save_folder=True, extra='final')

    def schedule_lr(self):
        for params in self.optimizer.param_groups:                 
            params['lr'] /= 10
        print(self.optimizer)
        
    def init_lr(self):
        for params in self.optimizer.param_groups:
            params['lr'] = self.lr
        print(self.optimizer)
        
        
    def schedule_lr_add(self):
        for params in self.optimizer.param_groups:                 
            params['lr'] *= 10
        print(self.optimizer)
        
        
    def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test-time augmentation (horizontal flip only)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:                        
                embs.append(self.model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)
        
        diff = source_embs.unsqueeze(-1) - target_embs.transpose(1,0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1
        return min_idx, minimum               
Example #5
class face_learner(object):
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold

    def save_state(self,
                   conf,
                   accuracy,
                   to_save_folder=False,
                   extra=None,
                   model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path
        torch.save(
            self.model.state_dict(),
            save_path / ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
        if not model_only:
            torch.save(
                self.head.state_dict(),
                save_path / ('head_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
            torch.save(
                self.optimizer.state_dict(),
                save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))

    def load_state(self,
                   conf,
                   fixed_str,
                   from_save_folder=False,
                   model_only=False):
        if from_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path
        #self.model.load_state_dict(torch.load(save_path/'model_{}'.format(fixed_str)))
        state_dict = torch.load(save_path / 'model_{}'.format(fixed_str))
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = v
        self.model.load_state_dict(new_state_dict)
        #
        if not model_only:
            self.head.load_state_dict(
                torch.load(save_path / 'head_{}'.format(fixed_str)))
            self.optimizer.load_state_dict(
                torch.load(save_path / 'optimizer_{}'.format(fixed_str)))

    def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor):
        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy,
                               self.step)
        self.writer.add_scalar('{}_best_threshold'.format(db_name),
                               best_threshold, self.step)
        self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor,
                              self.step)
#         self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step)
#         self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step)
#         self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step)

    def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
        self.model.eval()
        idx = 0
        embeddings = np.zeros([len(carray), conf.embedding_size])
        with torch.no_grad():
            while idx + conf.batch_size <= len(carray):
                batch = torch.tensor(carray[idx:idx + conf.batch_size])
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(
                        fliped.to(conf.device))
                    embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch)
                else:
                    embeddings[idx:idx + conf.batch_size] = self.model(
                        batch.to(conf.device)).cpu()
                idx += conf.batch_size
            if idx < len(carray):
                batch = torch.tensor(carray[idx:])
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)) + self.model(
                        fliped.to(conf.device))
                    embeddings[idx:] = l2_norm(emb_batch)
                else:
                    embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
        tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                       nrof_folds)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor

    def find_lr(self,
                conf,
                init_value=1e-8,
                final_value=10.,
                beta=0.98,
                bloding_scale=3.,
                num=None):
        if not num:
            num = len(self.loader)
        mult = (final_value / init_value)**(1 / num)
        lr = init_value
        for params in self.optimizer.param_groups:
            params['lr'] = lr
        self.model.train()
        avg_loss = 0.
        best_loss = 0.
        batch_num = 0
        losses = []
        log_lrs = []
        for i, (imgs, labels) in tqdm(enumerate(self.loader), total=num):

            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            batch_num += 1

            self.optimizer.zero_grad()

            embeddings = self.model(imgs)
            thetas = self.head(embeddings, labels)
            loss = conf.ce_loss(thetas, labels)

            #Compute the smoothed loss
            avg_loss = beta * avg_loss + (1 - beta) * loss.item()
            self.writer.add_scalar('avg_loss', avg_loss, batch_num)
            smoothed_loss = avg_loss / (1 - beta**batch_num)
            self.writer.add_scalar('smoothed_loss', smoothed_loss, batch_num)
            #Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > bloding_scale * best_loss:
                print('exited with best_loss at {}'.format(best_loss))
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses
            #Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            #Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            self.writer.add_scalar('log_lr', math.log10(lr), batch_num)
            #Do the SGD step
            #Update the lr for the next step

            loss.backward()
            self.optimizer.step()

            lr *= mult
            for params in self.optimizer.param_groups:
                params['lr'] = lr
            if batch_num > num:
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses

    def train(self, conf, epochs):
        self.model.train()
        # Using GPU
        conf.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model = nn.DataParallel(self.model, device_ids=[0, 1, 2, 3])  # assumes four visible GPUs
        self.model.to(conf.device)
        #
        running_loss = 0.
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            for imgs, labels in tqdm(iter(self.loader)):
                imgs = imgs.to(conf.device)
                labels = labels.to(conf.device)
                self.optimizer.zero_grad()
                embeddings = self.model(imgs)
                thetas = self.head(embeddings, labels)
                loss = conf.ce_loss(thetas, labels)
                loss.backward()
                running_loss += loss.item()
                self.optimizer.step()

                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                if self.step % self.evaluate_every == 0 and self.step != 0:
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.agedb_30, self.agedb_30_issame)
                    self.board_val('agedb_30', accuracy, best_threshold,
                                   roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.lfw, self.lfw_issame)
                    self.board_val('lfw', accuracy, best_threshold,
                                   roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.cfp_fp, self.cfp_fp_issame)
                    self.board_val('cfp_fp', accuracy, best_threshold,
                                   roc_curve_tensor)
                    self.model.train()
                if self.step % self.save_every == 0 and self.step != 0:
                    self.save_state(conf, accuracy)

                self.step += 1

        self.save_state(conf, accuracy, to_save_folder=True, extra='final')

    def schedule_lr(self):
        for params in self.optimizer.param_groups:
            params['lr'] /= 10
        print(self.optimizer)

    def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test-time augmentation (horizontal flip only)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        return min_idx, minimum
Example #6
    log_period = config.log_interval
    checkpoint_period = config.save_model_interval
    eval_period = config.test_interval

    device = "cuda"
    epochs = 80

    logger = logging.getLogger("reid_baseline.train")
    logger.info('start training')

    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for training'.format(torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm='yes')
    model.base._freeze_stages()
    logger.info('Freezing backbone stages: {}'.format(-1))
    # train
    for epoch in range(1, epochs + 1):
        start_time = time.time()
        loss_meter.reset()
        acc_meter.reset()
        evaluator.reset()
        scheduler.step()  # step the LR scheduler once per epoch
        model.train()
Example #7
def train(args):
    # gpu init
    multi_gpu = False
    if len(args.gpus.split(',')) > 1:
        multi_gpu = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
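    # networks: D multi-scale discriminator, G AAD generator, F ArcFace identity encoder (kept frozen), E attribute encoder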

    D = MultiscaleDiscriminator(
        input_nc=3,
        ndf=64,
        n_layers=3,
        use_sigmoid=False,
        norm_layer=torch.nn.InstanceNorm2d)  # pix2pix uses MSE loss
    G = AAD_Gen()
    F = Backbone(50, drop_ratio=0.6, mode='ir_se')
    F.load_state_dict(torch.load(args.arc_model_path))
    E = Att_Encoder()

    optimizer_D = torch.optim.Adam(D.parameters(),
                                   lr=0.0004,
                                   betas=(0.0, 0.999))
    optimizer_GE = torch.optim.Adam([{
        'params': G.parameters()
    }, {
        'params': E.parameters()
    }],
                                    lr=0.0004,
                                    betas=(0.0, 0.999))

    if multi_gpu:
        D = DataParallel(D).to(device)
        G = DataParallel(G).to(device)
        F = DataParallel(F).to(device)
        E = DataParallel(E).to(device)
    else:
        D = D.to(device)
        G = G.to(device)
        F = F.to(device)
        E = E.to(device)

    if args.resume:
        if os.path.isfile(args.resume_model_path):
            print("Loading checkpoint from {}".format(args.resume_model_path))
            checkpoint = torch.load(args.resume_model_path)
            args.start_epoch = checkpoint["epoch"]
            D.load_state_dict(checkpoint["state_dict_D"])
            G.load_state_dict(checkpoint["state_dict_G"])
            E.load_state_dict(checkpoint["state_dict_E"])
            #            optimizer_G.load_state_dict(checkpoint['optimizer_G'])
            optimizer_D.load_state_dict(checkpoint['optimizer_D'])
            optimizer_GE.load_state_dict(checkpoint['optimizer_GE'])
        else:
            print('Cannot find checkpoint {}'.format(args.resume_model_path))
    else:
        args.start_epoch = 1

    def print_with_time(string):
        print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + string)

    def weights_init(m):
        classname = m.__class__.__name__
        if isinstance(m, nn.Conv2d):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        if classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)

    def set_requires_grad(nets, requires_grad=False):

        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def trans_batch(batch):
        t = trans.Compose(
            [trans.ToPILImage(),
             trans.Resize((112, 112)),
             trans.ToTensor()])
        bs = batch.shape[0]
        res = torch.ones(bs, 3, 112, 112).type_as(batch)
        for i in range(bs):
            res[i] = t(batch[i].cpu())
        return res

    set_requires_grad(F, requires_grad=False)
    data_transform = trans.Compose([
        trans.ToTensor(),
        trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    #dataset = ImageFolder(args.data_path, transform=data_transform)
    dataset = FaceEmbed(args.data_path)
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             drop_last=True)
    D.apply(weights_init)
    G.apply(weights_init)
    E.apply(weights_init)

    for epoch in range(args.start_epoch, args.total_epoch + 1):
        D.train()
        G.train()
        F.eval()  # F is frozen and only used to extract identity features
        E.train()

        for batch_idx, data in enumerate(data_loader):
            time_curr = time.time()
            iteration = (epoch - 1) * len(data_loader) + batch_idx
            try:
                source, target, label = data

                source = source.to(device)
                target = target.to(device)
                label = torch.LongTensor(label).to(device)

                #Zid =F(trans_batch(source))  # bs, 512
                Zid = F(
                    downsample(source[:, :, 50:-10, 30:-30], size=(112, 112)))
                Zatt = E(target)  # list of 8 multi-scale attribute feature maps
                Yst0 = G(Zid, Zatt)  # bs,3,256,256

                # train discriminators
                pred_gen = D(Yst0.detach())
                #pred_gen = list(map(lambda x: x[0].detach(), pred_gen))
                pred_real = D(target)
                optimizer_D.zero_grad()
                loss_real, loss_fake = loss_hinge_dis()(pred_gen, pred_real)
                L_dis = loss_real + loss_fake
                #    if batch_idx%3==0:
                L_dis.backward()
                optimizer_D.step()

                # train generators
                pred_gen = D(Yst0)
                L_gen = loss_hinge_gen()(pred_gen)
                #L_id = IdLoss()(F(trans_batch(Yst0)), Zid)
                L_id = IdLoss()(F(
                    downsample(Yst0[:, :, 50:-10, 30:-30], size=(112, 112))),
                                Zid)
                #Zatt = list(map(lambda x: x.detach(), Zatt))
                L_att = AttrLoss()(E(Yst0), Zatt)
                L_Rec = RecLoss()(Yst0, target, label)

                # total generator/encoder loss: adversarial + weighted attribute, identity and reconstruction terms
                Loss = (L_gen + 10 * L_att + 5 * L_id + 10 * L_Rec).to(device)
                optimizer_GE.zero_grad()
                Loss.backward()
                optimizer_GE.step()

            except Exception as e:
                print(e)
                continue

            if batch_idx % args.log_interval == 0 or batch_idx == 20:
                time_used = time.time() - time_curr
                print_with_time(
                    'Train Epoch: {} [{}/{} ({:.0f}%)], L_dis:{:.4f}, loss_real:{:.4f}, loss_fake:{:.4f}, Loss:{:.4f}, L_gen:{:.4f}, L_id:{:.4f}, L_att:{:.4f}, L_Rec:{:.4f}'
                    .format(
                        epoch, batch_idx * len(data), len(data_loader.dataset),
                        100. * batch_idx *
                        len(data) / len(data_loader.dataset), L_dis.item(),
                        loss_real.item(), loss_fake.item(), Loss.item(),
                        L_gen.item(), 5 * L_id.item(), 10 * L_att.item(),
                        10 * L_Rec.item()))
                time_curr = time.time()

        if epoch % args.save_interval == 0:  #or batch_idx*len(data) % 350004==0:
            state = {
                "epoch": epoch,
                "state_dict_D": D.state_dict(),
                "state_dict_G": G.state_dict(),
                "state_dict_E": E.state_dict(),
                "optimizer_D": optimizer_D.state_dict(),
                "optimizer_GE": optimizer_GE.state_dict(),
                #                        "optimizer_E": optimizer_E.state_dict(),
            }
            filename = "../model/train1_{:03d}_{:03d}.pth.tar".format(
                epoch, batch_idx * len(data))
            torch.save(state, filename)