Example #1
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(
                '/home/zzg/DeepLearning/InsightFace_Pytorch/work_space/log')
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 29000  #100
            self.evaluate_every = len(self.loader) // 500  ##10
            self.save_every = len(self.loader) // 290  #5
            #            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            self.agedb_30, self.agedb_30_issame = get_val_data(
                '/home/zzg/DeepLearning/InsightFace_Pytorch/data/faces_emore')
        else:
            self.threshold = conf.threshold
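A note on the recurring pattern: every variant below calls separate_bn_paras to split the backbone's parameters so that weight decay hits the convolution/linear weights but not the batch-norm parameters (decaying BN scale and shift tends to hurt training). The real helper lives in the repo's utils module; this is only a minimal sketch of what such a split typically looks like:

import torch.nn as nn

def separate_bn_paras(model):
    # Split parameters into batch-norm parameters (kept free of weight
    # decay) and the parameters of all other leaf modules.
    paras_only_bn, paras_wo_bn = [], []
    for layer in model.modules():
        if isinstance(layer, nn.modules.batchnorm._BatchNorm):
            paras_only_bn.extend(layer.parameters())
        elif not list(layer.children()):  # leaf module
            paras_wo_bn.extend(layer.parameters())
    return paras_only_bn, paras_wo_bn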
Example #2
    def __init__(self, conf, inference=False):
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            # print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            # print('{}_{} model generated done !'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            # print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            # NOTE: 'args' is not a parameter of this constructor; it is
            # assumed to be a module-level argparse namespace exposing
            # root_dir and rec_path.
            rootdir = os.path.join(args.root_dir, args.rec_path)
            self.board_loss_every = len(self.loader) // len(self.loader)  # == 1: log every step
            self.evaluate_every = len(self.loader) // 1  # once per epoch
            # self.save_every = len(self.loader)//len(self.loader)   # 5
            print('board loss every: {} -> evaluate_every: {} \n'.format(
                self.board_loss_every, self.evaluate_every))
            print('loader paths of validation dataset {}'.format(rootdir))
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                rootdir)
        else:
            self.threshold = conf.threshold
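Throughout these examples the margin head exposes its class-weight matrix as head.kernel, which is why the optimizer definitions append [self.head.kernel] to the non-BN parameter group. A compact, simplified ArcFace head illustrating that layout (an illustration only, not the repo's exact Arcface class):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ArcfaceSketch(nn.Module):
    # Simplified ArcFace: cosine logits with an additive angular margin m
    # on the target class, scaled by s.
    def __init__(self, embedding_size=512, classnum=1000, s=64.0, m=0.5):
        super().__init__()
        self.kernel = nn.Parameter(torch.randn(embedding_size, classnum))
        self.s, self.m = s, m

    def forward(self, embeddings, labels):
        w = F.normalize(self.kernel, dim=0)           # unit class centers
        x = F.normalize(embeddings, dim=1)            # unit embeddings
        cos = (x @ w).clamp(-1 + 1e-7, 1 - 1e-7)      # cosine similarities
        theta = cos.acos()
        target = F.one_hot(labels, cos.size(1)).bool()
        cos_m = torch.where(target, (theta + self.m).cos(), cos)
        return self.s * cos_m                         # feed to cross-entropy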
Example #3
    def __init__(self, conf, inference=False):
        print(conf)
        print(conf.use_mobilfacenet)
        input("CONF")
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
        
        if not inference:
            self.milestones = conf.milestones
            # Dataset loader
            # returns an ImageLoader-style dataset loader
            self.loader, self.class_num = get_train_loader(conf)        

            # tensorboardX class for saving logs
            # log_path is the directory where the statistics are stored
            self.writer = SummaryWriter(conf.log_path)


            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)

            print('two model heads generated')
            # paras_only_bn holds the layers' batch-norm parameters
            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            
            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # Parameters controlling how often to log, evaluate, and save the models
            self.board_loss_every = len(self.loader)//10
            self.evaluate_every = len(self.loader)//10
            self.save_every = len(self.loader)//5
            print("DATASET")
            print(self.loader.dataset.root)
            # return the arrays and labels from the VALIDATION dataset folders
            self.agedb_30 ,self.agedb_30_issame = get_val_data(self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #4
    def __init__(self, conf, inference=False):
        accuracy = 0.0
        logger.debug(conf)
        if conf.use_mobilfacenet:
            # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            self.model = MobileFaceNet(conf.embedding_size).cuda()
            logger.debug('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).cuda()#.to(conf.device)
            logger.debug('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
        if not inference:
            self.milestones = conf.milestones
            logger.info('loading data...')
            self.loader, self.class_num = get_train_loader(conf, 'emore', sample_identity=True)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = CircleLoss(m=0.25, gamma=256.0).cuda()

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn, 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            # self.optimizer = torch.nn.parallel.DistributedDataParallel(optimizer,device_ids=[conf.argsed])
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            if conf.fp16:
                self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O2")
                self.model = DistributedDataParallel(self.model).cuda()
            else:
                self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[conf.argsed]).cuda() #add line for distributed

            self.board_loss_every = len(self.loader)//100
            self.evaluate_every = len(self.loader)//2
            self.save_every = len(self.loader)//2
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(Path(self.loader.dataset.root).parent)
        else:
            self.threshold = conf.threshold
            self.loader, self.query_ds, self.gallery_ds = get_test_loader(conf)
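Example #4 wraps the model in DistributedDataParallel (optionally after apex's amp.initialize for fp16), so it assumes the process group was initialized before the learner is constructed. A minimal launch preamble under that assumption; the --local_rank argument follows the usual torch.distributed.launch convention and is not taken from the repo:

import argparse
import torch
import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)  # set by the launcher
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
# conf.argsed (used above in device_ids=[conf.argsed]) would then be
# populated from args.local_rank before the learner is built.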
Example #5
    def __init__(self, conf, inference=False, transfer=0, ext='final'):
        pprint.pprint(conf)
        self.conf = conf
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            tmp_idx = ext.rfind('_')  # find the last '_' to replace it by '/'
            self.ext = '/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:]
            self.writer = SummaryWriter(str(conf.log_path) + self.ext)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            self.optimizer = optim.Adam(
                list(self.model.parameters()) + list(self.head.parameters()),
                conf.lr)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.save_freq = len(self.loader) // 5  #//5 # originally, 100
            self.evaluate_every = len(self.loader)  #//5 # originally, 10
            self.save_every = len(self.loader)  #//2 # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_112, self.val_112_issame = get_val_pair(self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold

        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_accuracy = []
        self.test_counter = []
Example #6
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_seesawFaceNet:
            self.model = seesaw_shuffleFaceNetv3(conf.embedding_size).to(conf.device)
            print('seesawFaceNetv3 (with slim) model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode)#.to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
		
        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)        

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = LiArcFace(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
            print('using LiArcFace as loss function')

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            self.model = torch.nn.DataParallel(self.model).cuda()
            cudnn.enabled = True
            cudnn.benchmark = True		
            if conf.use_seesawFaceNet:
                print('setting optimizer for seesawFacenet')
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.weight], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn + [self.head.weight], 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')    
            self.board_loss_every = len(self.loader)//100
            self.evaluate_every = len(self.loader)//10
            self.save_every = len(self.loader)//2
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #7
    def __init__(self, conf, inference=False):

        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            print(self.optimizer)
            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
            try:
                if conf.use_mobilfacenet:
                    self.model.load_state_dict(torch.load(f'{conf.work_path}/mobilenet.pth'))
                    print('from recognizer: MobileFaceNet Loaded')
                else:
                    self.model.load_state_dict(torch.load(f'{conf.work_path}/ir_se50.pth'))
                    print('from recognizer: IR_SE_50 Loaded')
            except IOError as e:
                exit(f'from recognizer Exit: the weights file does not exist;'
                     f'\n download it and place it in the "{conf.work_path}" folder\n {e}')
Example #8
    def __init__(self, conf, inference=False):
        print(conf)
        # self.loader, self.class_num = construct_msr_dataset(conf)
        self.loader, self.class_num = get_train_loader(conf)
        self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode)
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = QAMFace(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)
            self.focalLoss = FocalLoss()

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            self.optimizer = optim.SGD(
                [{
                    'params': paras_wo_bn + [self.head.kernel],
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                lr=conf.lr,
                momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 1000
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 2
        else:
            self.threshold = conf.threshold

        # Multi-GPU training
        self.model = torch.nn.DataParallel(self.model)
        self.model.to(conf.device)
        self.head = torch.nn.DataParallel(self.head)
        self.head = self.head.to(conf.device)
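Example #8 pairs its QAMFace head with a focal loss. For reference, a common focal-loss formulation over softmax logits (a sketch with the paper's default gamma=2; the repo's FocalLoss may differ):

import torch
import torch.nn as nn

class FocalLossSketch(nn.Module):
    # Focal loss: down-weights well-classified examples by (1 - p_t)^gamma.
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma
        self.ce = nn.CrossEntropyLoss(reduction='none')

    def forward(self, logits, labels):
        log_pt = -self.ce(logits, labels)   # log-probability of the true class
        pt = log_pt.exp()
        return ((1 - pt) ** self.gamma * -log_pt).mean()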
Example #9
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            print('class_num:', self.class_num)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # if conf.data_mode == 'small_vgg':
            #     self.board_loss_every = len(self.loader)
            #     print('len(loader', len(self.loader))
            #     self.evaluate_every = len(self.loader)
            #     self.save_every = len(self.loader)
            #     # self.lfw, self.lfw_issame = get_val_data(conf, conf.smallvgg_folder)

            # else:
            #     self.board_loss_every = len(self.loader)

            #     self.evaluate_every = len(self.loader)//10
            #     self.save_every = len(self.loader)//5
            self.agedb_30, self.cfp_fp, self.lfw, self.kface, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.kface_issame = get_val_data(
                conf, self.loader.dataset.root.parent)

        else:
            self.threshold = conf.threshold
Example #10
    def __init__(self,
                 conf,
                 inference=False,
                 train_transforms=None,
                 val_transforms=None,
                 train_loader=None):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
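            # NOTE: in this variant all loader/head/optimizer setup lives in
            # the else-branch below, so the MobileFaceNet path builds only the
            # backbone and skips training setup entirely.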
        else:

            self.milestones = conf.milestones
            if train_loader is None:
                self.loader, self.class_num = get_train_loader(
                    conf, train_transforms)
            else:
                self.loader = train_loader
                self.class_num = conf.num_classes

            if conf.net_mode in ['ir', 'ir_se']:
                self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                      conf.net_mode,
                                      conf.use_gap).to(conf.device)
            else:
                import json
                self.model = MetricNet(model_name=conf.net_mode,
                                       pooling=conf.pooling,
                                       use_fc=True,
                                       fc_dim=conf.embedding_size,
                                       dropout=conf.last_fc_dropout,
                                       pretrained=conf.pretrained,
                                       class_num=self.class_num).to(
                                           conf.device)
                print('{}_{} model generated'.format(conf.net_mode,
                                                     conf.net_depth))

            if conf.use_mobilfacenet or conf.net_mode in ['ir', 'ir_se']:
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num).to(conf.device)
            else:
                if conf.loss_module == 'arcface':
                    self.head = ArcMarginProduct(self.model.final_in_features,
                                                 self.class_num,
                                                 s=conf.s,
                                                 m=conf.margin,
                                                 easy_margin=False,
                                                 ls_eps=conf.ls_eps).to(
                                                     conf.device)
                elif conf.loss_module == 'cosface':
                    self.head = AddMarginProduct(self.model.final_in_features,
                                                 self.class_num,
                                                 s=conf.s,
                                                 m=conf.margin).to(conf.device)
                elif conf.loss_module == 'adacos':
                    self.head = AdaCos(self.model.final_in_features,
                                       self.class_num,
                                       m=conf.margin,
                                       theta_zero=conf.theta_zero).to(
                                           conf.device)
                else:
                    self.head = nn.Linear(self.model.final_in_features,
                                          self.class_num).to(conf.device)

            print('two model heads generated')
            if conf.ft_model_path:
                self.load_ft_model(conf.ft_model_path, not conf.no_strict)
            elif conf.restore_suffix:
                self.load_state(conf,
                                conf.restore_suffix,
                                from_save_folder=False,
                                model_only=False)

            if not inference:

                self.writer = SummaryWriter(conf.log_path)
                self.step = 0

                paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

                if conf.use_mobilfacenet:
                    params = [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }]
                    wd = 4e-5
                else:
                    # if conf.net_mode in ['ir', 'ir_se']:
                    # params = [
                    #     {'params': paras_wo_bn + [self.head.weight], 'weight_decay': 5e-4},
                    #     {'params': paras_only_bn}
                    # ]
                    params = [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }]
                    wd = 5e-4
                    # else:
                    #     params = self.model.parameters()
                    #     wd = conf.wd
                    #     # params = [
                    #     #     {'params': paras_wo_bn + [self.head.weight], 'weight_decay': conf.wd},  # 5e-4},
                    #     #     {'params': paras_only_bn}
                    #     # ]

                if conf.optimizer == 'sgd':
                    self.optimizer = optim.SGD(
                        params, lr=conf.lr,
                        momentum=conf.momentum)  # , weight_decay=wd)
                elif conf.optimizer == 'adam':
                    self.optimizer = optim.Adam(
                        params, lr=conf.lr)  # , weight_decay=wd)
                print(self.optimizer)
                #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

                print('optimizers generated')
                self.board_loss_every = len(self.loader) // 100
                self.evaluate_every = len(self.loader) // 10
                self.save_every = len(self.loader) // 5

                self.board_loss_every = 20
                self.evaluate_every = len(self.loader)
                self.save_every = len(self.loader)
                if conf.data_mode == 'common':
                    import json
                    val_img_dir_map = json.loads(conf.val_img_dirs)
                    self.val_dataloaders = {}
                    for val_name in val_img_dir_map:
                        val_img_dir = val_img_dir_map[val_name]
                        val_dataloader, common_val_issame = get_common_val_data(
                            val_img_dir,
                            conf.max_positive_cnt,
                            conf.val_batch_size,
                            conf.val_pin_memory,
                            conf.num_workers,
                            val_transforms=val_transforms,
                            use_pos=not conf.not_use_pos,
                            use_neg=not conf.not_use_neg,
                            val_smapling_type=conf.val_smapling_type,
                            use_keras_model=conf.use_val_left_right_check)
                        self.val_dataloaders[val_name] = [
                            val_dataloader, common_val_issame
                        ]
                elif conf.data_mode == 'dacon_landmark':

                    pass
                else:
                    self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                        self.loader.dataset.root.parent)
            else:
                self.threshold = conf.threshold
Example #11
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            print('prepare train loader..')
            self.loader, self.class_num = get_train_loader(conf)
            current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            self.writer = SummaryWriter(str(conf.log_path / current_time))
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            self.lrscheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, factor=0.1, patience=20, verbose=True)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # len(self.loader) is the number of batches per epoch
            self.board_loss_every = len(self.loader) // 120
            self.evaluate_every = len(self.loader) // 40
            self.save_every = len(self.loader) // 40
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #12
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 60
            self.save_every = len(self.loader) // 1
            print("????????", len(self.loader), self.board_loss_every,
                  self.evaluate_every, self.save_every, "||||||")
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_data, self.val_label = get_val_dataset(conf.val_path)
            self.val_data, self.val_label = get_val_dataset(
                conf.emore_folder_val)
            # self.imgs_val, self.labels_val = get_val_data(conf.emore_folder_val, stop_num=5000)
            """
            if conf.resuse:
                print("resue model, optimizer, and head!!")
                self.model.load_state_dict(torch.load('./work_space/models/model_2019-08-16-17-31_accuracy:0.9247680739559259_step:5773_None.pth'))
                self.optimizer.load_state_dict(torch.load('./work_space/models/optimizer_2019-08-16-17-31_accuracy:0.9247680739559259_step:5773_None.pth'))
                self.head.load_state_dict(torch.load('./work_space/models/head_2019-08-16-17-31_accuracy:0.9247680739559259_step:5773_None.pth'))
            """
        else:
            self.threshold = conf.threshold
Example #13
    def __init__(self, conf, args, inference=False):
        print(conf)
        self.local_rank = args.local_rank
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).cuda()

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #[self.model, self.head], self.optimizer = amp.initialize([self.model, self.head], self.optimizer, opt_level='O1')
            [self.model, self.head
             ], self.optimizer = amp.initialize([self.model, self.head],
                                                self.optimizer,
                                                opt_level='O3',
                                                keep_batchnorm_fp32=True)
            print(self.optimizer, args.local_rank)
            self.head = DistributedDataParallel(self.head)
            self.model = DistributedDataParallel(self.model)
            #self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[args.local_rank])
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #14
    def __init__(self, conf, train=True):
        make_dir(conf.work_path)
        make_dir(conf.model_path)
        make_dir(conf.log_path)

        if conf.gpu_ids:
            assert torch.cuda.is_available(), 'GPU is not available!'
            torch.backends.cudnn.benchmark = True
            conf.device = torch.device('cuda')
        else:
            conf.device = torch.device('cpu')

        self.gpu_ids = conf.gpu_ids

        self.model = None
        self.net_type = '{}_{}'.format(conf.net_mode, conf.net_depth)
        if conf.net_mode == 'ir' or conf.net_mode == 'ir_se':
            self.model = IRNet(conf.net_depth, conf.drop_ratio,
                               conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))
        elif conf.net_mode == 'resnet':
            if conf.net_depth == 18:
                self.model = ResNet_18().to(conf.device)
            elif conf.net_depth == 34:
                self.model = ResNet_34().to(conf.device)
            elif conf.net_depth == 50:
                self.model = ResNet_50().to(conf.device)
            elif conf.net_depth == 101:
                self.model = ResNet_101().to(conf.device)
            elif conf.net_depth == 152:
                self.model = ResNet_152().to(conf.device)
            else:
                raise NotImplementedError(
                    "Model {}_{} is not implemented".format(
                        conf.net_mode, conf.net_depth))
        elif conf.net_mode == 'lightcnn':
            if conf.net_depth == 9:
                self.model = LightCNN_9Layers(drop_ratio=conf.drop_ratio).to(
                    conf.device)
            elif conf.net_depth == 29:
                self.model = LightCNN_29Layers(drop_ratio=conf.drop_ratio).to(
                    conf.device)
            else:
                raise NotImplementedError(
                    "Model {}_{} is not implemented".format(
                        conf.net_mode, conf.net_depth))
        else:
            raise NotImplementedError("Model {}_{} is not implemented".format(
                conf.net_mode, conf.net_depth))

        assert self.model is not None, "Model is NONE!!"

        if train:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_ADAM:
                self.optimizer = optim.Adam(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    betas=(0.9, 0.999))
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)

            # if conf.cosine_lr == True:
            #     self.scheduler = CosineAnnealingWarmUpRestarts(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                conf.data_folder)
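Most variants store conf.milestones but leave the LR scheduler commented out and decay the learning rate manually in their training loops. The equivalent effect with a built-in scheduler, as a standalone sketch with a stand-in model rather than the repo's training loop:

import torch
from torch import optim

model = torch.nn.Linear(512, 10)  # stand-in for the backbone
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

# milestones are the epochs at which the learning rate is multiplied by gamma
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                           milestones=[12, 15, 18],
                                           gamma=0.1)

for epoch in range(20):
    # ... one training epoch would run here ...
    scheduler.step()  # lr: 0.1 -> 0.01 -> 0.001 -> 0.0001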
Example #15
    def __init__(self, conf, inference=False, need_loader=True):
        print(conf)
        if conf.use_mobilfacenet:
            # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            self.model = torch.nn.DataParallel(
                MobileFaceNet(conf.embedding_size)).cuda()
            print('MobileFaceNet model generated')
        else:
            # self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
            self.model = torch.nn.DataParallel(
                Backbone(conf.net_depth, conf.drop_ratio,
                         conf.net_mode)).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            if need_loader:
                # self.loader, self.class_num = get_train_loader(conf)

                self.dataset = Dataset2()
                self.loader = DataLoader(self.dataset,
                                         batch_size=conf.batch_size,
                                         num_workers=conf.num_workers,
                                         shuffle=True,
                                         pin_memory=True)

                # self.loader = Loader2(conf)
                self.class_num = 85164
                print(self.class_num, 'classes, load ok ')
            else:
                import copy
                conf_t = copy.deepcopy(conf)
                conf_t.data_mode = 'emore'
                self.loader, self.class_num = get_train_loader(conf_t)
                print(self.class_num)
                self.class_num = 85164
            lz.mkdir_p(conf.log_path, delete=True)
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            if conf.loss == 'arcface':
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num).to(conf.device)
            elif conf.loss == 'softmax':
                self.head = MySoftmax(embedding_size=conf.embedding_size,
                                      classnum=self.class_num).to(conf.device)
            else:
                raise ValueError(f'{conf.loss}')

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)
            print('optimizers generated')
            self.board_loss_every = 100  # len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root_path)
        else:
            self.threshold = conf.threshold
Example #16
    def __init__(self, conf, inference=False):
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            self.growup = GrowUP().to(conf.device)
            self.discriminator = Discriminator().to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:

            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            if conf.discriminator:
                self.child_loader, self.adult_loader = get_train_loader_d(conf)

            os.makedirs(conf.log_path, exist_ok=True)
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            # No longer used
            if conf.use_dp:
                self.model = nn.DataParallel(self.model)
                self.head = nn.DataParallel(self.head)

            print(self.class_num)
            print(conf)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            if conf.discriminator:
                self.optimizer_g = optim.Adam(self.growup.parameters(),
                                              lr=1e-4,
                                              betas=(0.5, 0.999))
                self.optimizer_g2 = optim.Adam(self.growup.parameters(),
                                               lr=1e-4,
                                               betas=(0.5, 0.999))
                self.optimizer_d = optim.Adam(self.discriminator.parameters(),
                                              lr=1e-4,
                                              betas=(0.5, 0.999))
                self.optimizer2 = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)

            if conf.finetune_model_path is not None:
                self.optimizer = optim.SGD([{
                    'params': paras_wo_bn,
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                                           lr=conf.lr,
                                           momentum=conf.momentum)
            print('optimizers generated')

            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 2
            self.save_every = len(self.loader)

            dataset_root = "/home/nas1_userD/yonggyu/Face_dataset/face_emore"
            self.lfw = np.load(
                os.path.join(dataset_root,
                             "lfw_align_112_list.npy")).astype(np.float32)
            self.lfw_issame = np.load(
                os.path.join(dataset_root, "lfw_align_112_label.npy"))
            self.fgnetc = np.load(
                os.path.join(dataset_root,
                             "FGNET_new_align_list.npy")).astype(np.float32)
            self.fgnetc_issame = np.load(
                os.path.join(dataset_root, "FGNET_new_align_label.npy"))
        else:
            # No longer used
            # self.model = nn.DataParallel(self.model)
            self.threshold = conf.threshold
Example #17
    def __init__(self, conf, inference=False):
        # for time logging
        self.time_start = datetime.datetime.now()
        print("time_start: ", self.time_start + datetime.timedelta(hours=9))
        print(conf)
        if conf.net_mode == 'mobilefacenet':
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        elif conf.net_mode == 'efficientnet':
            self.model = EfficientNet.from_name('efficientnet-b' +
                                                str(conf.net_depth))
            # fc1 = torch.nn.Linear(1280, conf.embedding_size, bias=True)
            bn1 = torch.nn.BatchNorm1d(conf.embedding_size)
            # self.model = torch.nn.Sequential(self.model, fc1, bn1)
            fc_in = self.model._fc.in_features
            self.model._fc = torch.nn.Linear(fc_in, conf.embedding_size)
            self.model = torch.nn.Sequential(self.model, bn1)
            self.model = self.model.to(conf.device)
        elif conf.net_mode == 'mobilenetv3':
            self.model = mobilenetv3_large()
            fc_in = self.model.classifier[3].in_features
            self.model.classifier[3] = torch.nn.Linear(fc_in,
                                                       conf.embedding_size)
            self.model = self.model.to(conf.device)
        elif conf.net_mode in ['ir', 'ir_se']:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))
        else:
            print('net_mode error!')
            sys.exit(-1)

        # check parameter of model
        print("------------------------------------------------------------")
        total_params = sum(p.numel() for p in self.model.parameters())
        print("num of parameter :", total_params / 1000000, "M")
        trainable_params = sum(p.numel() for p in self.model.parameters()
                               if p.requires_grad)
        print("num of trainable_ parameter :", trainable_params / 1000000, "M")
        print("------------------------------------------------------------")

        if not inference:
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter()
            self.step = conf.start_step
            if conf.net_mode in ['efficientnet', 'mobilenetv3']:
                self.head = ArcMarginProduct(conf.embedding_size,
                                             self.class_num,
                                             s=30,
                                             m=0.5,
                                             easy_margin=False).to(conf.device)
            else:
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num,
                                    s=30).to(conf.device)
            self.lr_gamma = conf.lr_gamma

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            print("Initialize optimizer")
            if conf.net_mode == 'mobilefacenet':
                if conf.optimizer_mode == 'SGD':
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            elif conf.net_mode == 'efficientnet':
                if conf.optimizer_mode == 'Adam':
                    self.optimizer = optim.Adam(
                        [{
                            'params': self.model.parameters()
                        }], lr=conf.lr)
                elif conf.optimizer_mode == 'RMSprop':
                    self.optimizer = optim.RMSprop(
                        [{
                            'params': self.model.parameters()
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            elif conf.net_mode == 'mobilenetv3':
                self.optimizer = optim.Adam([{
                    'params': self.model.parameters()
                }],
                                            lr=conf.lr)
            else:  ## ir, ir_se
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)

            # for i in range(conf.start_epoch):  # advance the scheduler by start_epoch steps before resuming
            #     self.scheduler.step()
            if conf.scheduler_mode == 'auto':
                self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                    self.optimizer,
                    mode='max',
                    factor=self.lr_gamma,
                    patience=conf.patience,
                    verbose=True)
            elif conf.scheduler_mode == 'multistep':
                self.scheduler = optim.lr_scheduler.MultiStepLR(
                    optimizer=self.optimizer,
                    milestones=conf.milestones,
                    gamma=self.lr_gamma)

            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader)
            self.save_every = len(self.loader)
        else:
            self.threshold = conf.threshold
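The two scheduler modes are only constructed in this __init__; a minimal sketch of how they would typically be stepped in the training loop (train_epoch and evaluate are hypothetical stand-ins, not functions from this repo):

# Hedged sketch: driving the schedulers built above.
for epoch in range(num_epochs):
    train_epoch(self.model, self.loader, self.optimizer)   # hypothetical
    accuracy = evaluate(self.model)                        # hypothetical; higher is better
    if conf.scheduler_mode == 'auto':
        # ReduceLROnPlateau(mode='max') multiplies the LR by lr_gamma after
        # `patience` evaluations without an accuracy improvement
        self.scheduler.step(accuracy)
    elif conf.scheduler_mode == 'multistep':
        # MultiStepLR multiplies the LR by lr_gamma at each milestone epoch
        self.scheduler.step()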
Example #18
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        elif conf.use_shufflenetV2:
            #self.model = ShuffleNetV2().to(conf.device)
            self.model = model.shufflenet().to(conf.device)
            print("ShufflenetV2 model generated")
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            # elif conf.use_shufflenetV2:
            #     self.optimizer = optim.SGD([
            #                         {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
            #                         {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
            #                         {'params': paras_only_bn}
            #                     ], lr = conf.lr*10, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr * 10,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                '/root/NewDisk/daxing2/WW/data/faces_vgg_112x112/'
            )  #self.loader.dataset.root.parent
        else:
            self.threshold = conf.threshold
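separate_bn_paras appears in all of these examples so the optimizers can exempt BatchNorm parameters from weight decay (decaying a BN scale/shift only fights the normalization). A minimal sketch of what it is assumed to do, not the repo's exact implementation:

import torch.nn as nn

def separate_bn_paras_sketch(model):
    # split parameters so BatchNorm weights/biases get no weight decay
    paras_only_bn, paras_wo_bn = [], []
    for module in model.modules():
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            paras_only_bn.extend(module.parameters(recurse=False))
        else:
            paras_wo_bn.extend(module.parameters(recurse=False))
    return paras_only_bn, paras_wo_bn

Note that the examples also rely on paras_wo_bn[-1] being the final embedding layer's weight, which depends on module registration order.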
Example #19
def train(epochs):
    logging.debug("Prepare Data")

    all_class = os.listdir(data_folder)
    class_num = len(all_class)
    regular_class = []
    for idx, i in enumerate(all_class):
        if len(os.listdir(os.path.join(data_folder, i))) > 20:
            regular_class.append(i)
        if idx % 20000 == 0:
            print("Processing...", idx)
    loader, class_num = get_train_loader(data_folder_stage2, batch_size)
    agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame = get_val_data(
        val_folder)

    ## Load Model
    enc = Encoder().to(device)
    dec = Decoder().to(device)
    r = Distillation_R().to(device)
    head = FC_softmax(320, class_num).to(device)

    enc.load_state_dict(
        torch.load(
            os.path.join(pretrained_model_path,
                         'enc_{}_{}.pth'.format(assigned_epoch,
                                                assigned_step))))
    dec.load_state_dict(
        torch.load(
            os.path.join(pretrained_model_path,
                         'dec_{}_{}.pth'.format(assigned_epoch,
                                                assigned_step))))
    r.load_state_dict(
        torch.load(
            os.path.join(pretrained_model_path,
                         'r_{}_{}.pth'.format(assigned_epoch, assigned_step))))
    head.load_state_dict(
        torch.load(
            os.path.join(
                pretrained_model_path,
                'head_{}_{}.pth'.format(assigned_epoch, assigned_step))))
    enc.eval()
    dec.eval()
    r.eval()
    head.eval()
    model = [enc, dec, r]

    ## Set Training Criterion
    ce_loss = nn.CrossEntropyLoss()
    l2_loss = nn.MSELoss()
    optimizer = optim.Adam(model[0].parameters(), lr=0.00001)
    optimizer.add_param_group({'params': model[1].parameters()})
    optimizer.add_param_group({'params': model[2].parameters()})
    optimizer.add_param_group({'params': head.parameters()})

    logging.debug("Start Training")
    ## Initial Training
    running_loss = 0
    step = 0
    acc_max = 0

    for e in range(epochs):
        for stage in [1, 2]:
            ## Initial Train For Stage 1 and 2
            if stage == 1:
                center, Q, h, ur_class_file = None, None, None, None
                center, Q, h, ur_class_file = UpdateStats(
                    model[0], data_folder, all_class, regular_class)
                logging.debug(
                    "Center_num:{}, h_num:{}, ur_class_num:{}".format(
                        len(center), len(h), len(ur_class_file)))
            else:
                iterr = iter(loader)
            ## Start Training
            for step_stage in range(20000):
                if stage == 1:
                    regular_batch = random.sample(range(len(h)), batch_size)
                    ur_batch = random.sample(range(len(ur_class_file)),
                                             batch_size)
                    g_r_list, g_u_list, g_t_list = [], [], []
                    label_r_list, label_u_list, label_t_list = [], [], []
                    for idx_r, idx_u in zip(regular_batch, ur_batch):
                        ## Prepare data for the first batch (regular samples)
                        label_r, g_r = h[idx_r]
                        c_r = center[label_r]
                        g_r_list.append(g_r)
                        label_r_list.append(torch.tensor(int(label_r)))
                        ##Prepare data for Second Batch
                        label_u, g_u = ur_class_file[idx_u]
                        c_u = center[label_u]
                        g_u_list.append(g_u)
                        label_u_list.append(torch.tensor(int(label_u)))
                        ##Prepare data for Third Batch
                        g_t = c_u + torch.mm(Q, (g_r - c_r).t()).t()
                        g_t_list.append(g_t.view(-1))
                        label_t_list.append(torch.tensor(int(label_u)))

                    g_r_list = torch.stack(g_r_list).to(device)
                    label_r_list = torch.stack(label_r_list).to(device)
                    g_u_list = torch.stack(g_u_list).to(device)
                    label_u_list = torch.stack(label_u_list).to(device)
                    g_t_list = torch.stack(g_t_list).to(device)
                    label_t_list = torch.stack(label_t_list).to(device)

                    for g, labels in zip(
                        [g_r_list, g_u_list, g_t_list],
                        [label_r_list, label_u_list, label_t_list]):
                        optimizer.zero_grad()
                        embs = model[2](g)
                        fc_out = head(embs)
                        loss_ce = ce_loss(fc_out, labels)
                        loss_reg = l2_norm(fc_out)
                        loss = loss_ce + loss_reg * 0.25
                        loss.backward()
                        running_loss += loss.item()
                        optimizer.step()
                    running_loss /= 3

                elif stage == 2:
                    imgs, labels = next(iterr)
                    imgs = imgs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    g = model[0](imgs)
                    img_decs = model[1](g)
                    with torch.no_grad():
                        embs = model[2](g)
                        fc_out = head(embs)
                    loss_ce = ce_loss(fc_out, labels)
                    loss_mse = l2_loss(imgs, img_decs)
                    loss_reg = l2_norm(fc_out)
                    loss = loss_ce + loss_mse + loss_reg * 0.25
                    loss.backward()
                    running_loss += loss.item()
                    optimizer.step()

                if step % board_loss_every == 0 and step != 0:
                    loss_board = running_loss / board_loss_every
                    printout = "stage:{}, step:{}, epoch:{}, train_loss:{}".format(
                        stage, step, e, loss_board)
                    logging.debug(printout)
                    running_loss = 0

                if step % evaluate_every == 0 and step != 0:
                    accuracy, best_threshold = verify(model, agedb_30,
                                                      agedb_30_issame)
                    printout = "dataset:age30db, acc:{}, best_threshold:{}".format(
                        accuracy, best_threshold)
                    logging.debug(printout)
                    accuracy, best_threshold = verify(model, lfw, lfw_issame)
                    printout = "dataset:lfw, acc:{}, best_threshold:{}".format(
                        accuracy, best_threshold)
                    logging.debug(printout)
                    accuracy, best_threshold = verify(model, cfp_fp,
                                                      cfp_fp_issame)
                    printout = "dataset:cfp_fp, acc:{}, best_threshold:{}".format(
                        accuracy, best_threshold)
                    logging.debug(printout)
                    if accuracy > acc_max:
                        torch.save(
                            model[0].state_dict(),
                            '{}/enc_alter_{}_{}.pth'.format(
                                save_path, e, step))
                        torch.save(
                            model[1].state_dict(),
                            '{}/dec_alter_{}_{}.pth'.format(
                                save_path, e, step))
                        torch.save(
                            model[2].state_dict(),
                            '{}/r_alter_{}_{}.pth'.format(save_path, e, step))
                        torch.save(
                            head.state_dict(),
                            '{}/head_alter_{}_{}.pth'.format(
                                save_path, e, step))
                        acc_max = accuracy
                        logging.debug("Save ckpt at epoch:{} step:{}".format(
                            e, step))

                step += 1
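Stage 1 above synthesizes features for under-represented classes: g_t = c_u + Q(g_r - c_r)^T transplants a regular sample's offset from its own class center onto an under-represented class center. A toy illustration with made-up sizes (320 matches the FC_softmax input above; the real Q comes from UpdateStats):

import torch

dim = 320                      # matches FC_softmax(320, class_num) above
Q = torch.eye(dim)             # identity projection for the toy case
c_r = torch.zeros(1, dim)      # regular-class center
c_u = torch.ones(1, dim)       # under-represented-class center
g_r = c_r + 0.1 * torch.randn(1, dim)          # a regular sample's feature
g_t = c_u + torch.mm(Q, (g_r - c_r).t()).t()   # synthetic feature for class u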
Example #20
    def __init__(self, conf, inference=False):
        print(conf)
        self.lr = conf.lr
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            ###############################  ir_se50  ########################################
            if conf.struct == 'ir_se_50':
                self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                      conf.net_mode).to(conf.device)
                print('{}_{} model generated'.format(conf.net_mode,
                                                     conf.net_depth))
            ###############################  resnet101  ######################################
            elif conf.struct == 'ir_se_101':
                self.model = resnet101().to(conf.device)
                print('resnet101 model generated')
        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)        

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            
            ###############################  ir_se50  ########################################
            if conf.struct == 'ir_se_50':
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num).to(conf.device)
                self.head_race = Arcface(embedding_size=conf.embedding_size,
                                         classnum=4).to(conf.device)
            ###############################  resnet101  ######################################
            elif conf.struct == 'ir_se_101':
                self.head = ArcMarginModel(embedding_size=conf.embedding_size,
                                           classnum=self.class_num).to(conf.device)
                # classnum=4 matches the four-way race head above; the original
                # passed self.class_num here, which looks like a copy-paste slip
                self.head_race = ArcMarginModel(embedding_size=conf.embedding_size,
                                                classnum=4).to(conf.device)
            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            
            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            print('len of loader:', len(self.loader))
            self.board_loss_every = len(self.loader) // min(len(self.loader), 100)
            self.evaluate_every = len(self.loader)//1
            self.save_every = len(self.loader)//1
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(conf.val_folder)
        else:
            #self.threshold = conf.threshold
            pass
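With two heads sharing one backbone, a training step presumably feeds the same embedding to both and sums the losses. A hedged sketch of such a step (race_labels and its 0..3 encoding are assumptions, not shown in the snippet):

import torch.nn.functional as F

# Hedged sketch of a joint training step for the two heads above.
self.optimizer.zero_grad()
embeddings = self.model(imgs)
thetas_id = self.head(embeddings, labels)               # identity logits
thetas_race = self.head_race(embeddings, race_labels)   # race logits (0..3)
loss = (F.cross_entropy(thetas_id, labels) +
        F.cross_entropy(thetas_race, race_labels))
loss.backward()
self.optimizer.step()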
Example #21
    def __init__(self, conf, inference=False):
        accuracy = 0.0
        logger.debug(conf)
        if conf.use_mobilfacenet:
            # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            self.model = MobileFaceNet(conf.embedding_size).cuda()
            logger.debug('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).cuda()  #.to(conf.device)
            logger.debug('{}_{} model generated'.format(
                conf.net_mode, conf.net_depth))
        if not inference:
            self.milestones = conf.milestones
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            logger.info('loading data...')
            self.loader_arc, self.class_num_arc = get_train_loader(
                conf, 'emore', sample_identity=False)
            # self.loader_tri, self.class_num_tri = get_train_loader(conf, 'glint', sample_identity=True)
            emore_root = conf.data_path / 'train' / 'faces_emore_16_per_peron' / 'imgs'
            kc_root = conf.data_path / 'test' / 'kc_employee_dynamic_112'
            self.loader_tri, self.class_num_tri = get_train_loader_concat(
                conf, [emore_root, kc_root], sample_identity=True)

            self.head_arc = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num_arc).cuda()
            self.head_tri = Triplet().cuda()
            logger.debug('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head_arc.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head_arc.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            # self.optimizer = torch.nn.parallel.DistributedDataParallel(optimizer,device_ids=[conf.argsed])
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            # resume training from a checkpoint
            if conf.resume:
                logger.debug('resume...')
                self.load_state(conf, 'last.pth', from_save_folder=True)

            if conf.fp16:
                self.model, self.optimizer = amp.initialize(self.model,
                                                            self.optimizer,
                                                            opt_level="O2")
                self.model = DistributedDataParallel(self.model).cuda()
            else:
                self.model = torch.nn.parallel.DistributedDataParallel(
                    self.model,
                    device_ids=[conf.argsed],
                    find_unused_parameters=True).cuda()  # added for distributed training

            self.board_loss_every = len(self.loader_arc) // 100
            self.evaluate_every = len(self.loader_arc) // 2
            self.save_every = len(self.loader_arc) // 2

            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(Path(self.loader_arc.dataset.root).parent)
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                conf.emore_folder)
        else:
            self.threshold = conf.threshold
            self.loader, self.query_ds, self.gallery_ds = get_test_loader(conf)
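Under the conf.fp16 branch above, the matching training step would scale the loss before backward so half-precision gradients do not underflow. A hedged sketch using the same apex amp API (the surrounding loop and loss are assumptions):

import torch.nn.functional as F
from apex import amp  # same apex install used by the fp16 branch above

# Hedged sketch of one fp16 training step after amp.initialize(..., opt_level="O2").
self.optimizer.zero_grad()
embeddings = self.model(imgs)
thetas = self.head_arc(embeddings, labels)
loss = F.cross_entropy(thetas, labels)
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
    scaled_loss.backward()   # backprop on the scaled loss
self.optimizer.step()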
Example #22
def train(epochs):
    logging.debug("Prepare Data")

    loader, class_num = get_train_loader(data_folder, batch_size)
    agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame = get_val_data(
        val_folder)

    ## Load Model
    enc = Encoder().to(device)
    dec = Decoder().to(device)
    r = Distillation_R().to(device)
    model = [enc, dec, r]
    head = FC_softmax(320, class_num).to(device)
    if pretrained:
        enc.load_state_dict(
            torch.load(
                os.path.join(
                    pretrained_model_path,
                    'enc_{}_{}.pth'.format(assigned_epoch, assigned_step))))
        dec.load_state_dict(
            torch.load(
                os.path.join(
                    pretrained_model_path,
                    'dec_{}_{}.pth'.format(assigned_epoch, assigned_step))))
        r.load_state_dict(
            torch.load(
                os.path.join(
                    pretrained_model_path,
                    'r_{}_{}.pth'.format(assigned_epoch, assigned_step))))
        head.load_state_dict(
            torch.load(
                os.path.join(
                    pretrained_model_path,
                    'head_{}_{}.pth'.format(assigned_epoch, assigned_step))))
        enc.eval()
        dec.eval()
        r.eval()
        head.eval()

    model = [enc, dec, r]

    ## Set Training Criterion
    ce_loss = nn.CrossEntropyLoss()
    l2_loss = nn.MSELoss()
    optimizer = optim.Adam(model[0].parameters(), lr=0.0002)
    optimizer.add_param_group({'params': model[1].parameters()})
    optimizer.add_param_group({'params': model[2].parameters()})
    optimizer.add_param_group({'params': head.parameters()})

    ## Initial Training
    running_loss = 0
    step = 0
    acc_max = 0

    logging.debug("Start Training")
    for e in range(epochs):
        for imgs, labels in iter(loader):
            imgs = imgs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            g = model[0](imgs)
            img_decs = model[1](g)
            embs = model[2](g)
            fc_out = head(embs)
            loss_ce = ce_loss(fc_out, labels)
            loss_mse = l2_loss(imgs, img_decs)
            loss_reg = l2_norm(fc_out)
            loss = loss_ce + loss_mse + loss_reg * 0.25
            loss.backward()
            running_loss += loss.item()
            optimizer.step()

            if step % board_loss_every == 0 and step != 0:
                loss_board = running_loss / board_loss_every
                printout = "step:{}, epoch:{}, train_loss:{}".format(
                    step, e, loss_board)
                logging.debug(printout)
                running_loss = 0.

            if step % evaluate_every == 0 and step != 0:
                accuracy, best_threshold = verify(model, agedb_30,
                                                  agedb_30_issame)
                printout = "dataset:age30db, acc:{}, best_threshold:{}".format(
                    accuracy, best_threshold)
                logging.debug(printout)
                accuracy, best_threshold = verify(model, lfw, lfw_issame)
                printout = "dataset:lfw, acc:{}, best_threshold:{}".format(
                    accuracy, best_threshold)
                logging.debug(printout)
                accuracy, best_threshold = verify(model, cfp_fp, cfp_fp_issame)
                printout = "dataset:cfp_fp, acc:{}, best_threshold:{}".format(
                    accuracy, best_threshold)
                logging.debug(printout)
                if accuracy > acc_max:
                    torch.save(model[0].state_dict(),
                               'ckpt/enc_{}_{}.pth'.format(e, step))
                    torch.save(model[1].state_dict(),
                               'ckpt/dec_{}_{}.pth'.format(e, step))
                    torch.save(model[2].state_dict(),
                               'ckpt/r_{}_{}.pth'.format(e, step))
                    torch.save(head.state_dict(),
                               'ckpt/head_{}_{}.pth'.format(e, step))
                    acc_max = accuracy
                    logging.debug("Save ckpt at epoch:{} step:{}".format(
                        e, step))
            step += 1
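verify is assumed to embed each image pair and sweep a similarity threshold, returning (accuracy, best_threshold). A toy sketch of the threshold sweep, assuming precomputed pair similarities:

import numpy as np

def best_threshold_sketch(similarities, issame):
    # similarities: (N,) pair similarity scores; issame: (N,) boolean labels
    candidates = np.arange(-1.0, 1.0, 0.01)
    accs = [((similarities > t) == issame).mean() for t in candidates]
    best = int(np.argmax(accs))
    return accs[best], candidates[best]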
Example #23
    def __init__(self, conf, inference=False, transfer=0):
        pprint.pprint(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                if transfer == 3:
                    self.optimizer = optim.SGD(
                        [{
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
                elif transfer == 2:
                    self.optimizer = optim.SGD([
                        {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                                               lr=conf.lr,
                                               momentum=conf.momentum)
                elif transfer == 1:
                    self.optimizer = optim.SGD([
                        {
                            'params': [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                                               lr=conf.lr,
                                               momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 5  # originally, 100
            self.evaluate_every = len(self.loader) // 5  # originally, 10
            self.save_every = len(self.loader) // 2  # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            self.val_112, self.val_112_issame = get_val_pair(
                self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold
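The transfer levels above only control which parameters the optimizer sees; parameters left out of the param groups still have gradients computed for them. A hedged addition (not in the original) that also freezes the untracked parameters to save backward compute and memory:

# Assumption: explicitly freeze whatever the optimizer ignores; the
# original relies on param-group selection alone.
tracked = {id(p) for group in self.optimizer.param_groups
           for p in group['params']}
for p in list(self.model.parameters()) + list(self.head.parameters()):
    p.requires_grad_(id(p) in tracked)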
Example #24
    def __init__(self, conf, inference=False, embedding_size=512):
        conf.embedding_size = embedding_size
        print(conf)

        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).cuda()
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        parameter_num_cal(self.model)

        self.milestones = conf.milestones
        self.loader, self.class_num = get_train_loader(conf)
        self.step = 0
        self.agedb_30, self.cfp_fp, self.lfw, self.calfw, self.cplfw, self.vgg2_fp, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.calfw_issame, self.cplfw_issame, self.vgg2_fp_issame = get_val_data(
            self.loader.dataset.root.parent)
        self.writer = SummaryWriter(conf.log_path)

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            if conf.multi_sphere:
                if conf.arcface_loss:
                    self.head = ArcfaceMultiSphere(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_shpere=conf.num_sphere,
                        m=conf.m).to(conf.device)
                elif conf.am_softmax:
                    self.head = MultiAm_softmax(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_sphere=conf.num_sphere,
                        m=conf.m).to(conf.device)
                else:
                    self.head = MultiSphereSoftmax(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_sphere=conf.num_sphere).to(conf.device)

            else:
                if conf.arcface_loss:
                    self.head = Arcface(embedding_size=conf.embedding_size,
                                        classnum=self.class_num).to(
                                            conf.device)
                elif conf.am_softmax:
                    self.head = Am_softmax(embedding_size=conf.embedding_size,
                                           classnum=self.class_num).to(
                                               conf.device)
                else:
                    self.head = Softmax(embedding_size=conf.embedding_size,
                                        classnum=self.class_num).to(
                                            conf.device)

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                if conf.multi_sphere:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + self.head.kernel_list,
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            else:
                if conf.multi_sphere:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn + self.head.kernel_list,
                            'weight_decay': 5e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn + [self.head.kernel],
                            'weight_decay': 5e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)

            print(self.optimizer)

            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.calfw, self.cplfw, self.vgg2_fp, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.calfw_issame, self.cplfw_issame, self.vgg2_fp_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
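All the Arcface-style heads in these examples implement the additive angular margin: the target-class cosine cos(theta) is replaced by cos(theta + m) before everything is scaled by s. A minimal standalone sketch of that computation (standard ArcFace math, not this repo's exact kernel code):

import torch
import torch.nn.functional as F

def arcface_logits_sketch(embeddings, weight, labels, s=30.0, m=0.5):
    # cos(theta) between L2-normalized embeddings and class weight vectors
    cos = F.linear(F.normalize(embeddings), F.normalize(weight))
    theta = torch.acos(cos.clamp(-1 + 1e-7, 1 - 1e-7))
    target = F.one_hot(labels, num_classes=cos.size(1)).bool()
    # add the angular margin only on the ground-truth class
    cos = torch.where(target, torch.cos(theta + m), cos)
    return s * cos   # scaled logits fed to cross-entropy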
Example #25
    # non-targeted L2 attack implemented via self-targeting (push away from the source embedding)
    adversary = L2MomentumIterativeAttack(
        learner.model, loss_fn=attack_cosine_distance(target=-torch.ones(1).to(conf.device), margin=0.1), eps=10,
        nb_iter=40, eps_iter=0.2, decay_factor=1., clip_min=-1.0, clip_max=1.0,
        targeted=True)
    '''
    #no-targeted L2 attack    
    adversary = L2MomentumIterativeAttack(
        learner.model, loss_fn=attack_cosine_distance(target=torch.ones(1).to(conf.device)), eps=10,
        nb_iter=40, eps_iter=0.2, decay_factor=1., clip_min=-1.0, clip_max=1.0,
        targeted=False)
    '''

    #image = Image.open("/hd1/anshengnan/InsightFace_Pytorch/data/test.jpg")
    loader, class_num = get_train_loader(conf)

    head = Arcface(embedding_size=conf.embedding_size,
                   classnum=class_num).to(conf.device)

    with ts.snoop():
        for imgs, labels in tqdm(iter(loader)):
            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            embeddings = learner.model(imgs)
            thetas = head(embeddings, labels)
            adv_targeted = adversary.perturb(imgs, labels)
            adv_embeddings = learner.model(adv_targeted)
            adv_thetas = head(adv_embeddings, labels)
            print(labels)
            thetas = list(thetas.squeeze(0))
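attack_cosine_distance is defined elsewhere in the repo; advertorch's contract is loss_fn(model_outputs, y) on the perturbed input. A plausible stand-in built on torch's cosine embedding loss, labeled an assumption (how perturb's second argument is wired to it here is repo-specific):

import torch.nn as nn

# Assumption, not the repo's code: a loss factory in the spirit of
# attack_cosine_distance(target=..., margin=...). target=+1 pulls two
# embeddings together, target=-1 pushes them apart by at least `margin`.
def attack_cosine_distance_sketch(target, margin=0.0):
    cos_loss = nn.CosineEmbeddingLoss(margin=margin)
    def loss_fn(adv_embeddings, ref_embeddings):
        return cos_loss(adv_embeddings, ref_embeddings,
                        target.expand(adv_embeddings.size(0)))
    return loss_fn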
Example #26
    def __init__(self, conf, inference=False, transfer=0, ext='final'):
        pprint.pprint(conf)
        self.conf = conf
        if conf.arch == "mobile":
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        elif conf.arch == "ir_se":
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.arch).to(conf.device)
            print('{}_{} model generated'.format(conf.arch, conf.net_depth))
        elif conf.arch == "resnet50":
            self.model = ResNet(embedding_size=512,
                                arch=conf.arch).to(conf.device)
            print("resnet model {} generated".format(conf.arch))
        else:
            exit("model not supported yet!")

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            tmp_idx = ext.rfind('_')  # find the last '_' to replace it by '/'
            self.ext = '/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:]
            self.writer = SummaryWriter(str(conf.log_path) + self.ext)
            self.step = 0

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if transfer == 3:
                self.optimizer = optim.Adam(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 2:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': paras_wo_bn + [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 1:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            else:
                """
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
                """
                self.optimizer = optim.Adam(list(self.model.parameters()) +
                                            list(self.head.parameters()),
                                            lr=conf.lr)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.save_freq = len(self.loader)  #//5 # originally, 100
            self.evaluate_every = len(self.loader)  #//5 # originally, 10
            self.save_every = len(self.loader)  #//2 # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_112, self.val_112_issame = get_val_pair(self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold

        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_accuracy = []
        self.test_counter = []
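A worked example of the ext-to-log-path mapping in the __init__ above: the last underscore becomes a path separator, so a single string names both an experiment group and a run for the SummaryWriter directory.

ext = 'baseline_lr0.001'            # hypothetical value
tmp_idx = ext.rfind('_')            # index of the last '_'
print('/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:])   # -> /baseline/lr0.001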