Example #1
    def __init__(self, use_mobilfacenet):

        self.embedding_size = 512
        self.net_depth = 50
        self.drop_ratio = 0.6
        self.net_mode = 'ir_se'  # or 'ir'
        self.threshold = 1.2
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")

        self.face_detector = MTCNN()

        self.test_transform = trans.Compose([
            trans.ToTensor(),
            trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        if use_mobilfacenet:
            self.model = MobileFaceNet(self.embedding_size).to(self.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(self.net_depth, self.drop_ratio,
                                  self.net_mode).to(self.device)
            print('{}_{} model generated'.format(self.net_mode,
                                                 self.net_depth))

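This constructor only builds the detector, transform, and model. A minimal usage sketch of the resulting pipeline (detect, normalize, embed) follows; the class name FaceVerifier and the MTCNN align call returning an aligned 112x112 crop are assumptions, not shown in the source.

import torch
from PIL import Image

# Hypothetical usage sketch (class name and MTCNN.align API are assumed):
verifier = FaceVerifier(use_mobilfacenet=True)
img = Image.open('person.jpg')
face = verifier.face_detector.align(img)        # aligned 112x112 face crop (assumed API)
x = verifier.test_transform(face).unsqueeze(0)  # 1x3x112x112, scaled to [-1, 1]
with torch.no_grad():
    verifier.model.eval()
    emb = verifier.model(x.to(verifier.device))  # 1x512 embedding
emb = emb / emb.norm()                           # L2-normalize before cosine comparison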
Example #2
def test(args):
    device = torch.device(('cuda:%d' %
                           args.gpu) if torch.cuda.is_available() else 'cpu')
    BACKBONE = Backbone([args.input_size, args.input_size], args.num_layers,
                        args.mode)
    BACKBONE.load_state_dict(torch.load(args.ckpt_path))
    BACKBONE.to(device)
    BACKBONE.eval()
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    print('Start test at', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # accuracy
    with open(args.pair_file, 'r') as f:
        pairs = f.readlines()
    sims = []
    labels = []
    for pair_id, pair in tqdm.tqdm(enumerate(pairs)):
        # print('processing %d/%d...' % (pair_id, len(pairs)), end='\r')
        splits = pair.split()
        feat1 = get_feature(os.path.join(args.data_root, splits[0]), transform,
                            BACKBONE, device)
        feat2 = get_feature(os.path.join(args.data_root, splits[1]), transform,
                            BACKBONE, device)
        label = int(splits[2])
        sim = np.dot(feat1,
                     feat2) / (np.linalg.norm(feat1) * np.linalg.norm(feat2))
        sims.append(sim)
        labels.append(label)
    acc, th = cal_accuracy(np.array(sims), np.array(labels))
    print('acc=%f with threshold=%f' % (acc, th))
    print('Finish test at', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
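The loop above depends on two helpers that are not shown, get_feature and cal_accuracy. A minimal sketch consistent with how they are called (the exact implementations in the source repo may differ):

import numpy as np
import torch
from PIL import Image

def get_feature(img_path, transform, backbone, device):
    # Load one aligned face crop, apply the test transform, and extract its embedding.
    img = Image.open(img_path).convert('RGB')
    x = transform(img).unsqueeze(0).to(device)
    with torch.no_grad():
        feat = backbone(x)
    return feat.squeeze(0).cpu().numpy()

def cal_accuracy(sims, labels):
    # Sweep each observed similarity as a threshold; keep the best accuracy and its threshold.
    best_acc, best_th = 0.0, 0.0
    for th in sims:
        acc = float(np.mean((sims >= th).astype(int) == labels))
        if acc > best_acc:
            best_acc, best_th = acc, th
    return best_acc, best_th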
Example #3
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(
                '/home/zzg/DeepLearning/InsightFace_Pytorch/work_space/log')
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 29000  #100
            self.evaluate_every = len(self.loader) // 500  ##10
            self.save_every = len(self.loader) // 290  #5
            #            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            self.agedb_30, self.agedb_30_issame = get_val_data(
                '/home/zzg/DeepLearning/InsightFace_Pytorch/data/faces_emore')
        else:
            self.threshold = conf.threshold
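The optimizer setup in these examples calls separate_bn_paras to exempt batch-norm parameters from weight decay, but the helper itself never appears. A minimal sketch consistent with its call sites (the module-type check is an assumption; the repo's implementation may differ):

import torch.nn as nn

def separate_bn_paras(model):
    # Split parameters into batch-norm parameters (trained without weight decay) and the rest.
    paras_only_bn, paras_wo_bn = [], []
    for module in model.modules():
        bucket = paras_only_bn if isinstance(
            module, (nn.BatchNorm1d, nn.BatchNorm2d)) else paras_wo_bn
        bucket.extend(module.parameters(recurse=False))
    return paras_only_bn, paras_wo_bn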
Example #4
    def __init__(self, conf, inference=False):
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            # print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            # print('{}_{} model generated done !'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            # print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            rootdir = os.path.join(args.root_dir, args.rec_path)  # note: `args` is not defined in this scope; a module-level args is assumed
            self.board_loss_every = len(self.loader) // len(self.loader)
            self.evaluate_every = len(self.loader) // 1
            # self.save_every = len(self.loader)//len(self.loader)   # 5
            print('board loss every: {} -> evaluate_every: {} \n'.format(
                self.board_loss_every, self.evaluate_every))
            print('loader paths of validation dataset {}'.format(rootdir))
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                rootdir)
        else:
            self.threshold = conf.threshold
Example #5
    def __init__(self, conf, inference=False):
        print(conf)
        print(conf.use_mobilfacenet)
        input("CONF")
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
        
        if not inference:
            self.milestones = conf.milestones
            # Dataset loader
            # returns an ImageLoader dataset loader
            self.loader, self.class_num = get_train_loader(conf)        

            # tensorboardX class for saving logs
            # log_path is the path where the statistics are saved
            self.writer = SummaryWriter(conf.log_path)


            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)

            print('two model heads generated')
            # paras_only_bn contains the layers' batch-norm parameters
            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            
            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # Parameters that control how often to log, evaluate, and save the model
            self.board_loss_every = len(self.loader)//10
            self.evaluate_every = len(self.loader)//10
            self.save_every = len(self.loader)//5
            print("DATASET")
            print(self.loader.dataset.root)
            # return the arrays and labels of the different VALIDATION dataset folders
            self.agedb_30 ,self.agedb_30_issame = get_val_data(self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #6
File: train.py  Project: buaazyz/face_reg
    def __init__(self, conf, inference=False):
        print(conf)
        self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
        print('loading finished')
        if not inference:
            self.milestones = conf.milestones
            dataset = Dataset(root=conf.data_path,
                              data_list_file=conf.datalist_path,
                              phase='train',
                              input_shape=(1, 112, 112))
            self.loader = data.DataLoader(dataset, conf.batch_size)
            self.class_num = conf.class_num
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')

            self.load_state(conf, '2019-11-10-22-01.pth', True, False)
            # self.load_state(conf,'111.pth',False,True)
            print('loaded saved state')

            self.board_loss_every = len(self.loader) // 2
            print(len(self.loader))
            # self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 2
            # self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 2
            # self.save_every = len(self.loader) // 5
            # self.essex, self.essex_issame = get_val_data(
            #     self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #7
    def __init__(self, backbone, test_model_path):
        super().__init__()
        #        if backbone == 'resnet_face18':
        #            self.model = resnet_face()
        #        elif backbone == 'resnet18':
        #            self.model = resnet18()
        #        elif backbone == 'resnet34':
        #            self.model = resnet34()
        #        elif backbone == 'resnet50':
        #            self.model = resnet50()
        #        elif backbone == 'resnet101':
        #            self.model = resnet101()
        #        elif backbone == 'resnet152':
        #            self.model = resnet152()
        if backbone == 'resnet50':
            self.model = Backbone(50, drop_ratio=0, mode='ir_se')
        elif backbone == 'resnet101':
            self.model = Backbone(100, drop_ratio=0, mode='ir_se')
        elif backbone == 'resnet152':
            self.model = Backbone(152, drop_ratio=0, mode='ir_se')

        if test_model_path:
            self.model.load_state_dict(torch.load(test_model_path))
            print('arcface model loaded')
Example #8
    def __init__(self, conf, inference=False):
        accuracy = 0.0
        logger.debug(conf)
        if conf.use_mobilfacenet:
            # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            self.model = MobileFaceNet(conf.embedding_size).cuda()
            logger.debug('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).cuda()#.to(conf.device)
            logger.debug('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
        if not inference:
            self.milestones = conf.milestones
            logger.info('loading data...')
            self.loader, self.class_num = get_train_loader(conf, 'emore', sample_identity=True)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = CircleLoss(m=0.25, gamma=256.0).cuda()

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            else:
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn, 'weight_decay': 5e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
            # self.optimizer = torch.nn.parallel.DistributedDataParallel(optimizer,device_ids=[conf.argsed])
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            if conf.fp16:
                self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O2")
                self.model = DistributedDataParallel(self.model).cuda()
            else:
                self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[conf.argsed]).cuda() #add line for distributed

            self.board_loss_every = len(self.loader)//100
            self.evaluate_every = len(self.loader)//2
            self.save_every = len(self.loader)//2
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(Path(self.loader.dataset.root).parent)
        else:
            self.threshold = conf.threshold
            self.loader, self.query_ds, self.gallery_ds = get_test_loader(conf)
Example #9
    def __init__(self, conf, inference=False, transfer=0, ext='final'):
        pprint.pprint(conf)
        self.conf = conf
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            tmp_idx = ext.rfind('_')  # find the last '_' to replace it by '/'
            self.ext = '/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:]
            self.writer = SummaryWriter(str(conf.log_path) + self.ext)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            self.optimizer = optim.Adam(
                list(self.model.parameters()) + list(self.head.parameters()),
                conf.lr)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.save_freq = len(self.loader) // 5  #//5 # originally, 100
            self.evaluate_every = len(self.loader)  #//5 # originally, 10
            self.save_every = len(self.loader)  #//2 # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_112, self.val_112_issame = get_val_pair(self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold

        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_accuracy = []
        self.test_counter = []
Example #10
    def __init__(self, conf, inference=False):
        print(conf)
        # self.loader, self.class_num = construct_msr_dataset(conf)
        self.loader, self.class_num = get_train_loader(conf)
        self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode)
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = QAMFace(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)
            self.focalLoss = FocalLoss()

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            self.optimizer = optim.SGD(
                [{
                    'params': paras_wo_bn + [self.head.kernel],
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                lr=conf.lr,
                momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 1000
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 2
        else:
            self.threshold = conf.threshold

        # Multi-GPU training
        self.model = torch.nn.DataParallel(self.model)
        self.model.to(conf.device)
        self.head = torch.nn.DataParallel(self.head)
        self.head = self.head.to(conf.device)
Example #11
def create_model(num_classes=21, device=torch.device('cpu')):
    backbone = Backbone(pretrain_path='./pretrain/resnet50.pth')
    model = SSD300(backbone=backbone, num_classes=num_classes)

    pre_ssd_path = './pretrain/nvidia_ssdpyt_fp32.pt'
    pre_model_dict = torch.load(pre_ssd_path, map_location=device)
    pre_weights_dict = pre_model_dict['model']

    # only use the pre_trained bounding boxes regression weights
    del_conf_loc_dict = {}
    for k, v in pre_weights_dict.items():
        split_key = k.split('.')
        if 'conf' in split_key:
            continue
        del_conf_loc_dict.update({k: v})

    missing_keys, unexpected_keys = model.load_state_dict(del_conf_loc_dict,
                                                          strict=False)
    # if len(missing_keys) != 0 or len(unexpected_keys) != 0:
    #     print('missing_keys: ', missing_keys)
    #     print('unexpected_keys: ', unexpected_keys)
    return model
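Usage note: dropping every key whose dotted path contains 'conf' discards the class-confidence head (whose shape depends on num_classes) while reusing the pretrained box-regression weights, so only the new head must be retrained. A short, hedged usage sketch:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = create_model(num_classes=21, device=device).to(device)
model.train()  # the reinitialized confidence head still needs training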
Example #12
def train(args):
    # gpu init
    multi_gpu = False
    if len(args.gpus.split(',')) > 1:
        multi_gpu = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    D = MultiscaleDiscriminator(
        input_nc=3,
        ndf=64,
        n_layers=3,
        use_sigmoid=False,
        norm_layer=torch.nn.InstanceNorm2d)  # pix2pix use MSEloss
    G = AAD_Gen()
    F = Backbone(50, drop_ratio=0.6, mode='ir_se')
    F.load_state_dict(torch.load(args.arc_model_path))
    E = Att_Encoder()

    optimizer_D = torch.optim.Adam(D.parameters(),
                                   lr=0.0004,
                                   betas=(0.0, 0.999))
    optimizer_GE = torch.optim.Adam([{
        'params': G.parameters()
    }, {
        'params': E.parameters()
    }],
                                    lr=0.0004,
                                    betas=(0.0, 0.999))

    if multi_gpu:
        D = DataParallel(D).to(device)
        G = DataParallel(G).to(device)
        F = DataParallel(F).to(device)
        E = DataParallel(E).to(device)
    else:
        D = D.to(device)
        G = G.to(device)
        F = F.to(device)
        E = E.to(device)

    if args.resume:
        if os.path.isfile(args.resume_model_path):
            print("Loading checkpoint from {}".format(args.resume_model_path))
            checkpoint = torch.load(args.resume_model_path)
            args.start_epoch = checkpoint["epoch"]
            D.load_state_dict(checkpoint["state_dict_D"])
            G.load_state_dict(checkpoint["state_dict_G"])
            E.load_state_dict(checkpoint["state_dict_E"])
            #            optimizer_G.load_state_dict(checkpoint['optimizer_G'])
            optimizer_D.load_state_dict(checkpoint['optimizer_D'])
            optimizer_GE.load_state_dict(checkpoint['optimizer_GE'])
        else:
            print('Cannot find checkpoint {}'.format(args.resume_model_path))
    else:
        args.start_epoch = 1

    def print_with_time(string):
        print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + string)

    def weights_init(m):
        classname = m.__class__.__name__
        if isinstance(m, nn.Conv2d):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        if classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)

    def set_requires_grad(nets, requires_grad=False):

        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def trans_batch(batch):
        t = trans.Compose(
            [trans.ToPILImage(),
             trans.Resize((112, 112)),
             trans.ToTensor()])
        bs = batch.shape[0]
        res = torch.ones(bs, 3, 112, 112).type_as(batch)
        for i in range(bs):
            res[i] = t(batch[i].cpu())
        return res

    set_requires_grad(F, requires_grad=False)
    data_transform = trans.Compose([
        trans.ToTensor(),
        trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    #dataset = ImageFolder(args.data_path, transform=data_transform)
    dataset = FaceEmbed(args.data_path)
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             drop_last=True)
    D.apply(weights_init)
    G.apply(weights_init)
    E.apply(weights_init)

    for epoch in range(args.start_epoch, args.total_epoch + 1):
        D.train()
        G.train()
        F.eval()  # only extract features!  input dim=3,256,256, out dim=256
        E.train()

        for batch_idx, data in enumerate(data_loader):
            time_curr = time.time()
            iteration = (epoch - 1) * len(data_loader) + batch_idx
            try:
                source, target, label = data

                source = source.to(device)
                target = target.to(device)
                label = torch.LongTensor(label).to(device)

                #Zid =F(trans_batch(source))  # bs, 512
                Zid = F(
                    downsample(source[:, :, 50:-10, 30:-30], size=(112, 112)))
                Zatt = E(target)  # list:8  each:bs,,,
                Yst0 = G(Zid, Zatt)  # bs,3,256,256

                # train discriminators
                pred_gen = D(Yst0.detach())
                #pred_gen = list(map(lambda x: x[0].detach(), pred_gen))
                pred_real = D(target)
                optimizer_D.zero_grad()
                loss_real, loss_fake = loss_hinge_dis()(pred_gen, pred_real)
                L_dis = loss_real + loss_fake
                #    if batch_idx%3==0:
                L_dis.backward()
                optimizer_D.step()

                # train generators
                pred_gen = D(Yst0)
                L_gen = loss_hinge_gen()(pred_gen)
                #L_id = IdLoss()(F(trans_batch(Yst0)), Zid)
                L_id = IdLoss()(F(
                    downsample(Yst0[:, :, 50:-10, 30:-30], size=(112, 112))),
                                Zid)
                #Zatt = list(map(lambda x: x.detach(), Zatt))
                L_att = AttrLoss()(E(Yst0), Zatt)
                L_Rec = RecLoss()(Yst0, target, label)

                Loss = (L_gen + 10 * L_att + 5 * L_id + 10 * L_Rec).to(device)
                optimizer_GE.zero_grad()
                Loss.backward()
                optimizer_GE.step()

            except Exception as e:
                print(e)
                continue

            if batch_idx % args.log_interval == 0 or batch_idx == 20:
                time_used = time.time() - time_curr
                print_with_time(
                    'Train Epoch: {} [{}/{} ({:.0f}%)], L_dis:{:.4f}, loss_real:{:.4f}, loss_fake:{:.4f}, Loss:{:.4f}, L_gen:{:.4f}, L_id:{:.4f}, L_att:{:.4f}, L_Rec:{:.4f}'
                    .format(
                        epoch, batch_idx * len(data), len(data_loader.dataset),
                        100. * batch_idx *
                        len(data) / len(data_loader.dataset), L_dis.item(),
                        loss_real.item(), loss_fake.item(), Loss.item(),
                        L_gen.item(), 5 * L_id.item(), 10 * L_att.item(),
                        10 * L_Rec.item()))
                time_curr = time.time()

        if epoch % args.save_interval == 0:  #or batch_idx*len(data) % 350004==0:
            state = {
                "epoch": epoch,
                "state_dict_D": D.state_dict(),
                "state_dict_G": G.state_dict(),
                "state_dict_E": E.state_dict(),
                "optimizer_D": optimizer_D.state_dict(),
                "optimizer_GE": optimizer_GE.state_dict(),
                #                        "optimizer_E": optimizer_E.state_dict(),
            }
            filename = "../model/train1_{:03d}_{:03d}.pth.tar".format(
                epoch, batch_idx * len(data))
            torch.save(state, filename)
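loss_hinge_dis and loss_hinge_gen are instantiated above but never defined. A minimal sketch of the standard GAN hinge losses; since MultiscaleDiscriminator usually returns one prediction per scale, this version also accepts a list of tensors (that structure is an assumption):

import torch
import torch.nn as nn

class loss_hinge_dis(nn.Module):
    # Discriminator hinge loss: push real logits above +1 and fake logits below -1.
    def forward(self, pred_gen, pred_real):
        gens = pred_gen if isinstance(pred_gen, (list, tuple)) else [pred_gen]
        reals = pred_real if isinstance(pred_real, (list, tuple)) else [pred_real]
        loss_real = sum(torch.relu(1.0 - p).mean() for p in reals) / len(reals)
        loss_fake = sum(torch.relu(1.0 + p).mean() for p in gens) / len(gens)
        return loss_real, loss_fake

class loss_hinge_gen(nn.Module):
    # Generator hinge loss: raise the discriminator's logits on generated images.
    def forward(self, pred_gen):
        gens = pred_gen if isinstance(pred_gen, (list, tuple)) else [pred_gen]
        return -sum(p.mean() for p in gens) / len(gens)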
Example #13
    def __init__(self,
                 conf,
                 inference=False,
                 train_transforms=None,
                 val_transforms=None,
                 train_loader=None):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:

            self.milestones = conf.milestones
            if train_loader is None:
                self.loader, self.class_num = get_train_loader(
                    conf, train_transforms)
            else:
                self.loader = train_loader
                self.class_num = conf.num_classes

            if conf.net_mode in ['ir', 'ir_se']:
                self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                      conf.net_mode,
                                      conf.use_gap).to(conf.device)
            else:
                import json
                self.model = MetricNet(model_name=conf.net_mode,
                                       pooling=conf.pooling,
                                       use_fc=True,
                                       fc_dim=conf.embedding_size,
                                       dropout=conf.last_fc_dropout,
                                       pretrained=conf.pretrained,
                                       class_num=self.class_num).to(
                                           conf.device)
                print('{}_{} model generated'.format(conf.net_mode,
                                                     conf.net_depth))

            if conf.use_mobilfacenet or conf.net_mode in ['ir', 'ir_se']:
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num).to(conf.device)
            else:
                if conf.loss_module == 'arcface':
                    self.head = ArcMarginProduct(self.model.final_in_features,
                                                 self.class_num,
                                                 s=conf.s,
                                                 m=conf.margin,
                                                 easy_margin=False,
                                                 ls_eps=conf.ls_eps).to(
                                                     conf.device)
                elif conf.loss_module == 'cosface':
                    self.head = AddMarginProduct(self.model.final_in_features,
                                                 self.class_num,
                                                 s=conf.s,
                                                 m=conf.margin).to(conf.device)
                elif conf.loss_module == 'adacos':
                    self.head = AdaCos(self.model.final_in_features,
                                       self.class_num,
                                       m=conf.margin,
                                       theta_zero=conf.theta_zero).to(
                                           conf.device)
                else:
                    self.head = nn.Linear(self.model.final_in_features,
                                          self.class_num).to(conf.device)

            print('two model heads generated')
            if conf.ft_model_path:
                self.load_ft_model(conf.ft_model_path, not conf.no_strict)
            elif conf.restore_suffix:
                self.load_state(conf,
                                conf.restore_suffix,
                                from_save_folder=False,
                                model_only=False)

            if not inference:

                self.writer = SummaryWriter(conf.log_path)
                self.step = 0

                paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

                if conf.use_mobilfacenet:
                    params = [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }]
                    wd = 4e-5
                else:
                    # if conf.net_mode in ['ir', 'ir_se']:
                    # params = [
                    #     {'params': paras_wo_bn + [self.head.weight], 'weight_decay': 5e-4},
                    #     {'params': paras_only_bn}
                    # ]
                    params = [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }]
                    wd = 5e-4
                    # else:
                    #     params = self.model.parameters()
                    #     wd = conf.wd
                    #     # params = [
                    #     #     {'params': paras_wo_bn + [self.head.weight], 'weight_decay': conf.wd},  # 5e-4},
                    #     #     {'params': paras_only_bn}
                    #     # ]

                if conf.optimizer == 'sgd':
                    self.optimizer = optim.SGD(
                        params, lr=conf.lr,
                        momentum=conf.momentum)  # , weight_decay=wd)
                elif conf.optimizer == 'adam':
                    self.optimizer = optim.Adam(
                        params, lr=conf.lr)  # , weight_decay=wd)
                print(self.optimizer)
                #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

                print('optimizers generated')
                self.board_loss_every = len(self.loader) // 100
                self.evaluate_every = len(self.loader) // 10
                self.save_every = len(self.loader) // 5

                self.board_loss_every = 20
                self.evaluate_every = len(self.loader)
                self.save_every = len(self.loader)
                if conf.data_mode == 'common':
                    import json
                    val_img_dir_map = json.loads(conf.val_img_dirs)
                    self.val_dataloaders = {}
                    for val_name in val_img_dir_map:
                        val_img_dir = val_img_dir_map[val_name]
                        val_dataloader, common_val_issame = get_common_val_data(
                            val_img_dir,
                            conf.max_positive_cnt,
                            conf.val_batch_size,
                            conf.val_pin_memory,
                            conf.num_workers,
                            val_transforms=val_transforms,
                            use_pos=not conf.not_use_pos,
                            use_neg=not conf.not_use_neg,
                            val_smapling_type=conf.val_smapling_type,
                            use_keras_model=conf.use_val_left_right_check)
                        self.val_dataloaders[val_name] = [
                            val_dataloader, common_val_issame
                        ]
                elif conf.data_mode == 'dacon_landmark':

                    pass
                else:
                    self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                        self.loader.dataset.root.parent)
            else:
                self.threshold = conf.threshold
Example #14
def test_Backbone_output_shape():
    bb = Backbone(2, 3, 100)
    input = torch.rand(1, 3, 16, 16)
    assert bb(input).shape == (1, 2, 100, 2, 2)
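Note that the Backbone under test here takes a different constructor signature and returns a five-dimensional output, unlike the face-recognition Backbone(net_depth, drop_ratio, net_mode) used in the other examples; the two merely share a name.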
Example #15
    def __init__(self, conf, inference=False, need_loader=True):
        print(conf)
        if conf.use_mobilfacenet:
            # self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            self.model = torch.nn.DataParallel(
                MobileFaceNet(conf.embedding_size)).cuda()
            print('MobileFaceNet model generated')
        else:
            # self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
            self.model = torch.nn.DataParallel(
                Backbone(conf.net_depth, conf.drop_ratio,
                         conf.net_mode)).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            if need_loader:
                # self.loader, self.class_num = get_train_loader(conf)

                self.dataset = Dataset2()
                self.loader = DataLoader(self.dataset,
                                         batch_size=conf.batch_size,
                                         num_workers=conf.num_workers,
                                         shuffle=True,
                                         pin_memory=True)

                # self.loader = Loader2(conf)
                self.class_num = 85164
                print(self.class_num, 'classes, load ok ')
            else:
                import copy
                conf_t = copy.deepcopy(conf)
                conf_t.data_mode = 'emore'
                self.loader, self.class_num = get_train_loader(conf_t)
                print(self.class_num)
                self.class_num = 85164
            lz.mkdir_p(conf.log_path, delete=True)
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            if conf.loss == 'arcface':
                self.head = Arcface(embedding_size=conf.embedding_size,
                                    classnum=self.class_num).to(conf.device)
            elif conf.loss == 'softmax':
                self.head = MySoftmax(embedding_size=conf.embedding_size,
                                      classnum=self.class_num).to(conf.device)
            else:
                raise ValueError(f'{conf.loss}')

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)
            print('optimizers generated')
            self.board_loss_every = 100  # len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root_path)
        else:
            self.threshold = conf.threshold
Example #16
    def __init__(self, conf, inference=False, embedding_size=512):
        conf.embedding_size = embedding_size
        print(conf)

        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).cuda()
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        parameter_num_cal(self.model)

        self.milestones = conf.milestones
        self.loader, self.class_num = get_train_loader(conf)
        self.step = 0
        self.agedb_30, self.cfp_fp, self.lfw, self.calfw, self.cplfw, self.vgg2_fp, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.calfw_issame, self.cplfw_issame, self.vgg2_fp_issame = get_val_data(
            self.loader.dataset.root.parent)
        self.writer = SummaryWriter(conf.log_path)

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            if conf.multi_sphere:
                if conf.arcface_loss:
                    self.head = ArcfaceMultiSphere(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_shpere=conf.num_sphere,
                        m=conf.m).to(conf.device)
                elif conf.am_softmax:
                    self.head = MultiAm_softmax(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_sphere=conf.num_sphere,
                        m=conf.m).to(conf.device)
                else:
                    self.head = MultiSphereSoftmax(
                        embedding_size=conf.embedding_size,
                        classnum=self.class_num,
                        num_sphere=conf.num_sphere).to(conf.device)

            else:
                if conf.arcface_loss:
                    self.head = Arcface(embedding_size=conf.embedding_size,
                                        classnum=self.class_num).to(
                                            conf.device)
                elif conf.am_softmax:
                    self.head = Am_softmax(embedding_size=conf.embedding_size,
                                           classnum=self.class_num).to(
                                               conf.device)
                else:
                    self.head = Softmax(embedding_size=conf.embedding_size,
                                        classnum=self.class_num).to(
                                            conf.device)

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                if conf.multi_sphere:
                    self.optimizer = optim.SGD([{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + self.head.kernel_list,
                        'weight_decay':
                        4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                                               lr=conf.lr,
                                               momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            else:
                if conf.multi_sphere:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn + self.head.kernel_list,
                            'weight_decay': 5e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn + [self.head.kernel],
                            'weight_decay': 5e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)

            print(self.optimizer)

            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.calfw, self.cplfw, self.vgg2_fp, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.calfw_issame, self.cplfw_issame, self.vgg2_fp_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #17
    def __init__(self, conf, inference=False, transfer=0, ext='final'):
        pprint.pprint(conf)
        self.conf = conf
        if conf.arch == "mobile":
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        elif conf.arch == "ir_se":
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.arch).to(conf.device)
            print('{}_{} model generated'.format(conf.arch, conf.net_depth))
        elif conf.arch == "resnet50":
            self.model = ResNet(embedding_size=512,
                                arch=conf.arch).to(conf.device)
            print("resnet model {} generated".format(conf.arch))
        else:
            exit("model not supported yet!")

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            tmp_idx = ext.rfind('_')  # find the last '_' to replace it by '/'
            self.ext = '/' + ext[:tmp_idx] + '/' + ext[tmp_idx + 1:]
            self.writer = SummaryWriter(str(conf.log_path) + self.ext)
            self.step = 0

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if transfer == 3:
                self.optimizer = optim.Adam(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 2:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': paras_wo_bn + [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            elif transfer == 1:
                self.optimizer = optim.Adam(
                    [
                        {
                            'params': [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                    lr=conf.lr)  # , momentum = conf.momentum)
            else:
                """
                self.optimizer = optim.SGD([
                                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                                    {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
                                    {'params': paras_only_bn}
                                ], lr = conf.lr, momentum = conf.momentum)
                """
                self.optimizer = optim.Adam(list(self.model.parameters()) +
                                            list(self.head.parameters()),
                                            lr=conf.lr)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.save_freq = len(self.loader)  #//5 # originally, 100
            self.evaluate_every = len(self.loader)  #//5 # originally, 10
            self.save_every = len(self.loader)  #//2 # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            # self.val_112, self.val_112_issame = get_val_pair(self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold

        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_accuracy = []
        self.test_counter = []
Example #18
    def __init__(self, conf, args, inference=False):
        print(conf)
        self.local_rank = args.local_rank
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).cuda()
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).cuda()

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            #[self.model, self.head], self.optimizer = amp.initialize([self.model, self.head], self.optimizer, opt_level='O1')
            [self.model, self.head], self.optimizer = amp.initialize(
                [self.model, self.head], self.optimizer,
                opt_level='O3', keep_batchnorm_fp32=True)
            print(self.optimizer, args.local_rank)
            self.head = DistributedDataParallel(self.head)
            self.model = DistributedDataParallel(self.model)
            #self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[args.local_rank])
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 10
            self.save_every = len(self.loader) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
Example #19
def prepare(args):
    resume_from_checkpoint = args.resume_from_checkpoint

    prepare_start_time = time.time()
    logger.info('global', 'Start preparing.')
    check_config_dir()
    logger.info('setting', config_info(), time_report=False)

    model = Backbone()
    model = model.cuda()
    logger.info('setting', model_summary(model), time_report=False)
    logger.info('setting', str(model), time_report=False)

    branches = [
        main_branch(Config.nr_class, Config.in_planes),
        parsing_branch(Config.nr_class, Config.in_planes),
        parsing_branch(Config.nr_class, Config.in_planes),
        parsing_branch(Config.nr_class, Config.in_planes),
        parsing_branch(Config.nr_class, Config.in_planes)
    ]

    train_transforms = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(Config.input_shape),
        transforms.RandomApply([
            transforms.ColorJitter(
                brightness=0.3, contrast=0.3, saturation=0.3, hue=0)
        ],
                               p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.Pad(10),
        transforms.RandomCrop(Config.input_shape),
        transforms.ToTensor(),
        transforms.RandomErasing(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    test_transforms = transforms.Compose([
        transforms.Resize(Config.input_shape),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    trainset = Veri776_train(transforms=train_transforms,
                             need_mask=True,
                             bg_switch=Config.p_bgswitch)
    testset = Veri776_test(transforms=test_transforms)

    pksampler = PKSampler(trainset, p=Config.P, k=Config.K)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=Config.batch_size,
                                               sampler=pksampler,
                                               num_workers=Config.nr_worker,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(
        testset,
        batch_size=Config.batch_size,
        sampler=torch.utils.data.SequentialSampler(testset),
        num_workers=Config.nr_worker,
        pin_memory=True)

    weight_decay_setting = parm_list_with_Wdecay_multi([model] + branches)
    optimizer = torch.optim.Adam(weight_decay_setting, lr=Config.lr)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lr_lambda=lr_multi_func)

    losses = {}
    losses['cross_entropy_loss'] = [
        torch.nn.CrossEntropyLoss(),
        weight_cross_entropy(Config.ce_thres[0]),
        weight_cross_entropy(Config.ce_thres[1]),
        weight_cross_entropy(Config.ce_thres[2]),
        weight_cross_entropy(Config.ce_thres[3])
    ]
    losses['triplet_hard_loss'] = [
        triplet_hard_loss(margin=Config.triplet_margin),
        weighted_triplet_hard_loss(margin=Config.branch_margin,
                                   soft_margin=Config.soft_marigin),
        weighted_triplet_hard_loss(margin=Config.branch_margin,
                                   soft_margin=Config.soft_marigin),
        weighted_triplet_hard_loss(margin=Config.branch_margin,
                                   soft_margin=Config.soft_marigin),
        weighted_triplet_hard_loss(margin=Config.branch_margin,
                                   soft_margin=Config.soft_marigin)
    ]

    for k in losses.keys():
        if isinstance(losses[k], list):
            for i in range(len(losses[k])):
                losses[k][i] = losses[k][i].cuda()
        else:
            losses[k] = losses[k].cuda()

    for i in range(len(branches)):
        branches[i] = branches[i].cuda()

    start_epoch = 0
    if resume_from_checkpoint and os.path.exists(Config.checkpoint_path):
        checkpoint = load_checkpoint()
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])

    # resume training from the epoch after the checkpoint, or start from epoch 1
    start_epoch += 1

    ret = {
        'start_epoch': start_epoch,
        'model': model,
        'branches': branches,
        'train_loader': train_loader,
        'test_loader': test_loader,
        'optimizer': optimizer,
        'scheduler': scheduler,
        'losses': losses
    }

    prepare_end_time = time.time()
    time_spent = sec2min_sec(prepare_start_time, prepare_end_time)
    logger.info(
        'global', 'Finish preparing, time spend: {}mins {}s.'.format(
            time_spent[0], time_spent[1]))

    return ret
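
prepare() leans on helpers defined elsewhere in the repository, notably sec2min_sec for time reporting and lr_multi_func for the LambdaLR schedule. Below is a minimal sketch of plausible implementations, consistent with how they are called above; the warmup/decay numbers in lr_multi_func are assumptions, not the original schedule:

def sec2min_sec(start, end):
    # split an elapsed wall-clock interval into whole (minutes, seconds)
    elapsed = int(end - start)
    return elapsed // 60, elapsed % 60


def lr_multi_func(epoch):
    # hypothetical LambdaLR multiplier: 10-epoch linear warmup,
    # then a 10x decay every 40 epochs
    if epoch < 10:
        return (epoch + 1) / 10.0
    return 0.1 ** (epoch // 40)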
Example #20
    def __init__(self, conf, inference=False):
        self.backbone = Backbone().to(conf.device)
        self.idprehead = PreheadID().to(conf.device)
        self.idhead = Arcface().to(conf.device)
        self.attrhead = Attrhead().to(conf.device)
        print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            train_dataset = CelebA(
                'dataset', 'celebA_train.txt',
                trans.Compose([
                    trans.RandomHorizontalFlip(),
                    trans.ToTensor(),
                    trans.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
                ]))
            valid_dataset = CelebA(
                'dataset', 'celebA_validation.txt',
                trans.Compose([
                    trans.RandomHorizontalFlip(),
                    trans.ToTensor(),
                    trans.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
                ]))
            self.loader = DataLoader(train_dataset,
                                     batch_size=conf.batch_size,
                                     shuffle=True,
                                     pin_memory=conf.pin_memory,
                                     num_workers=conf.num_workers)
            self.valid_loader = DataLoader(valid_dataset,
                                           batch_size=conf.batch_size,
                                           shuffle=True,
                                           pin_memory=conf.pin_memory,
                                           num_workers=conf.num_workers)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            paras_only_bn_1, paras_wo_bn_1 = separate_bn_paras(self.backbone)
            paras_only_bn_2, paras_wo_bn_2 = separate_bn_paras(self.idprehead)
            paras_only_bn_3, paras_wo_bn_3 = separate_bn_paras(self.attrhead)
            paras_only_bn = paras_only_bn_1 + paras_only_bn_2 + paras_only_bn_3
            paras_wo_bn = paras_wo_bn_1 + paras_wo_bn_2 + paras_wo_bn_3

            self.optimizer = optim.SGD(
                [{
                    'params': paras_wo_bn + [self.idhead.kernel],
                    'weight_decay': 1e-4
                }, {
                    'params': paras_only_bn
                }],
                lr=conf.lr,
                momentum=conf.momentum)
            #             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 8
            self.evaluate_every = len(self.loader) // 4
            self.save_every = len(self.loader) // 2
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                Path("data/faces_emore/"))
        else:
            self.threshold = conf.threshold
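
Most of the trainers in these examples call separate_bn_paras so that batch-norm parameters can be exempted from weight decay. The repository's exact code is not shown; here is a minimal sketch of a function with the same contract, based only on how it is used:

import torch.nn as nn


def separate_bn_paras(model):
    # partition parameters into batch-norm params (kept free of weight decay)
    # and everything else (weight decay applied in the optimizer groups)
    paras_only_bn, paras_wo_bn = [], []
    for layer in model.modules():
        params = list(layer.parameters(recurse=False))
        if isinstance(layer, nn.modules.batchnorm._BatchNorm):
            paras_only_bn.extend(params)
        else:
            paras_wo_bn.extend(params)
    return paras_only_bn, paras_wo_bn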
Example #21
def train(args):
    DEVICE = torch.device(("cuda:%d"%args.gpu[0]) if torch.cuda.is_available() else "cpu")
    writer = SummaryWriter(args.log_root)
    train_transform = transforms.Compose([transforms.Resize([int(128*args.input_size/112), int(128*args.input_size/112)]),
                                        transforms.RandomCrop([args.input_size, args.input_size]),
                                        transforms.RandomHorizontalFlip(),
                                        transforms.ToTensor(),
                                        transforms.Normalize(mean=[args.rgb_mean,args.rgb_mean,args.rgb_mean], std=[args.rgb_std,args.rgb_std,args.rgb_std])
                                        ])
    train_dataset = datasets.ImageFolder(args.data_root, train_transform)
    weights = make_weights_for_balanced_classes(train_dataset.imgs, len(train_dataset.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, sampler=sampler, drop_last=True)  # pass the balanced sampler built above (a sampler is mutually exclusive with shuffle=True)
    NUM_CLASS = len(train_loader.dataset.classes)

    BACKBONE = Backbone([args.input_size, args.input_size], args.num_layers, args.mode)
    HEAD = ArcFace(args.emb_dims, NUM_CLASS, device_id=args.gpu)
    LOSS = FocalLoss()
    backbone_paras_only_bn, backbone_paras_wo_bn = separate_irse_bn_paras(BACKBONE)
    _, head_paras_wo_bn = separate_irse_bn_paras(HEAD)
    optimizer = optim.SGD([{'params': backbone_paras_wo_bn+head_paras_wo_bn, 'weight_decay': args.weight_decay},
                        {'params': backbone_paras_only_bn}], lr=args.lr, momentum=args.momentum)
    # optimizer = optim.AdamW([{'params': backbone_paras_wo_bn+head_paras_wo_bn, 'weight_decay': args.weight_decay},
    #                     {'params': backbone_paras_only_bn}], lr=args.lr)  # AdamW takes no momentum argument
    
    if args.load_ckpt:
        BACKBONE.load_state_dict(torch.load(os.path.join(args.load_ckpt, 'backbone_epoch{}.pth'.format(args.load_epoch))))
        HEAD.load_state_dict(torch.load(os.path.join(args.load_ckpt, 'head_epoch{}.pth'.format(args.load_epoch))))
        print('Checkpoint loaded')
    
    start_epoch = args.load_epoch if args.load_ckpt else 0

    BACKBONE = nn.DataParallel(BACKBONE, device_ids=args.gpu)
    BACKBONE = BACKBONE.to(DEVICE)

    display_frequency = len(train_loader) // 100
    NUM_EPOCH_WARM_UP = args.num_epoch // 25
    NUM_BATCH_WARM_UP = len(train_loader) * NUM_EPOCH_WARM_UP
    batch = 0
    print('Start training at %s!' % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    for epoch in range(start_epoch, args.num_epoch):
        if epoch==args.stages[0] or epoch==args.stages[1] or epoch==args.stages[2]:
            for params in optimizer.param_groups:
                params['lr'] /= 10.
        BACKBONE.train()
        HEAD.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        for inputs, labels in train_loader:
            if (epoch+1 <= NUM_EPOCH_WARM_UP) and (batch+1 <= NUM_BATCH_WARM_UP):
                for params in optimizer.param_groups:
                    params['lr'] = (batch+1) * args.lr / NUM_BATCH_WARM_UP
            inputs = inputs.to(DEVICE)
            labels = labels.to(DEVICE).long()
            features = BACKBONE(inputs)
            outputs = HEAD(features, labels)
            loss = LOSS(outputs, labels)
            prec1, prec5 = accuracy(outputs.data, labels, topk=(1,5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.data.item(), inputs.size(0))
            top5.update(prec5.data.item(), inputs.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch += 1
            if batch % display_frequency == 0:
                print('%s Epoch %d/%d Batch %d/%d: train loss %f, train prec@1 %f, train prec@5 %f' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    epoch, args.num_epoch, batch, len(train_loader)*args.num_epoch, losses.avg, top1.avg, top5.avg))
        writer.add_scalar('Train_Loss', losses.avg, epoch+1)
        writer.add_scalar('Train_Top1_Accuracy', top1.avg, epoch+1)
        writer.add_scalar('Train_Top5_Accuracy', top5.avg, epoch+1)
        torch.save(BACKBONE.module.state_dict(), os.path.join(args.ckpt_root, 'backbone_epoch%d.pth'%(epoch+1)))
        torch.save(HEAD.state_dict(), os.path.join(args.ckpt_root, 'head_epoch%d.pth'%(epoch+1)))
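
The loop above relies on two standard-but-unshown utilities: AverageMeter for batch-weighted running averages and accuracy for top-k precision. These are plausible minimal versions written to match the calls above (assumptions, not the repository's code):

class AverageMeter:
    # running average of a scalar, weighted by the number of samples per update
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    # top-k precision (in percent) for a batch of logits
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.t().eq(target.view(1, -1).expand_as(pred.t()))
    return [correct[:k].reshape(-1).float().sum() * (100.0 / target.size(0))
            for k in topk]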
Example #22
    def __init__(self, conf, inference=False):
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            self.growup = GrowUP().to(conf.device)
            self.discriminator = Discriminator().to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:

            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            if conf.discriminator:
                self.child_loader, self.adult_loader = get_train_loader_d(conf)

            os.makedirs(conf.log_path, exist_ok=True)
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0

            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            # DataParallel wrapping, no longer used
            if conf.use_dp:
                self.model = nn.DataParallel(self.model)
                self.head = nn.DataParallel(self.head)

            print(self.class_num)
            print(conf)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            if conf.discriminator:
                self.optimizer_g = optim.Adam(self.growup.parameters(),
                                              lr=1e-4,
                                              betas=(0.5, 0.999))
                self.optimizer_g2 = optim.Adam(self.growup.parameters(),
                                               lr=1e-4,
                                               betas=(0.5, 0.999))
                self.optimizer_d = optim.Adam(self.discriminator.parameters(),
                                              lr=1e-4,
                                              betas=(0.5, 0.999))
                self.optimizer2 = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)

            if conf.finetune_model_path is not None:
                self.optimizer = optim.SGD([{
                    'params': paras_wo_bn,
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                                           lr=conf.lr,
                                           momentum=conf.momentum)
            print('optimizers generated')

            self.board_loss_every = len(self.loader) // 100
            self.evaluate_every = len(self.loader) // 2
            self.save_every = len(self.loader)

            dataset_root = "/home/nas1_userD/yonggyu/Face_dataset/face_emore"
            self.lfw = np.load(
                os.path.join(dataset_root,
                             "lfw_align_112_list.npy")).astype(np.float32)
            self.lfw_issame = np.load(
                os.path.join(dataset_root, "lfw_align_112_label.npy"))
            self.fgnetc = np.load(
                os.path.join(dataset_root,
                             "FGNET_new_align_list.npy")).astype(np.float32)
            self.fgnetc_issame = np.load(
                os.path.join(dataset_root, "FGNET_new_align_label.npy"))
        else:
            # no longer used:
            # self.model = nn.DataParallel(self.model)
            self.threshold = conf.threshold
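
The lfw and fgnetc arrays loaded above are stacks of aligned face crops with parallel issame labels, i.e. verification pair sets. A hedged sketch of how such a set could be scored, assuming consecutive rows form a pair; the repository's own evaluate routine may differ:

import numpy as np
import torch


def verify(model, carray, issame, device, batch_size=64):
    # embed every crop, score consecutive pairs by cosine similarity,
    # then sweep a threshold for the best verification accuracy
    model.eval()
    embs = []
    with torch.no_grad():
        for i in range(0, len(carray), batch_size):
            batch = torch.tensor(carray[i:i + batch_size]).to(device)
            embs.append(model(batch).cpu().numpy())
    embs = np.concatenate(embs)
    e1, e2 = embs[0::2], embs[1::2]
    sims = (e1 * e2).sum(1) / (np.linalg.norm(e1, axis=1) * np.linalg.norm(e2, axis=1))
    acc, th = max((np.mean((sims > t) == issame), t) for t in np.arange(-1.0, 1.0, 0.01))
    return acc, th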
Example #23
    def __init__(self, num_classes, config):
        super(ID_encoder, self).__init__()
        self.backbone = Backbone(num_classes, config)
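
The fragment above only shows a constructor. For it to be usable, the class would also need to subclass nn.Module and define forward; here is a minimal hedged completion (the forward body is an assumption):

import torch.nn as nn


class ID_encoder(nn.Module):
    def __init__(self, num_classes, config):
        super(ID_encoder, self).__init__()
        self.backbone = Backbone(num_classes, config)

    def forward(self, x):
        # hypothetical: delegate straight to the backbone
        return self.backbone(x)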
Example #24
    def __init__(self, conf, inference=False):
        print(conf)

        # the split count is the single digit right before '_labels.txt' in the meta filename
        self.num_splits = int(conf.meta_file.split('_labels.txt')[0][-1])

        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if conf.device > 1:
            gpu_ids = list(
                range(0, min(torch.cuda.device_count(), conf.device)))
            self.model = nn.DataParallel(self.model, device_ids=gpu_ids).cuda()
        else:
            self.model = self.model.cuda()

        if not inference:
            self.milestones = conf.milestones

            if conf.remove_single is True:
                conf.meta_file = conf.meta_file.replace('.txt', '_clean.txt')
            meta_file = open(conf.meta_file, 'r')
            meta = meta_file.readlines()
            pseudo_all = [int(item.split('\n')[0]) for item in meta]
            pseudo_classnum = set(pseudo_all)
            if -1 in pseudo_classnum:
                pseudo_classnum = len(pseudo_classnum) - 1
            else:
                pseudo_classnum = len(pseudo_classnum)
            print('classnum:{}'.format(pseudo_classnum))

            # `count` (the cumulative sample offsets per split) is defined outside this snippet
            pseudo_classes = [
                pseudo_all[count[index]:count[index + 1]]
                for index in range(self.num_splits)
            ]
            meta_file.close()

            train_dataset = [get_train_dataset(conf.emore_folder)] + [
                get_pseudo_dataset([conf.pseudo_folder, index + 1],
                                   pseudo_classes[index], conf.remove_single)
                for index in range(self.num_splits)
            ]
            self.class_num = [num for _, num in train_dataset]
            print('Loading dataset done')

            train_longest_size = [len(item[0]) for item in train_dataset]
            temp = int(np.floor(conf.batch_size // (self.num_splits + 1)))
            self.batch_size = [conf.batch_size - temp * self.num_splits
                               ] + [temp] * self.num_splits
            train_longest_size = max([
                int(np.floor(td / bs))
                for td, bs in zip(train_longest_size, self.batch_size)
            ])
            train_sampler = [
                GivenSizeSampler(td[0],
                                 total_size=train_longest_size * bs,
                                 rand_seed=None)
                for td, bs in zip(train_dataset, self.batch_size)
            ]

            self.train_loader = [
                DataLoader(train_dataset[k][0],
                           batch_size=self.batch_size[k],
                           shuffle=False,
                           pin_memory=conf.pin_memory,
                           num_workers=conf.num_workers,
                           sampler=train_sampler[k])
                for k in range(1 + self.num_splits)
            ]
            print('Loading loader done')

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = [
                Arcface(embedding_size=conf.embedding_size,
                        classnum=self.class_num[0]),
                Arcface(embedding_size=conf.embedding_size,
                        classnum=pseudo_classnum)
            ]

            if conf.device > 1:
                self.head = [
                    nn.DataParallel(self.head[0], device_ids=gpu_ids).cuda(),
                    nn.DataParallel(self.head[1], device_ids=gpu_ids).cuda()
                ]
            else:
                self.head = [self.head[0].cuda(), self.head[1].cuda()]

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model.module)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        # self.head is a list of two heads, so collect both parameter sets
                        'params': [paras_wo_bn[-1]] + list(self.head[0].parameters()) + list(self.head[1].parameters()),
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                # assumes conf.device > 1, so each head is wrapped in nn.DataParallel
                params = [a.module.parameters() for a in self.head]
                params = list(params[0]) + list(params[1])
                self.optimizer = optim.SGD([{
                    'params': paras_wo_bn + params,
                    'weight_decay': 5e-4
                }, {
                    'params': paras_only_bn
                }],
                                           lr=conf.lr,
                                           momentum=conf.momentum)
            print(self.optimizer)

            if conf.resume is not None:
                self.start_epoch = self.load_state(conf.resume)
            else:
                self.start_epoch = 0

            print('optimizers generated')
            self.board_loss_every = len(self.train_loader[0]) // 10
            self.evaluate_every = len(self.train_loader[0]) // 5
            self.save_every = len(self.train_loader[0]) // 5
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                conf.eval_path)
        else:
            self.threshold = conf.threshold
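
GivenSizeSampler is not shown, but its call site implies a sampler whose epoch length is forced to total_size so that the labeled and pseudo-labeled loaders all yield the same number of batches. A hypothetical reconstruction under that assumption:

import numpy as np
from torch.utils.data import Sampler


class GivenSizeSampler(Sampler):
    # hypothetical: a shuffled sampler padded out to total_size by repeating
    # indices, keeping the parallel loaders in lockstep
    def __init__(self, dataset, total_size, rand_seed=None):
        self.dataset = dataset
        self.total_size = total_size
        self.rng = np.random.RandomState(rand_seed)

    def __iter__(self):
        indices = self.rng.permutation(len(self.dataset)).tolist()
        while len(indices) < self.total_size:
            indices += indices[:self.total_size - len(indices)]
        return iter(indices[:self.total_size])

    def __len__(self):
        return self.total_size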
Example #25
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)
            print('class_num:', self.class_num)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # if conf.data_mode == 'small_vgg':
            #     self.board_loss_every = len(self.loader)
            #     print('len(loader', len(self.loader))
            #     self.evaluate_every = len(self.loader)
            #     self.save_every = len(self.loader)
            #     # self.lfw, self.lfw_issame = get_val_data(conf, conf.smallvgg_folder)

            # else:
            #     self.board_loss_every = len(self.loader)

            #     self.evaluate_every = len(self.loader)//10
            #     self.save_every = len(self.loader)//5
            self.agedb_30, self.cfp_fp, self.lfw, self.kface, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame, self.kface_issame = get_val_data(
                conf, self.loader.dataset.root.parent)

        else:
            self.threshold = conf.threshold
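
This variant's get_val_data takes conf and also returns a kface benchmark alongside agedb_30, cfp_fp and lfw. Its implementation is not shown; below is a hedged sketch of a loader with that shape, assuming each benchmark is stored as an image array plus an issame label file (the file names are hypothetical):

import numpy as np


def get_val_data(conf, path):
    # hypothetical: load four verification benchmarks as (images, issame) pairs
    def load_pair(name):
        images = np.load(str(path / ('%s_images.npy' % name))).astype(np.float32)
        issame = np.load(str(path / ('%s_issame.npy' % name)))
        return images, issame

    agedb_30, agedb_30_issame = load_pair('agedb_30')
    cfp_fp, cfp_fp_issame = load_pair('cfp_fp')
    lfw, lfw_issame = load_pair('lfw')
    kface, kface_issame = load_pair('kface')
    return (agedb_30, cfp_fp, lfw, kface,
            agedb_30_issame, cfp_fp_issame, lfw_issame, kface_issame)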
Example #26
    torch.manual_seed(1234)
    torch.cuda.manual_seed(1234)
    torch.cuda.manual_seed_all(1234)
    np.random.seed(1234)
    random.seed(1234)
    torch.backends.cudnn.deterministic = True
    cudnn.benchmark = True  # note: benchmark=True trades away the determinism requested above for speed

    # CONFIG PARSER
    config = get_args()
    output_path = config.output_path
    make_dirs(output_path)
    logger = setup_logger('reid_baseline', output_path, if_train=True)

    train_loader, train_gen_loader, val_loader, num_query, num_classes = make_dataloader(config)
    model = Backbone(num_classes,config)
    # if config.pretrain:
    #     model.load_param_finetune(config.m_pretrain_path)

    loss_func, center_criterion = make_loss(config, num_classes=num_classes)
    optimizer, optimizer_center = make_optimizer(model, center_criterion)
    scheduler = WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10, 'linear')

    log_period = config.log_interval
    checkpoint_period = config.save_model_interval
    eval_period = config.test_interval

    device = "cuda"
    epochs = 80
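
WarmupMultiStepLR is called positionally above: milestones [40, 70], gamma 0.1, warmup factor 0.01, 10 warmup epochs, linear warmup. A sketch of a scheduler matching that signature, modeled on the common re-ID baseline implementation (an assumption, not necessarily this project's code):

from bisect import bisect_right
import torch


class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    # step decay at the given milestones plus a short linear warmup
    def __init__(self, optimizer, milestones, gamma, warmup_factor,
                 warmup_iters, warmup_method, last_epoch=-1):
        self.milestones = sorted(milestones)
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        factor = 1.0
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == 'linear':
                alpha = self.last_epoch / self.warmup_iters
                factor = self.warmup_factor * (1 - alpha) + alpha
            else:  # 'constant'
                factor = self.warmup_factor
        return [base_lr * factor *
                self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]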
Example #27
    def __init__(self, conf, inference=False, transfer=0):
        pprint.pprint(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                if transfer == 3:
                    self.optimizer = optim.SGD(
                        [{
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
                elif transfer == 2:
                    self.optimizer = optim.SGD([
                        {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                                               lr=conf.lr,
                                               momentum=conf.momentum)
                elif transfer == 1:
                    self.optimizer = optim.SGD([
                        {
                            'params': [self.head.kernel],
                            'weight_decay': 4e-4
                        },
                    ],
                                               lr=conf.lr,
                                               momentum=conf.momentum)
                else:
                    self.optimizer = optim.SGD(
                        [{
                            'params': paras_wo_bn[:-1],
                            'weight_decay': 4e-5
                        }, {
                            'params': [paras_wo_bn[-1]] + [self.head.kernel],
                            'weight_decay': 4e-4
                        }, {
                            'params': paras_only_bn
                        }],
                        lr=conf.lr,
                        momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            self.board_loss_every = len(self.loader) // 5  # originally, 100
            self.evaluate_every = len(self.loader) // 5  # originally, 10
            self.save_every = len(self.loader) // 2  # originally, 5
            # self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
            self.val_112, self.val_112_issame = get_val_pair(
                self.loader.dataset.root.parent, 'val_112')
        else:
            self.threshold = conf.threshold
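
In the transfer variants above, freezing is implicit: parameters left out of the optimizer never update, but autograd still computes their gradients. A small hypothetical companion helper (not part of the source) that makes the freezing explicit and saves backward-pass work:

def freeze_all_but(modules, trainable_params):
    # disable autograd for every parameter the optimizer does not see
    keep = {id(p) for p in trainable_params}
    for m in modules:
        for p in m.parameters():
            p.requires_grad_(id(p) in keep)

# e.g. for transfer == 1 (train only the ArcFace kernel):
# freeze_all_but([self.model, self.head], [self.head.kernel])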
Example #28
    def __init__(self, conf, inference=False):
        print(conf)
        self.lr = conf.lr
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            ###############################  ir_se50  ########################################
            if conf.struct == 'ir_se_50':
                self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
                print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
            ###############################  resnet101  ######################################
            elif conf.struct == 'ir_se_101':
                self.model = resnet101().to(conf.device)
                print('resnet101 model generated')

        if not inference:
            self.milestones = conf.milestones
            self.loader, self.class_num = get_train_loader(conf)        

            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            
            ###############################  ir_se50  ########################################
            if conf.struct == 'ir_se_50':
                self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
                self.head_race = Arcface(embedding_size=conf.embedding_size, classnum=4).to(conf.device)

            ###############################  resnet101  ######################################
            elif conf.struct == 'ir_se_101':
                self.head = ArcMarginModel(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
                self.head_race = ArcMarginModel(embedding_size=conf.embedding_size, classnum=4).to(conf.device)  # 4 race classes, matching the ir_se_50 branch
            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
            
            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
                    {'params': [paras_wo_bn[-1]] + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 4e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD([
                    {'params': paras_wo_bn + [self.head.kernel] + [self.head_race.kernel], 'weight_decay': 5e-4},
                    {'params': paras_only_bn}
                ], lr=conf.lr, momentum=conf.momentum)
            print(self.optimizer)
#             self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            print('len of loader:', len(self.loader))
            self.board_loss_every = len(self.loader) // min(len(self.loader), 100)
            self.evaluate_every = len(self.loader)
            self.save_every = len(self.loader)
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(conf.val_folder)
        else:
            #self.threshold = conf.threshold
            pass
Example #29
from model import Backbone, Arcface, MobileFaceNet, Am_softmax, l2_norm
from torchvision import transforms as trans
import PIL.Image as Image
from mtcnn import MTCNN
import torch
import cv2
import os

img_root_dir = '../img_align_celeba'
save_path = '../celeba_64'

device = torch.device('cuda:0')
mtcnn = MTCNN()

model = Backbone(50, 0.6, 'ir_se').to(device)
model.load_state_dict(torch.load('./saved_models/model_ir_se50.pth'))
model.eval()

# threshold = 1.54
test_transform = trans.Compose([
    trans.ToTensor(),
    trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

# decoder = libnvjpeg.py_NVJpegDecoder()

ind = 0
embed_map = {}

for root, dirs, files in os.walk(img_root_dir):
    files_len = len(files)
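    # --- hypothetical continuation: the original loop body is truncated here ---
    # assumes the repo's MTCNN exposes align() returning an aligned face crop;
    # each crop is embedded once and cached by filename
    for name in files:
        img = Image.open(os.path.join(root, name)).convert('RGB')
        try:
            face = mtcnn.align(img)
        except Exception:
            continue  # skip images with no detectable face
        with torch.no_grad():
            emb = model(test_transform(face).to(device).unsqueeze(0))
        embed_map[name] = l2_norm(emb).cpu()
        ind += 1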
Example #30
    def __init__(self, conf, inference=False):
        print(conf)
        if conf.use_mobilfacenet:
            self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
            print('MobileFaceNet model generated')
        else:
            self.model = Backbone(conf.net_depth, conf.drop_ratio,
                                  conf.net_mode).to(conf.device)
            print('{}_{} model generated'.format(conf.net_mode,
                                                 conf.net_depth))

        if not inference:
            self.milestones = conf.milestones
            print('prepare train loader..')
            self.loader, self.class_num = get_train_loader(conf)
            current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            self.writer = SummaryWriter(str(conf.log_path / current_time))
            self.step = 0
            self.head = Arcface(embedding_size=conf.embedding_size,
                                classnum=self.class_num).to(conf.device)

            print('two model heads generated')

            paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)

            if conf.use_mobilfacenet:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn[:-1],
                        'weight_decay': 4e-5
                    }, {
                        'params': [paras_wo_bn[-1]] + [self.head.kernel],
                        'weight_decay': 4e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            else:
                self.optimizer = optim.SGD(
                    [{
                        'params': paras_wo_bn + [self.head.kernel],
                        'weight_decay': 5e-4
                    }, {
                        'params': paras_only_bn
                    }],
                    lr=conf.lr,
                    momentum=conf.momentum)
            print(self.optimizer)
            self.lrscheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, factor=0.1, patience=20, verbose=True)
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

            print('optimizers generated')
            # len(self.loader) is the number of batches per epoch
            self.board_loss_every = len(self.loader) // 120
            self.evaluate_every = len(self.loader) // 40
            self.save_every = len(self.loader) // 40
            self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                self.loader.dataset.root.parent)
        else:
            self.threshold = conf.threshold
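
Unlike the milestone-based trainers above, this variant builds a ReduceLROnPlateau scheduler, which only acts when stepped with a monitored metric. A hypothetical usage sketch (evaluate is an assumed helper returning a validation accuracy):

def schedule_on_plateau(learner, conf):
    # step ReduceLROnPlateau with a monitored validation metric
    accuracy, best_threshold = learner.evaluate(conf, learner.lfw, learner.lfw_issame)
    learner.lrscheduler.step(accuracy)  # lr *= 0.1 after `patience` stagnant evaluations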