Example #1
    def inference(self,conf,img):
        mtcnn = MTCNN()
        learner = face_learner(conf,True)
        learner.load_state(conf,'final.pth',True,True)
        learner.model.eval()
        targets, names = load_facebank(conf)
        
        image = Image.open(img)
        frame = cv2.imread(img,cv2.IMREAD_COLOR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        try:
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:,:-1] # shape: [10, 4]; only keep the 10 highest-probability faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1,-1,1,1] # personal choice    
            results, score = learner.infer(conf, faces, targets, False)
            name = names[results[0]+1]
            frame = draw_box_name(bboxes[0], name, frame)
        except Exception as ex:
            name = "Can't detect face."
            h, w, c = frame.shape
            bbox = [int(h*0.5),int(w*0.5),int(h*0.5),int(w*0.5)]
            frame = draw_box_name(bbox, name, frame)
            
        return name, frame
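Most of these examples draw results with a draw_box_name helper from the repo's utilities. A minimal sketch of what such a helper might look like, assuming an OpenCV image and a bbox of [x1, y1, x2, y2]; the drawing style here is an assumption, not the repo's implementation:

import cv2

def draw_box_name(bbox, name, frame):
    # Hypothetical sketch: draw the face box and the identity label above it.
    x1, y1, x2, y2 = [int(v) for v in bbox]
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.putText(frame, name, (x1, max(y1 - 6, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    return frame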
Example #2
def authenticuser(path,userid):
    conf = get_config(False)
    mtcnn = MTCNN()
    print('mtcnn loaded')
    
    learner = face_learner(conf, True)
    learner.threshold = 1.35
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    print('learner loaded')
    targets = load_facebank_user(conf,userid)
    names=['Unknown',userid]
    print('facebank loaded')
    count =0
    while True:
        frame = cv2.imread(path)
        #try:
        image = Image.fromarray(frame)
        bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
        bboxes = bboxes[:,:-1] # shape: [10, 4]; only keep the 10 highest-probability faces
        bboxes = bboxes.astype(int)
        bboxes = bboxes + [-1,-1,1,1] # personal choice    
        results, score = learner.infer(conf, faces, targets)
        for idx, bbox in enumerate(bboxes):
            frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(100 - score[idx]), frame)
            result = {"_result": "success", "User Verified with": {"confidence": '{:.2f}%'.format(100 - score[idx]), "userid": names[results[idx] + 1], "error": "Success"}}
            accuracy.append('{:.2f}'.format(100 - score[idx]))
            user.append(names[results[idx] + 1])
            print(names[results[idx] + 1], '{:.2f}'.format(100 - score[idx]))
        count = 1
        #except:
        #   print('detect error')    
        if count>0:
            break
    return result
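Note that authenticuser appends to accuracy and user without defining them, so the snippet assumes module-level lists created elsewhere; a sketch of the assumed setup:

# Assumed module-level state for authenticuser (not shown in the example).
accuracy = []  # confidence strings for each verified face
user = []      # matched user ids for each verified face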
Example #3
def load_Learner(conf, args):
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    return learner
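A usage sketch for load_Learner; the get_config call and the threshold value are assumptions based on the other examples:

from types import SimpleNamespace

# Hypothetical usage: conf comes from the repo's get_config(False),
# as in the other examples; the threshold value is illustrative.
conf = get_config(False)
args = SimpleNamespace(threshold=1.54)
learner = load_Learner(conf, args)
results, score = learner.infer(conf, faces, targets)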
Example #4
 def __init__(self):
     self.faceDataDir = './data/facerec/'
     self.modelDir = './data/desired_model/'
     self.modelFn = self.modelDir + 'model_mobilefacenet.pth'
     self.learner = face_learner(use_mobilfacenet=True)
     self.learner.load_state(self.modelFn)
     self.learner.model.eval()
     self.allPersons = []
     self.read_all_register_info()
Example #5
def train():
    conf = get_config(False)
    mtcnn = MTCNN()
    #print('mtcnn loaded')
    learner = face_learner(conf, True)
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    print('learner loaded')
    targets, names = prepare_facebank(conf, learner.model, mtcnn)
    return {'_result': 'success', '_message': 'Model Is Updated'}
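prepare_facebank regenerates the embedding gallery that load_facebank later reads back. A hedged sketch of the core idea, assuming one folder per identity under conf.facebank_path; mtcnn.align and conf.test_transform are assumptions based on how the other examples use them:

import torch
from PIL import Image

def prepare_facebank_sketch(conf, model, mtcnn):
    # Illustrative only: average one embedding per identity folder.
    targets, names = [], ['Unknown']
    for person_dir in conf.facebank_path.iterdir():
        if not person_dir.is_dir():
            continue
        embs = []
        for img_path in person_dir.iterdir():
            face = mtcnn.align(Image.open(img_path))  # aligned 112x112 face
            with torch.no_grad():
                embs.append(model(conf.test_transform(face).to(conf.device).unsqueeze(0)))
        if embs:
            targets.append(torch.cat(embs).mean(dim=0, keepdim=True))
            names.append(person_dir.name)
    return torch.cat(targets), names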
Example #6
def extract2db(dbname):
    import tqdm

    ds = DatasetIJBC()
    loader = torch.utils.data.DataLoader(ds,
                                         batch_size=1,
                                         num_workers=12,
                                         shuffle=False,
                                         pin_memory=True,
                                         collate_fn=my_collate)

    learner = face_learner(conf, inference=True)
    assert conf.device.type != 'cpu'
    prefix = list(model_path.glob('model*_*.pth'))[0].name.replace(
        'model_', '')
    learner.load_state(conf, prefix, True, True)
    learner.model.eval()
    logging.info('learner loaded')

    timer.since_last_check('start')
    db = Database(dbname)
    for ind, res in tqdm.tqdm(enumerate(loader), ):
        img = res['img']
        imp = res['imgp']
        # print(img.shape[0])
        sid = res['sid']
        tid = res['tid']
        # TODO: batch size here must be 1
        sid = sid[0].item()
        tid = tid[0].item()
        finish = res['finish']
        # extract fea
        start = 0
        fea_l = []
        norm_l = []
        with torch.no_grad():
            while start < len(img):
                img_now = img[start:start + true_batch_size]
                # todo multi gpu: fix dataparallel
                fea, norm = learner.model(img_now,
                                          return_norm=True,
                                          normalize=False)
                start += true_batch_size
                fea_l.append(fea.cpu())
                norm_l.append(norm.cpu())
        fea = torch.cat(fea_l).numpy()
        norm = torch.cat(norm_l).numpy()
        ## save to db
        db[f'fea/{sid}/{tid}'] = fea
        db[f'norm/{sid}/{tid}'] = norm
        db[f'imp/{sid}/{tid}'] = msgpack_dumps(imp)
        # msgpack_loads(db[f'imp/{sid}/{tid}'][...].tolist())
        # if ind>10:break
    db.close()
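Example #6 hands a custom my_collate to the DataLoader. Since batch_size=1 and each loaded item is indexed like a dict afterwards, a plausible sketch (an assumption, not the repo's definition):

def my_collate(batch):
    # Hypothetical: with batch_size=1, return the single sample dict
    # unchanged instead of letting the default collate stack its fields.
    assert len(batch) == 1
    return batch[0]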
Example #7
def init_learner(conf):

    learner = face_learner(conf, True)
    learner.threshold = THRESHOLD

    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)

    learner.model.eval()
    print('learner loaded')
    return learner
Example #8
def fn_face_verify_module():
    mtcnn = MTCNN()
    print('mtcnn loaded')
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    print('learner loaded')

    if args.update:
        targets, names = prepare_facebank(conf,
                                          learner.model,
                                          mtcnn,
                                          tta=args.tta)
        print('facebank updated')
    else:
        targets, names = load_facebank(conf)
        print('facebank loaded')

    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit,
                                              conf.min_face_size)
            bboxes = bboxes[:, :-1]  # shape: [10, 4]; only keep the 10 highest-probability faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            results, score = learner.infer(conf, faces, targets, args.tta)
            for idx, bbox in enumerate(bboxes):
                if args.score:
                    frame = draw_box_name(
                        bbox,
                        names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                        frame)
                else:
                    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
        except:
            print('detect error')

        cv2.imshow('face Capture', frame)

    if args.save:
        video_writer.write(frame)
Example #9
def caculate_distance_image(root_folder, parse_text_file):
    conf = get_config(False)
    mtcnn = MTCNN()
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    
    embedding = [] 
    names = []
    
    folder = os.listdir(root_folder) 
    for file in folder: 
        try:
            img = os.path.join(root_folder,file)     
            embs, name = extract_single_image(img, conf, learner.model, mtcnn, tta= args.tta)
            embedding.append(embs)
            names.append(name)
        except: 
            continue
    if not os.path.exists(parse_text_file):
        with open(parse_text_file,"w") as fs: 
            fs.write('\n CLASS: {} \n'.format(root_folder.split('/')[-1]))
            for i in range(1,len(embedding),1):  
                try:
                    embs1 = embedding[0]
                    embs2 = embedding[i]
                    _, score = distance(embs1, embs2)
                    line = 'distance image:{} -> image:{} is:{:.4f} \n'.format(names[0],names[i],score[0])
                    fs.write(line)
                except: 
                    continue
            for i in range(2,len(embedding)):
                try:      
                    _, score = distance(embedding[1],embedding[i])
                    line = 'distance image:{} -> image:{} is:{:.4f} \n'.format(names[1],names[i],score[0])
                    fs.write(line)
                except: 
                    continue
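Both distance examples rely on a distance helper whose second return value is indexable per pair. A minimal sketch, assuming the embeddings are 2-D torch tensors and the score is a squared L2 distance (the exact metric is an assumption):

import torch

def distance(emb1, emb2):
    # Hypothetical: squared L2 distance between corresponding rows;
    # returns the index of the closest pair and the full distance vector.
    dist = torch.sum(torch.pow(emb1 - emb2, 2), dim=1)
    return dist.argmin(), dist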
Example #10
 def __init__(self):
     self.stream_url = "https://demo.bahien.com/live/stream/playlist.m3u8"
     self.cap = cv2.VideoCapture(self.stream_url)
     self.cap.set(cv2.CAP_PROP_POS_MSEC, random.randint(3, 1000))
     self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)
     self.conf = get_config(False)
     self.mtcnn = MTCNN()
     self.learner = face_learner(self.conf, True)
     self.learner.threshold = THRESHOLD
     self.learner.load_state(self.conf, 'final.pth', True, True)
     self.learner.model.eval()
     print('learner loaded')
     self.targets, self.names = load_facebank(self.conf)
     print('facebank loaded')
     self.net, self.ln = yolo_detection.init_net()
     print('yolo model loaded')
     self.model = Darknet(config_file)
     self.model.load_weights(weight_file)
     self.model.cuda()
Example #11
    def __init__(self, register=False):
        self.conf = get_config()
        self.conf.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.mtcnn = My_Function_lib.MTCNN()
        print('mtcnn loaded')

        self.learner = face_learner(self.conf, True)
        if self.conf.device.type == 'cpu':
            self.learner.load_state(self.conf, 'best.pth', True)
        else:
            self.learner.load_state(self.conf, 'final.pth', True)
        self.learner.model.eval()
        print('learner loaded')
        if not register:
            self.targets, self.names = load_facebank(self.conf)
        print('facebank updated')
        self.process_this_frame = True
        self.Img = None
Example #12
def register(user_id):
    data_path = Path('data')
    save_path = data_path / 'facebank' / user_id
    fetch_path = data_path / 'dataset' / user_id
    images = load_images_from_folder(fetch_path)
    print(images)
    if not save_path.exists():
        save_path.mkdir()

    mtcnn = MTCNN()
    count = 0
    face_id = user_id
    for img in images:
        frame = img
        p = Image.fromarray(frame[..., ::-1])
        try:
            warped_face = np.array(mtcnn.align(p))[..., ::-1]
            cv2.imwrite(
                "data/facebank/" + str(face_id) + '/' + str(face_id) + '_' +
                str(count) + ".jpg", warped_face)
            count += 1
            #cv2.imwrite(str(save_path/'{}.jpg'.format(str(datetime.now())[:-7].replace(":","-").replace(" ","-"))), warped_face)
        except:
            result = {
                "_result": "Error",
                "_message": "Unable to detect the face"
            }
    if count == len(images):
        result = {
            "_result": "success",
            "_message": "User Registered Successfully"
        }

    conf = get_config(False)
    learner = face_learner(conf, True)
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    #print('learner loaded')
    targets, names = prepare_facebank(conf, learner.model, mtcnn, user_id)
    #print('facebank updated')
    return result
Example #13
def run(args, conf):
    image_root = args.image_root
    retina_face_root = args.retinaface
    result_root = args.result_root
    seq = os.path.basename(image_root)
    verbose = args.verbose
    if not osp.exists(result_root):
        os.makedirs(result_root, exist_ok=True)

    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    learner.load_state(conf, fixed_str="ir_se50.pth", from_save_folder=False, model_only=True)
    learner.model.eval()

    ############################################################
    # 1. Construct face bank only using the confident faces
    # from openface
    ############################################################
    face_bank, names, emb_counts = construct_face_bank(seq, retina_face_root, image_root, result_root, learner, verbose)
    names, emb_counts = np.array(names), np.array(emb_counts)
    print("Sequence: {}, found {} unique faces".format(seq, len(emb_counts)))
Example #14
def caculate_distace_foler(rootdir, parse_text_file):
    
    conf = get_config(False)
    mtcnn = MTCNN()
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()

    embedding = []
    names = []
    folder = os.listdir(rootdir) 
    for path in tqdm.tqdm(folder): 
        dirs = os.path.join(rootdir,path)
        try:
            embs, name = extract_folder_image(Path(dirs) ,conf , learner.model, mtcnn, tta= args.tta)
            embedding.append(embs)
            names.append(name)
        except: 
            continue
    # if os.path.exists(parse_text_file): 
    #     pass 
    # else: 
    with open(parse_text_file,"w") as fs: 
        for i in tqdm.tqdm(range(0,len(embedding)-1)): 
            for j in range(i+1,len(embedding)): 
                _, score = distance(embedding[i],embedding[j])
                print(score)
                line = 'distance class:{} -> class:{} is:{:.2f}\n'.format(names[i],names[j],score[0])
                fs.write(line)
            fs.write('       ===================== \n')
Example #15
    if args.seed is not None:
        import numpy as np
        import torch

        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    transforms = transforms.get_test_transforms_v2(args.input_size, use_crop=args.use_crop,
                                                   center_crop_ratio=args.center_crop_ratio, use_gray=args.use_gray)
    dataset = CustomDataset(args.test_img_dir, transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             shuffle=False, num_workers=args.num_workers, pin_memory=args.pin_memory)

    learner = face_learner(conf, inference=True)

    for imgs, labels in self.loader:
        imgs = imgs.to(conf.device)
        labels = labels.to(conf.device)
        self.optimizer.zero_grad()
        embeddings = self.model(imgs)
        thetas = self.head(embeddings, labels)

    device = 'cuda'
    total_scores = []
    total_indices = []
    total_file_names = []
    for step, (imgs, file_names) in enumerate(dataloader):
        if step > 0 and step % args.log_step_interval == 0:
            print(step, len(dataloader))
Example #16
    parser.add_argument("-net", "--net_mode", help="which network, [ir, ir_se, mobilefacenet]",default='ir_se', type=str)
    parser.add_argument("-depth", "--net_depth", help="how many layers [50,100,152]", default=50, type=int)
    parser.add_argument('-lr','--lr',help='learning rate',default=1e-3, type=float)
    parser.add_argument("-b", "--batch_size", help="batch_size", default=96, type=int)
    parser.add_argument("-w", "--num_workers", help="workers number", default=3, type=int)
    parser.add_argument("-d", "--data_mode", help="use which database, [vgg, ms1m, emore, concat]", default='emore', type=str)
    # for continuing training from a trained model
    parser.add_argument("-lo", "--load_model",
                        help="whether model should be loaded before training, enter fixed part of file string",
                        default=None, type=str)
    parser.add_argument("-st", "--step",
                        help="the starting value for step variable which is used in model name when saving",
                        default=0, type=int)

    args = parser.parse_args()

    conf = get_config()
    
    if args.net_mode == 'mobilefacenet':
        conf.use_mobilfacenet = True
    else:
        conf.net_mode = args.net_mode
        conf.net_depth = args.net_depth    
    
    conf.lr = args.lr
    conf.batch_size = args.batch_size
    conf.num_workers = args.num_workers
    conf.data_mode = args.data_mode
    learner = face_learner(conf)

    learner.train(conf, args.epochs, fixed_str=args.load_model, step=args.step)
Example #17
    conf.num_sphere = args.num_sphere

    if args.pretrain:
        conf.pretrain = True
        assert len(args.pretrained_model_path)
        conf.pretrained_model_path = args.pretrained_model_path

    conf.drop_ratio = args.drop_ratio
    conf.m = args.m
    conf.lr = args.lr
    conf.work_path = Path(args.work_path)
    conf.model_path = conf.work_path / 'models'
    conf.log_path = conf.work_path / 'log'
    conf.save_path = conf.work_path / 'save'
    if not os.path.exists(conf.work_path):
        os.mkdir(conf.work_path)
        os.mkdir(conf.model_path)
        os.mkdir(conf.save_path)
    conf.batch_size = args.batch_size
    conf.num_workers = args.num_workers
    conf.data_mode = args.data_mode
    # if conf.data_mode == 'webface':
    #     conf.epochs = 55
    #     conf.milestones = [30, 38, 45]

    learner = face_learner(conf, embedding_size=512)
    learner.train(conf, args.epochs)
Example #18
                        default=196,
                        type=int)
    parser.add_argument("-w",
                        "--num_workers",
                        help="workers number",
                        default=8,
                        type=int)
    parser.add_argument(
        "-d",
        "--data_mode",
        help="use which database, [vgg, ms1m, emore, concat, glintasia]",
        default='emore',
        type=str)
    args = parser.parse_args()

    conf = get_config()

    if args.net_mode == 'seesawFaceNet':
        conf.seesawFaceNet = True
    else:
        conf.net_mode = args.net_mode
        conf.net_depth = args.net_depth

    conf.lr = args.lr
    conf.batch_size = args.batch_size
    conf.num_workers = args.num_workers
    conf.data_mode = args.data_mode
    learner = face_learner(conf, pretrained=True)

    learner.train(conf, args.epochs)
Example #19
                        default=3,
                        type=int)
    parser.add_argument("-d",
                        "--data_mode",
                        help="use which database, [vgg, ms1m, emore, concat]",
                        default='emore',
                        type=str)
    parser.add_argument("--local_rank", default=0, type=int)
    args = parser.parse_args()
    print(args.local_rank)
    conf = get_config()

    torch.distributed.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)

    if args.net_mode == 'mobilefacenet':
        conf.use_mobilfacenet = True
    else:
        conf.net_mode = args.net_mode
        conf.net_depth = args.net_depth

    conf.lr = args.lr
    conf.batch_size = args.batch_size
    conf.num_workers = args.num_workers
    conf.data_mode = args.data_mode
    learner = face_learner(conf, args)
    t1 = time.time()
    learner.train(conf, args.epochs)
    t2 = time.time()
    print('training time: ', t2 - t1)
Example #20
def main(args):
  start_time = time.time()
  merge_datasets = args.merge.split(',')
  # load dataset1 class_num & img_size
  prop = load_property(merge_datasets[0])
  image_size = prop.image_size
  print('image_size', image_size)
  learner = None
  conf = get_config()
  # load model
  if args.model:
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    print('loading', args.model)
    learner = face_learner(conf, train=False)
    learner.load_state(conf, '{}.pth'.format(args.model), model_only=True) # r100
    learner.model.eval()
  else:
    print('model is empty')
    assert args.t==0.0
  rec_list = []
  # read rec
  for ds in merge_datasets:
    path_imgrec = os.path.join(ds, 'train.rec')
    path_imgidx = os.path.join(ds, 'train.idx')
    imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
    rec_list.append(imgrec)

  id_list_map = {}
  all_id_list = []
  #
  for ds_id in range(len(rec_list)):
    id_list = []
    imgrec = rec_list[ds_id]
    s = imgrec.read_idx(0)
    header, _ = mx.recordio.unpack(s)
    assert header.flag>0
    print('header0 label', header.label)
    seq_identity = range(int(header.label[0]), int(header.label[1]))
    pp=0
    for identity in seq_identity:
      pp+=1
      if pp%10==0:
        print('processing id', pp)
      if learner is not None:
        embedding = get_embedding(args, imgrec, identity, image_size, learner.model, conf)
      else:
        embedding = None
      #print(embedding.shape)
      id_list.append( [ds_id, identity, embedding] )
    id_list_map[ds_id] = id_list
    if ds_id==0 or learner is None:
      all_id_list += id_list
      print(ds_id, len(id_list))
    else:
      X = []
      #data_hist = []
      for id_item in all_id_list:
        X.append(id_item[2])
      X = np.array(X)
      for i in range(len(id_list)):
        id_item = id_list[i]
        y = id_item[2]
        sim = np.dot(X, y.T) #memory error
        print(i)
        #data_hist.append(sim)
        idx = np.where(sim>=args.t)[0]
        if len(idx)>0:
          continue
        all_id_list.append(id_item)
      #rng_hist = np.arange(-1, 1.05, 0.05)
      #data_hist = np.asarray(data_hist)
      #res_hist, res_bins, _ = plt.hist(data_hist, rng_hist, rwidth = 0.8)
      #print(res_hist)
      #plt.show()

  if not os.path.exists(args.output):
    os.makedirs(args.output)
  writer = mx.recordio.MXIndexedRecordIO(os.path.join(args.output, 'train.idx'), os.path.join(args.output, 'train.rec'), 'w')
  idx = 1
  identities = []
  nlabel = -1
  for id_item in all_id_list:
    if id_item[1]<0:
      continue
    nlabel+=1
    ds_id = id_item[0]
    imgrec = rec_list[ds_id]
    id = id_item[1]
    s = imgrec.read_idx(id)
    header, _ = mx.recordio.unpack(s)
    a, b = int(header.label[0]), int(header.label[1])
    identities.append( (idx, idx+b-a) )
    for _idx in range(a,b):
      s = imgrec.read_idx(_idx)
      _header, _content = mx.recordio.unpack(s)
      nheader = mx.recordio.IRHeader(0, nlabel, idx, 0)
      s = mx.recordio.pack(nheader, _content)
      writer.write_idx(idx, s)
      idx+=1
  id_idx = idx
  for id_label in identities:
    _header = mx.recordio.IRHeader(1, id_label, idx, 0)
    s = mx.recordio.pack(_header, b'')
    writer.write_idx(idx, s)
    idx+=1
  _header = mx.recordio.IRHeader(1, (id_idx, idx), 0, 0)
  s = mx.recordio.pack(_header, b'')
  writer.write_idx(0, s)
  with open(os.path.join(args.output, 'property'), 'w') as f:
    f.write("%d,%d,%d"%(len(identities), image_size[0], image_size[1]))
  print('time : {}'.format(time.time()-start_time))
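The np.dot(X, y.T) in Example #20 is flagged with a "memory error" comment once X grows large; computing the similarities in fixed-size chunks keeps peak memory bounded. A sketch of that idea:

import numpy as np

def max_similarity_chunked(X, y, chunk=4096):
    # Compare y against rows of X in chunks instead of one giant
    # matrix product; returns the best similarity found.
    best = -np.inf
    for start in range(0, len(X), chunk):
        sims = np.dot(X[start:start + chunk], y.T)
        best = max(best, float(np.max(sims)))
    return best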
Example #21
    def test(self,conf,img_dir,update=False,view_score=False,view_error=False):
        #Load models
        mtcnn = MTCNN()
        learner = face_learner(conf, True)
        if conf.device.type == 'cpu':
            learner.load_state(conf,'cpu_final.pth',True,True)
        else:
            learner.load_state(conf,'final.pth',True,True)
        learner.model.eval()

        #Load Facebank
        if update:
            targets, names = prepare_facebank(conf, learner.model, mtcnn, False)
            print('facebank updated')
        else:
            targets, names = load_facebank(conf)
            print('facebank loaded')

        #Load Image list
        img_list = glob(img_dir + '**/*.jpg')
        acc = 0
        detect_err=0
        fails = []
        print(f"{'Found':^15}{'Name':^20}{'Result':^15}{'Score':^15}")
        pbar = enumerate(img_list)
        pbar = tqdm(pbar, total = len(img_list))
        for i, x in pbar:
            preds = []
            label = str(os.path.dirname(x))
            label = os.path.basename(label)
            image = Image.open(x)
            frame = cv2.imread(x,cv2.IMREAD_COLOR)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            try:
                bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
                bboxes = bboxes[:,:-1] # shape: [10, 4]; only keep the 10 highest-probability faces
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1,-1,1,1] # personal choice    
                results, score = learner.infer(conf, faces, targets, False)
                for idx,bbox in enumerate(bboxes):
                    print(f'{label}: {score[idx]}')
                    if view_score:
                        frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
                    else:
                        frame = draw_box_name(bbox, names[results[idx] + 1], frame)
                    preds.append(names[results[idx]+1])

                if label in preds:
                    acc += 1
                else:
                    fails.append([label,preds])
                    # Image.fromarray(frame,'RGB').show()
            except Exception as ex:
                fails.append([label,ex])
                detect_err += 1

            f = len(bboxes)
            tf = str(True if label in preds else False)
            t = f'{f:^15}{label:^20}{tf:^15}{acc/(i+1):^15.4}'
            pbar.set_description(t)
        
        if detect_err>0:
            print(f'Detect Error: {detect_err}')
            if view_error:
                pp(fails)
            else:
                print('If you want to see details, set view_error to True.')

        print(f'Accuracy: {acc/len(img_list)}')
Example #22
def predict():

    # img = img_path  # note: change this as needed

    parser = argparse.ArgumentParser(description='for face verification')
    parser.add_argument("-s",
                        "--save",
                        help="whether save",
                        action="store_true")
    parser.add_argument('-th',
                        '--threshold',
                        help='threshold to decide identical faces',
                        default=1.54,
                        type=float)
    parser.add_argument("-u",
                        "--update",
                        default=True,
                        help="whether perform update the facebank",
                        action="store_true")
    parser.add_argument("-tta",
                        "--tta",
                        default=False,
                        help="whether testCode time augmentation",
                        action="store_true")
    parser.add_argument("-c",
                        "--score",
                        default=True,
                        help="whether show the confidence score",
                        action="store_true")
    parser.add_argument('--img_path',
                        '-p',
                        default='1.jpg',
                        type=str,
                        help='input the name of the recording person')
    args = parser.parse_args()
    mtcnn = MTCNN()
    conf = get_config(False)
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    image = Image.open(
        'D:/code/Python/InsightFace-刘悦/InsightFace_Pytorch-master/1.jpg')

    if conf.device.type == 'cpu':
        learner.load_state(conf, 'ir_se50.pth', True, True)
    else:
        learner.load_state(conf, 'ir_se50.pth', True, True)
    learner.model.eval()
    print('learner loaded')

    if args.update:
        targets, names = prepare_facebank(conf,
                                          learner.model,
                                          mtcnn,
                                          tta=args.tta)
        print('facebank updated')
    else:
        targets, names = load_facebank(conf)
        print('facebank loaded')

    try:
        # image = Image.fromarray(img)
        # detect and align faces with the MTCNN network
        bboxes, faces = mtcnn.align_multi(image, conf.face_limit,
                                          conf.min_face_size)
        bboxes = bboxes[:, :-1]  # shape: [10, 4]; only keep the 10 highest-probability faces
        bboxes = bboxes.astype(int)
        bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
        results, score = learner.infer(conf, faces, targets, args.tta)
        print(results, score)
        for idx, bbox in enumerate(bboxes):
            print(names[results[idx] + 1])
            res = "name:" + names[results[idx] + 1]
    except:
        print('detect error')
        res = "unknown"
    return res
Example #23
    if args.benchmark:
        import torch.backends.cudnn as cudnn

        cudnn.benchmark = True
        cudnn.deterministic = True

    train_transform_func = getattr(transforms, args.train_transform_func_name)
    train_transforms = train_transform_func(
        args.input_size,
        args.use_random_crop,
        args.use_gray,
        only_use_pixel_transform=args.only_use_pixel_transform,
        use_flip=args.use_flip,
        use_blur=args.use_blur,
        no_transforms=args.no_transforms,
        use_center_crop=args.use_center_crop,
        center_crop_ratio=args.center_crop_ratio)

    val_transform_func = getattr(transforms, args.val_transform_func_name)
    val_transforms = val_transform_func(
        input_size=args.input_size,
        use_gray=args.use_gray,
        use_center_crop=args.use_center_crop,
        center_crop_ratio=args.center_crop_ratio)

    learner = face_learner(conf,
                           train_transforms=train_transforms,
                           val_transforms=val_transforms)

    learner.train(conf, args.epochs)
Example #24
 def train(self, conf):
     print(f'Train Start. Train dataset is {conf.data_mode}')
     learner = face_learner(conf, False)
     learner.train(conf, conf.epoch)
Example #25
    conf.ipabn = False
    conf.cvt_ipabn = False
    conf.fill_cache = False
    # conf.net_depth = 152
    # conf.net_mode = 'mobilefacenet'
    conf.use_chkpnt = False
    from Learner import FaceInfer, face_learner

    # learner = FaceInfer(conf, gpuid=range(conf.num_devs))
    # learner.load_state(
    #     resume_path=f'work_space/{args.modelp}/models/',
    #     latest=False,
    # )
    # learner.model.eval()

    learner = face_learner()
    learner.load_state(
        resume_path=f'work_space/{args.modelp}/models/',
        latest=False,
        load_optimizer=False,
        load_imp=False,
        load_head=False,
    )
    learner.model.eval()

from pathlib import Path

res = {}
for ds in [
        'agedb_30',
        'lfw',
Example #26
def face_detection(video_path, result_csv_path):

    conf = get_config(False)
    mtcnn = MTCNN(select_largest=False, keep_all=True)
    print('mtcnn loaded')
    
    learner = face_learner(conf, True)
    learner.threshold = 0.5
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    print('learner loaded')
    
    targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = True)
    print('facebank updated')
        
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_MSEC, 0)
    
    fps = cap.get(cv2.CAP_PROP_FPS)
    #video writer
    #video_writer = cv2.VideoWriter(str(conf.facebank_path/'{}.avi'.format(args.save_name)),
    #                               cv2.VideoWriter_fourcc(*'XVID'), int(fps), (int(cap.get(3)),int(cap.get(4))))
    #video_writer = cv2.VideoWriter(str(conf.facebank_path/'{}.avi'.format(args.save_name)),
    #                               cv2.CAP_OPENCV_MJPEG, int(fps), (int(cap.get(3)),int(cap.get(4))))
    

    #### csv
    df = pd.DataFrame(columns=['frame_number', 'ID', 'LT', 'RB', 'score'])

    
    framecounter = 0
    while cap.isOpened():
        isSuccess,frame = cap.read()
        if isSuccess:            
#             image = Image.fromarray(frame[...,::-1]) #bgr to rgb
            image = Image.fromarray(frame)
            try:
                bboxes, faces = mtcnn.align_multi(image, conf.face_limit, 16)
                # face bounding boxes and 5-point landmarks detected by MTCNN
            except:
                bboxes = []
                faces = []
            if len(bboxes) == 0:
                print('no face')
                # continue
            else:
                bboxes = bboxes[:,:-1] # shape: [10, 4]; only keep the 10 highest-probability faces
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1,-1,1,1] # personal choice
                # person identification
                results, score = learner.infer(conf, faces, targets, True)
                for idx,bbox in enumerate(bboxes):
                    #frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
                    df = df.append({'frame_number':framecounter, 'ID':names[results[idx] + 1], 'LT':(bbox[0],bbox[1]), 'RB':(bbox[2],bbox[3]), 'score':'{:.2f}'.format(score[idx])}, ignore_index=True)

            #video_writer.write(frame)
            #print('{0:.2f}' .format(framecounter/duration*100))
            framecounter +=1
        else:
            break
      
    cap.release()
    #video_writer.release()
    df.to_csv(result_csv_path, index=False)
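DataFrame.append as used above was deprecated in pandas 1.4 and removed in 2.0. On current pandas, collect the per-frame dicts in a plain list during the loop and build the frame once at the end; a sketch:

import pandas as pd

def rows_to_csv(rows, result_csv_path):
    # rows: list of dicts gathered inside the detection loop, replacing
    # the repeated df.append(...) calls.
    df = pd.DataFrame(rows, columns=['frame_number', 'ID', 'LT', 'RB', 'score'])
    df.to_csv(result_csv_path, index=False)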
Example #27
import pims, cvbase as cvb
from lz import *

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='for face verification')
    parser.add_argument("-s", "--save", help="whether save", action="store_true", default=True)
    parser.add_argument('-th', '--threshold', help='threshold to decide identical faces', default=1.54, type=float)
    parser.add_argument("-u", "--update", help="whether perform update the facebank", action="store_true",
                        default=False)
    parser.add_argument("-tta", "--tta", help="whether test time augmentation", action="store_true", default=True)
    parser.add_argument("-c", "--score", help="whether show the confidence score", action="store_true", default=True)
    args = parser.parse_args()

    conf = get_config(False)

    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    # conf.work_path = Path('work_space/')
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    print('learner loaded')


    def extract_fea_from_img(img):
        img = img.copy()[..., ::-1].reshape(112, 112, 3)
        img = Image.fromarray(img)
        mirror = transforms.functional.hflip(img)
        with torch.no_grad():
Example #28
def main1():
    parser = argparse.ArgumentParser(description='for face verification')
    parser.add_argument("-e",
                        "--epochs",
                        help="training epochs",
                        default=8,
                        type=int)
    parser.add_argument("-net",
                        "--net_mode",
                        help="which network, [ir, ir_se, mobilefacenet]",
                        default='ir_se',
                        type=str)
    parser.add_argument("-depth",
                        "--net_depth",
                        help="how many layers [50,100,152]",
                        default=50,
                        type=int)
    parser.add_argument('-lr',
                        '--lr',
                        help='learning rate',
                        default=1e-3,
                        type=float)
    parser.add_argument("-b",
                        "--batch_size",
                        help="batch_size",
                        default=96,
                        type=int)
    parser.add_argument("-w",
                        "--num_workers",
                        help="workers number",
                        default=3,
                        type=int)
    parser.add_argument("-d",
                        "--data_mode",
                        help="use which database, [vgg, ms1m, emore, concat]",
                        default='emore',
                        type=str)
    parser.set_defaults(
        epochs=8,
        net='ir_se',
        net_depth=50,
        lr=1e-3,
        batch_size=96,
        num_workers=3,
        data_mode="ms1m",
    )
    args = parser.parse_args()

    conf = get_config(training=True)

    if args.net_mode == 'mobilefacenet':
        conf.use_mobilfacenet = True
    else:
        conf.net_mode = args.net_mode
        conf.net_depth = args.net_depth

    conf.lr = args.lr
    conf.batch_size = args.batch_size
    conf.num_workers = args.num_workers
    conf.data_mode = args.data_mode

    learner = face_learner(conf, inference=False, need_loader=False)
    # print(learner.find_lr(conf, ))
    # learner.train(conf, args.epochs)

    for i in range(1):
        for imgs, labels in learner.loader:
            imgs = imgs.cuda()
            labels = labels.cuda()
            print('ok', imgs.shape, labels.shape)
            embeddings = learner.model(imgs)
            thetas = learner.head(embeddings, labels)
            loss = conf.ce_loss(thetas, labels)
            import torch.autograd
            # fgg g
            grad = torch.autograd.grad(loss,
                                       embeddings,
                                       retain_graph=False,
                                       create_graph=False,
                                       only_inputs=True)[0].detach()
            embeddings_adv = embeddings + 0.01 * grad
            thetas_adv = learner.head(embeddings_adv, labels)
            loss_adv = conf.ce_loss(thetas_adv, labels)
            loss_adv.backward()
Example #29
def main():
    args = parser.parse_args()
    print('args.data_url', args.data_url)
    if conf.cloud:
        mox.file.copy_parallel(args.data_url, '/cache/face_train/')
        args.data_url = '/cache/face_train/'
        conf.use_data_folder = args.data_url

    if args.work_path:
        conf.work_path = Path(args.work_path)
        conf.model_path = conf.work_path / 'models'
        conf.log_path = conf.work_path / 'log'
        conf.save_path = conf.work_path / 'save'
    else:
        args.work_path = conf.work_path
    conf.update(args.__dict__)
    if conf.local_rank is not None:
        torch.cuda.set_device(conf.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method="env://")
        if torch.distributed.get_rank() != 0:
            set_stream_logger(logging.WARNING)
    # if osp.exists(conf.save_path):
    #     logging.info('ok')
    #     exit(1)
    # simplify_conf(conf)
    # exit(0)
    from Learner import face_learner
    # decs = msgpack_load('decs.pk')
    # conf.decs = decs
    learner = face_learner(conf, )
    # fstrs = learner.list_fixed_strs('work_space/sglpth.casia/models')
    # stps = learner.list_steps('work_space/sglpth.casia/models')
    # fstr = fstrs[np.argmax(stps)]
    # stt_dct = torch.load('work_space/sglpth.casia/models/model_' + fstr)
    # learner.model.module.load_state_dict_sglpth(stt_dct)
    # print(fstrs, stps, fstr, )

    if conf.get('load_from'):
        # p= 'r100.128.retina.clean.arc',
        # 'hrnet.retina.arc.3',
        # 'mbv3.retina.arc',
        # 'mbfc.lrg.retina.arc.s48',
        # 'effnet.casia.arc',
        # 'mbfc.retina.cl.distill.cont2',
        # 'mbfc2',
        # 'r18.l2sft',
        # 'r18.adamrg',
        # 'mbfc.se.elu.ms1m.radam.1',
        # 'mbfc.se.elu.specnrm.allbutdw.ms1m.adam.1',
        # 'mbfc.se.prelu.specnrm.ms1m.cesigsft.1',
        # 'irse.elu.ms1m',
        # 'irse.elu.casia.arc.2048',
        p = Path(conf.load_from)
        print(
            'try to load from ',
            p,
        )
        learner.load_state(
            resume_path=p,
            load_optimizer=False,
            load_head=conf.head_load,  # todo note!
            load_imp=False,
            latest=True,
            strict=False,
        )
    # simplify_conf(conf)
    learner.cloud_sync_log()
    # res = learner.validate_ori(conf, valds_names=('cfp_fp', ))
    # exit(0)
    # learner.calc_img_feas(out='work_space/mbfc.crash.h5')
    # log_lrs, losses = learner.find_lr(
    #                                   num=999,
    #                                   bloding_scale=1000)
    # losses[np.isnan(losses)] = 999
    # best_lr = 10 ** (log_lrs[np.argmin(losses)])
    # print('best lr is ', best_lr)
    # conf.lr = best_lr
    # exit(0)

    # learner.init_lr()
    # conf.tri_wei = 0
    # log_conf(conf)
    # learner.train(conf, 1, name='xent')

    learner.init_lr()
    simplify_conf(conf)
    if conf.head_init:
        learner.head_initialize()
    if conf.warmup:
        learner.warmup(conf, conf.warmup)
    learner.train_simple(conf, conf.epochs)

    # learner.train_dist(conf, conf.epochs)
    if conf.net_mode == 'sglpth':
        decs = learner.model.module.get_decisions()
        msgpack_dump(decs, 'decs.pk')

    # learner.train_cotching(conf, conf.epochs)
    # learner.train_cotching_accbs(conf, conf.epochs)
    # learner.train_ghm(conf, conf.epochs)
    # learner.train_with_wei(conf, conf.epochs)
    # learner.train_use_test(conf, conf.epochs)

    # res = learner.validate_ori(conf, )
    if not conf.cloud:
        from tools.test_ijbc3 import test_ijbc3
        res = test_ijbc3(conf, learner)
        tpr6, tpr4, tpr3 = res[0][1], res[1][1], res[2][1]
        learner.writer.add_scalar('ijbb/6', tpr6, learner.step)
        learner.writer.add_scalar('ijbb/4', tpr4, learner.step)
        learner.writer.add_scalar('ijbb/3', tpr3, learner.step)
    learner.writer.close()

    if conf.never_stop:
        img = torch.randn((conf.batch_size // 2, 3, conf.input_size,
                           conf.input_size)).cuda()
        learner.model.eval()
        logging.info('never stop')
        while True:
            _ = learner.model(img)
Example #30
 def update_facebank(self,conf):
     mtcnn = MTCNN()
     learner = face_learner(conf,True)
     learner.load_state(conf,'final.pth',True,True)
     learner.model.eval()
     _, _ = prepare_facebank(conf,learner.model,mtcnn,False)