Example #1
 def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
     self.model.eval()
     idx = 0
     entry_num = carray.size()[0]
     embeddings = np.zeros([entry_num, conf.embedding_size])
     with torch.no_grad():
         while idx + conf.batch_size <= entry_num:
             batch = carray[idx:idx + conf.batch_size]
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.cuda()) + self.model(flipped.cuda())
                 embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch).cpu().detach().numpy()
             else:
                 embeddings[idx:idx + conf.batch_size] = self.model(batch.cuda()).cpu().detach().numpy()
             idx += conf.batch_size
         if idx < entry_num:
             batch = carray[idx:]
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.cuda()) + self.model(flipped.cuda())
                 embeddings[idx:] = l2_norm(emb_batch).cpu().detach().numpy()
             else:
                 embeddings[idx:] = self.model(batch.cuda()).cpu().detach().numpy()
     tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
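
Every example in this collection leans on two helpers that are never shown: l2_norm and hflip_batch. Below is a minimal sketch of plausible definitions, consistent with how they are called here; the exact implementations in the source repositories may differ.

import torch

def l2_norm(x, axis=1):
    # Scale each row (or the given axis) of x to unit Euclidean length.
    norm = torch.norm(x, 2, axis, keepdim=True)
    return torch.div(x, norm)

def hflip_batch(batch):
    # Horizontally flip a batch of NCHW image tensors (mirror the width axis).
    return torch.flip(batch, dims=[3])

With these, the TTA branch above computes l2_norm(f(x) + f(hflip(x))): the renormalized sum of the embeddings of an image and its mirror image.
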
Example #2
 def get_embeddings(self, conf, carray, tta=False):
     self.model.eval()
     idx = 0
     embeddings = np.zeros([len(carray), conf.embedding_size])
     with torch.no_grad():
         while idx + conf.batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + conf.batch_size])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(
                     flipped.to(conf.device))
                 embeddings[idx:idx +
                            conf.batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + conf.batch_size] = self.model(
                     batch.to(conf.device)).cpu()
             idx += conf.batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(
                     flipped.to(conf.device))
                 embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
     return embeddings
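
The while-loop-plus-tail pattern above (full batches first, then one short leftover slice) can be expressed with a single range() loop. A sketch under the assumption that model maps an N x C x H x W tensor to N x emb_size embeddings; all names here are hypothetical.

import numpy as np
import torch

def batched_embeddings(model, carray, batch_size, emb_size, device='cpu'):
    out = np.zeros([len(carray), emb_size])
    with torch.no_grad():
        for start in range(0, len(carray), batch_size):
            batch = torch.tensor(carray[start:start + batch_size])
            # size the assignment by the actual batch, since the last
            # slice may be shorter than batch_size
            out[start:start + batch.size(0)] = model(batch.to(device)).cpu()
    return out
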
 def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
     self.model.eval()
     idx = 0
     embeddings = np.zeros([len(carray), conf.embedding_size])
     with torch.no_grad():
         while idx + conf.batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + conf.batch_size])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(
                     flipped.to(conf.device))
                 embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + conf.batch_size] = self.model(
                     batch.to(conf.device)).cpu()
             idx += conf.batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 flipped = hflip_batch(batch)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(
                     flipped.to(conf.device))
                 embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
     tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                    nrof_folds)
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
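
The external evaluate(embeddings, issame, nrof_folds) helper is not included in this snippet. In this family of codebases the embeddings are stored as consecutive pairs, and verification accuracy is the fraction of pairs whose squared distance lands on the correct side of a threshold. A much-simplified sketch of that core (the real helper additionally does k-fold threshold selection and returns tpr/fpr curves):

import numpy as np

def pair_accuracy(embeddings, issame, threshold):
    # rows 2i and 2i+1 form the i-th verification pair
    e1, e2 = embeddings[0::2], embeddings[1::2]
    dist = np.sum(np.square(e1 - e2), axis=1)
    pred = dist < threshold  # small distance -> predicted "same person"
    return np.mean(pred == np.asarray(issame))
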
Example #4
def prepare_facebank(conf, imlst, model, mtcnn, tta = True, save = False):
    model.eval()
    #embeddings =  []
    ftoid = {}
    idinfo = []
    idx = 0
    embs = []
    for classnm, files in imlst.items():
        for f in files:
            if not Path(f).is_file():
                print('File {} not found'.format(f))
                continue
            else:
                try:
                    img = Image.open(f).convert('RGB')
                except Exception:
                    print('Loading failed for {}'.format(f))
                    continue
                try:
                    img = mtcnn.align(img)
                except Exception:
                    img = img.resize((conf.input_size[0], conf.input_size[1]), Image.LANCZOS)
                    #print('mtcnn failed for {}'.format(f))
                #data = np.array((cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2GRAY),)*3).T
                #img = Image.fromarray(data.astype(np.uint8))
                data = np.asarray(img)
                # reverse the channel order (RGB -> BGR) before embedding
                img = Image.fromarray(data[:, :, ::-1].astype(np.uint8))
                
                ftoid[f] = len(embs)
                idinfo.append((f,classnm))
                with torch.no_grad():
                    if tta:
                        mirror = trans.functional.hflip(img)
                        emb = l2_norm(model(conf.test_transform(img).unsqueeze(0).to(conf.device)))
                        emb_mirror = l2_norm(model(conf.test_transform(mirror).unsqueeze(0).to(conf.device)))
                        embs.append(l2_norm(emb + emb_mirror))
                    else:                        
                        embs.append(l2_norm(model(conf.test_transform(img).unsqueeze(0).to(conf.device))))
        #embedding = l2_norm(torch.cat(embs).mean(0,keepdim=True))
        #embeddings.append(embedding)
        #names.append(classnm)
    #embeddings = torch.cat(embeddings)
    embeddings = torch.cat(embs)
    if save:
        torch.save(embeddings, conf.facebank_path/'facebank.pth')
        with open(conf.facebank_path/'ftoid.pkl', 'wb') as outfile:
            pickle.dump(ftoid, outfile, protocol=pickle.HIGHEST_PROTOCOL)
        np.save(conf.facebank_path/'idinfo', idinfo)
    return embeddings,ftoid,idinfo
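
A toy illustration of the bookkeeping invariant maintained by the loop above: ftoid maps a file to its row in the embedding matrix, and idinfo maps that row back to (file, class), so the two structures always stay index-aligned.

ftoid, idinfo = {}, []
for f, classnm in [('a.jpg', 'alice'), ('b.jpg', 'bob')]:
    ftoid[f] = len(idinfo)
    idinfo.append((f, classnm))
assert idinfo[ftoid['a.jpg']] == ('a.jpg', 'alice')
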
def extract_single_image(file,conf, model, mtcnn, tta = True):
    model.eval()
    embeddings =  []
    names = []
    embs = []
    img = Image.open(file)
    try:
        # if img.size != (112, 112):
        #     img = mtcnn.align(img)
        with torch.no_grad():
            if tta:
                mirror = trans.functional.hflip(img)
                emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
    except Exception as e:
        print('load error: {}'.format(e))
    
    if len(embs) == 0:
        return None, None
    embedding = torch.cat(embs).mean(0,keepdim=True)
    embeddings.append(embedding)
    names.append(file.split('/')[-1])
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    return embeddings, names
    def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        return min_idx, minimum
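
The distance computation in infer relies on broadcasting: [m, 512, 1] minus [1, 512, n] yields [m, 512, n], and summing the squares over dim 1 leaves an [m, n] matrix of squared distances. A standalone check of that trick, alongside the equivalent torch.cdist form:

import torch

source = torch.randn(4, 512)   # m probe embeddings
target = torch.randn(10, 512)  # n facebank embeddings

diff = source.unsqueeze(-1) - target.transpose(1, 0).unsqueeze(0)
dist = torch.sum(torch.pow(diff, 2), dim=1)  # [m, n] squared L2 distances

dist2 = torch.cdist(source, target).pow(2)   # same values, less peak memory
assert torch.allclose(dist, dist2, rtol=1e-4, atol=1e-3)
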
    def binfer(self, conf, faces, target_embs, tta=False):
        '''
        return raw scores for every class 
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))
            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        # print(dist)
        return dist.detach().cpu().numpy()
Example #8
    def evaluate(self, conf, val_loader, issame, nrof_folds=5, tta=False):
        self.model.model.eval()
        embeddings = torch.zeros(
            [len(val_loader.dataset), conf.embedding_size])
        with torch.no_grad():
            ptr = 0  # running offset into the embeddings buffer
            for data in val_loader:  # data: batch_size * 3 * 112 * 112
                batch_size = data.size(0)
                if tta:
                    flipped = hflip_batch(data)
                    emb_batch = self.model.model(
                        data.to(conf.device)) + self.model.model(
                            flipped.to(conf.device))
                    embeddings[ptr:ptr + batch_size] = l2_norm(emb_batch)
                else:
                    embeddings[ptr:ptr + batch_size] = self.model.model(
                        data.to(conf.device))  # embeddings: batch_size * 512
                ptr += batch_size

        tpr, fpr, accuracy, best_thresholds = calculate_roc(
            self.thresholds, embeddings, issame, nrof_folds)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)

        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
def extract_folder_image(path, conf, model, mtcnn, tta = True):
    model.eval()
    embeddings =  []
    names = []
    embs = []
    for file in path.iterdir():
        if not file.is_file():
            continue
        else:
            try:
                img = Image.open(file)
                # if img.size != (112, 112):
                #     img = mtcnn.align(img)
                with torch.no_grad():
                    if tta:
                        mirror = trans.functional.hflip(img)
                        emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
                        emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                        embs.append(l2_norm(emb + emb_mirror))
                    else:
                        embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
            except: 
                continue
    if embs == []: 
        return None, None        
    embedding = torch.cat(embs).mean(0,keepdim=True)
    embeddings.append(embedding)
    names.append(path.name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    return embeddings, names
Example #10
def extract_feature(conf, model, loader, tta=False):
    features = torch.FloatTensor()
    gts = torch.LongTensor()
    for (inputs, labels) in tqdm(iter(loader)):
        gts = torch.cat((gts, labels), 0)

        if tta:
            flipped = hflip_batch(inputs)
            outputs = model(inputs.cuda())[1] + model(flipped.cuda())[1]
            feature = l2_norm(outputs).cpu()
        else:
            outputs = model(inputs.cuda())[1]
            feature = l2_norm(outputs).cpu()

        features = torch.cat((features, feature), 0)

    return features, gts
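
extract_feature concatenates onto a growing tensor on every iteration, which recopies the whole buffer each time, and it runs without model.eval() or torch.no_grad(). A sketch of the same extraction with one concatenation at the end, reusing the l2_norm and hflip_batch helpers sketched after Example #1 (that the model's second output is the embedding is taken from the snippet above):

import torch

def extract_feature_fast(conf, model, loader, tta=False):
    model.eval()
    feats, gts = [], []
    with torch.no_grad():
        for inputs, labels in loader:
            if tta:
                outputs = model(inputs.cuda())[1] + model(hflip_batch(inputs).cuda())[1]
            else:
                outputs = model(inputs.cuda())[1]
            feats.append(l2_norm(outputs).cpu())
            gts.append(labels)
    return torch.cat(feats), torch.cat(gts)
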
Example #11
    def train(self, conf, epochs):
        self.model.train()
        # logger.debug('model {}'.format(self.model))
        running_loss = 0.

        # resume training from a checkpoint
        if conf.resume:
            logger.debug('resume...')
            self.load_state(conf, 'ir_se50.pth', from_save_folder=True)

        logger.debug('optimizer {}'.format(self.optimizer))
        for epoch in range(epochs):
            logger.debug('epoch {} started'.format(epoch))
            for imgs, labels in tqdm(iter(self.loader)):
                if self.step in self.milestones:
                    self.schedule_lr()
                imgs = imgs.cuda()
                labels = labels.cuda()

                self.optimizer.zero_grad()
                embeddings = self.model(imgs)
                inp_sp, inp_sn = convert_label_to_similarity(l2_norm(embeddings), labels)
                loss = self.head(inp_sp, inp_sn)
                # loss = self.head(embeddings, labels)

                if conf.fp16:  # with fp16, backward through the amp-scaled loss
                    with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                running_loss += loss.item()
                self.optimizer.step()
                
                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.
                
                if self.step % self.evaluate_every == 0 and self.step != 0:
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.agedb_30, self.agedb_30_issame)
                    self.board_val('agedb_30', accuracy, best_threshold, roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.lfw, self.lfw_issame)
                    self.board_val('lfw', accuracy, best_threshold, roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.cfp_fp, self.cfp_fp_issame)
                    self.board_val('cfp_fp', accuracy, best_threshold, roc_curve_tensor)
                    logger.debug('epoch {}, step {}, loss {:.4f}, acc {:.4f}'
                                 .format(epoch, self.step, loss.item(), accuracy))
                    self.model.train()
                if conf.local_rank == 0 and epoch >= 10 and self.step % self.save_every == 0 and self.step != 0:
                # if conf.local_rank == 0 and self.step % self.save_every == 0 and self.step != 0:
                    self.save_state(conf, epoch, accuracy)
                    
                self.step += 1
                
        self.save_state(conf, epoch, accuracy, to_save_folder=True, extra='final')
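
schedule_lr is not shown in any of these excerpts. Given that find_lr in Example #30 writes learning rates directly into optimizer.param_groups, a typical milestone step-decay consistent with these training loops would look like the following (an assumption, not confirmed repo code):

def schedule_lr(optimizer, factor=10.):
    # At each milestone, divide every parameter group's learning rate.
    for params in optimizer.param_groups:
        params['lr'] /= factor
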
Example #12
    def extract_fea_from_img(img):
        img = img.copy()[..., ::-1].reshape(112, 112, 3)  # BGR -> RGB
        img = Image.fromarray(img)
        mirror = transforms.functional.hflip(img)
        with torch.no_grad():
            fea = learner.model(conf.test_transform(img).cuda().unsqueeze(0))
            fea_mirror = learner.model(conf.test_transform(mirror).cuda().unsqueeze(0))
            fea = l2_norm(fea + fea_mirror).cpu().numpy().reshape(512)

        return fea
def get_face_feature(conf, model, img, tta=False):
    if tta:
        mirror = trans.functional.hflip(img)
        emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
        emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
        feature = l2_norm(emb + emb_mirror)
    else:
        feature = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
        #feature = model(conf.test_transform(img).to(conf.device))

    return feature
Example #14
 def get_feature(self, pilImg, tta=False):
     if tta:
         mirror = trans.functional.hflip(pilImg)
         emb = self.model(
             self.test_transform(pilImg).to(self.device).unsqueeze(0))
         emb_mirror = self.model(
             self.test_transform(mirror).to(self.device).unsqueeze(0))
         embeddings = l2_norm(emb + emb_mirror)
     else:
         embeddings = self.model(
             self.test_transform(pilImg).to(self.device).unsqueeze(0))
     return embeddings
Example #15
def prepare_facebank(conf, model, D, tta = True):

    model.eval()
    embeddings =  []
    names = ['Unknown']

    for path in conf.facebank_path.iterdir():
        if path.is_file():
            continue
        else:
            embs = []
            for file in path.iterdir():
                if not file.is_file():
                    continue
                else:
                    # try:
                    #     img = Image.open(file)
                    # except:
                    #     continue

                    img = np.array(Image.open(file))
                    _, img = D.detect(img)

                    try:
                        # cv2.resize returns an ndarray; convert it back to a
                        # PIL image so the transforms below can consume it
                        img = Image.fromarray(cv2.resize(np.array(img[0]), (112, 112)))
                    except Exception:
                        print(file)
                        continue

                    # if img == [None] or img == []:
                    #     print('remove ' + str(file))
                    #     os.remove(file)
                    #     break
                    # img = img[0]

                    with torch.no_grad():
                        if tta:
                            mirror = trans.functional.hflip(img)
                            emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
                            emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                            embs.append(l2_norm(emb + emb_mirror))
                        else:                        
                            embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0,keepdim=True)
        embeddings.append(embedding)
        names.append(path.name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path/'facebank.pth')
    np.save(conf.facebank_path/'names', names)
    return embeddings, names
Example #16
    def evaluate(self, data_dir, names_idx, target_embs, tta=False):
        '''
        Score every image under data_dir against the facebank embeddings.
        data_dir : directory with one sub-folder of images per person
        names_idx : mapping from person name to class index
        target_embs : [n, 512] computed embeddings of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        self.model.eval()
        score_names = []
        score = []
        wrong_names = dict()
        test_dir = data_dir
        for path in test_dir.iterdir():
            if path.is_file():
                continue
            # print(path)
            for fil in path.iterdir():
                # print(fil)
                orig_name = fil.name.strip().split('.')[0]

                for name in names_idx.keys():
                    if name in orig_name:
                        score_names.append(names_idx[name])

                img = Image.open(str(fil))
                with torch.no_grad():
                    if tta:
                        mirror = trans.functional.hflip(img)
                        emb = self.model(
                            self.conf.test_transform(img).to(
                                self.conf.device).unsqueeze(0))
                        emb_mirror = self.model(
                            self.conf.test_transform(mirror).to(
                                self.conf.device).unsqueeze(0))
                        emb = l2_norm(emb + emb_mirror)
                    else:
                        emb = self.model(
                            self.conf.test_transform(img).to(
                                self.conf.device).unsqueeze(0))

                diff = emb.unsqueeze(-1) - target_embs.transpose(
                    1, 0).unsqueeze(0)
                dist = torch.sum(torch.pow(diff, 2), dim=1).cpu().numpy()
                score.append(np.exp(-dist))

                pred = np.argmax(score[-1])
                label = score_names[-1]
                if pred != label:
                    wrong_names[orig_name] = pred

        return score, score_names, wrong_names
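
The scoring line converts squared distances to similarities with np.exp(-dist), a strictly decreasing function of distance, so argmax over scores picks the same class as argmin over distances. A quick check:

import numpy as np

dist = np.array([0.9, 0.3, 1.7])  # squared distances to three classes
score = np.exp(-dist)
assert np.argmax(score) == np.argmin(dist)
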
Example #17
    def train(self, conf, epochs):
        self.model.train()
        running_loss = 0.
        if conf.restore:
            self.load_state(conf, conf.restore, True, False)
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            pbar = tqdm(iter(self.loader))
            for imgs, labels, ages, fns in pbar:
                imgs = imgs.to(conf.device)
                labels = labels.to(conf.device)
                self.optimizer.zero_grad()
                embeddings = l2_norm(self.model(imgs))
                thetas = self.head(embeddings, labels)
                loss = conf.ce_loss(thetas, labels) + conf.lam * conf.age_loss(
                    torch.norm(embeddings, 2, 1, True),
                    ages.to(conf.device).float().unsqueeze(-1))
                loss.backward()
                running_loss += loss.item()
                self.optimizer.step()
                pbar.set_description('loss {:.4f}'.format(loss.item()))
                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                if self.step % self.evaluate_every == 0 and self.step != 0:
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.agedb_30, self.agedb_30_issame)
                    self.board_val('agedb_30', accuracy, best_threshold,
                                   roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.lfw, self.lfw_issame)
                    self.board_val('lfw', accuracy, best_threshold,
                                   roc_curve_tensor)
                    accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                        conf, self.cfp_fp, self.cfp_fp_issame)
                    self.board_val('cfp_fp', accuracy, best_threshold,
                                   roc_curve_tensor)
                    self.model.train()
                if self.step % self.save_every == 0 and self.step != 0:
                    self.save_state(conf, accuracy)

                self.step += 1

        self.save_state(conf, accuracy, to_save_folder=True, extra='final')
Example #18
def get_embed(img_path, learner, conf):
    img = Image.open(img_path).convert('RGB').resize((112, 112), Image.LANCZOS)
    # train_transform = trans.Compose([
    #     trans.RandomHorizontalFlip(),
    #     trans.ToTensor(),
    #     trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    # ])
    mirror = trans.functional.hflip(img)
    emb = learner.model(conf.test_transform(img).to(conf.device).unsqueeze(0))
    emb_mirror = learner.model(
        conf.test_transform(mirror).to(conf.device).unsqueeze(0))
    return l2_norm(emb + emb_mirror)
def prepare_facebank(conf, model, mtcnn, tta=True):
    model.eval()
    embeddings = []
    names = ['Unknown']

    for root, dirs, files in os.walk(conf.facebank_path):
        if len(dirs) == 0:
            break

        for name in dirs:
            embs = []
            for name_root, dirs, files in os.walk(os.path.join(root, name)):
                for file in files:
                    try:
                        img = Image.open(os.path.join(name_root, file))
                    except:
                        print('open {} failed'.format(file))
                        continue
                    if img.size != (112, 112):
                        img = mtcnn.align(img)
                    with torch.no_grad():
                        if tta:
                            mirror = trans.functional.hflip(img)
                            emb = model(
                                conf.test_transform(img).to(
                                    conf.device).unsqueeze(0))
                            emb_mirror = model(
                                conf.test_transform(mirror).to(
                                    conf.device).unsqueeze(0))
                            embs.append(l2_norm(emb + emb_mirror))
                        else:
                            embs.append(
                                model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0)))

            if len(embs) == 0:
                continue
            embedding = torch.cat(embs).mean(0, keepdim=True)
            embeddings.append(embedding)
            names.append(name)
            print('--------register person ---------- {}'.format(name))

    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path + '/facebank.pth')
    np.save(conf.facebank_path + '/names', names)
    return embeddings, names
Example #20
def prepare_facebank(conf, model, mtcnn, tta=True):
    model.eval()
    embeddings = []
    names = ['Unknown']
    for path in tqdm(conf.facebank_path.iterdir()):
        if path.is_file():
            continue
        else:
            embs = []
            for file in path.iterdir():
                if not file.is_file():
                    continue
                else:
                    try:
                        img = Image.open(file)
                    except:
                        continue
                    if img.size != (112, 112):
                        try:
                            img = mtcnn.align(img)
                        except:
                            continue
                    with torch.no_grad():
                        if tta:
                            mirror = trans.functional.hflip(img)
                            emb = model(
                                conf.test_transform(img).to(
                                    conf.device).unsqueeze(0))
                            emb_mirror = model(
                                conf.test_transform(mirror).to(
                                    conf.device).unsqueeze(0))
                            embs.append(l2_norm(emb + emb_mirror))
                        else:
                            embs.append(
                                model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0)))
        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        embeddings.append(embedding)
        names.append(path.name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path / 'facebank.pth')
    np.save(conf.facebank_path / 'names', names)
    return embeddings, names
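
The facebank prototype here is the plain mean of several embeddings (torch.cat(embs).mean(0, keepdim=True)). Note that the mean of unit-length vectors is generally shorter than unit length, so the stored prototypes are not themselves normalized; a toy demonstration:

import torch

embs = torch.nn.functional.normalize(torch.randn(5, 512), dim=1)
proto = embs.mean(0, keepdim=True)
print(proto.norm().item())  # typically well below 1.0
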
Example #21
def prepare_facebank(conf, model, mtcnn, tta=True):
    train_dir = conf.emore_folder / 'imgs'
    embeddings = []
    names = [d.name for d in os.scandir(train_dir) if d.is_dir()]
    names.sort()
    name_to_idx = {cls_name: i for i, cls_name in enumerate(names)}

    model.eval()
    for n in names:
        embs = []
        path = os.path.join(train_dir, n)
        file_list = os.listdir(path)
        for file in file_list:
            file_path = os.path.join(path, file)
            try:
                img = Image.open(file_path)
            except:
                continue

            if img.size != (112, 112):
                img = mtcnn.align(img)
            with torch.no_grad():
                if tta:
                    mirror = trans.functional.hflip(img)
                    emb = model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0))
                    emb_mirror = model(
                        conf.test_transform(mirror).to(
                            conf.device).unsqueeze(0))
                    embs.append(l2_norm(emb + emb_mirror))
                else:
                    embs.append(
                        model(
                            conf.test_transform(img).to(
                                conf.device).unsqueeze(0)))

        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        embeddings.append(embedding)
    embeddings = torch.cat(embeddings)
    torch.save(embeddings, train_dir / 'facebank.pth')
    np.save(train_dir / 'names', names)
    return embeddings, names, name_to_idx
def face_compare(conf, model, faces, target_embs, tta=False):
    embs = []
    results = []
    scores = []
    for img in faces:
        if tta:
            mirror = trans.functional.hflip(img)
            emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
            emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
            embs.append(l2_norm(emb + emb_mirror))
        else:
            embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
    for em in embs:
        result, score = check_features(em, target_embs)

        results.append(result)
        scores.append(score)
    
    return results, scores
Example #23
    def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hflip, that's all)
        '''
        embs = []
        for img in faces:
            if tta:
                mirror = trans.functional.hflip(img)
                emb = self.model(
                    conf.test_transform(img).to(conf.device).unsqueeze(0))
                emb_mirror = self.model(
                    conf.test_transform(mirror).to(conf.device).unsqueeze(0))
                embs.append(l2_norm(emb + emb_mirror))

            else:
                embs.append(
                    self.model(
                        conf.test_transform(img).to(conf.device).unsqueeze(0)))
        source_embs = torch.cat(embs)

        diff = source_embs.unsqueeze(-1) - target_embs.transpose(
            1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        print(dist)
        minimum, min_idx = torch.min(dist, dim=1)
        # print(minimum)
        print('Euclidean match indices: {}'.format(min_idx))
        sims = np.dot(source_embs.detach().cpu().numpy(),
                      target_embs.detach().cpu().numpy().T)
        print(sims)
        minimum = sims
        min_idx = [int(np.argmax(sim)) for sim in sims]
        print('Similarity match indices: {}'.format(min_idx))
        # min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1
        return min_idx, minimum
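
infer prints both a Euclidean ranking and a dot-product ranking. For L2-normalized embeddings the two always agree, because the squared distance is a monotone function of the cosine similarity: ||a - b||^2 = 2 - 2 a.b. A quick numeric check:

import numpy as np

a = np.random.randn(512); a /= np.linalg.norm(a)
b = np.random.randn(512); b /= np.linalg.norm(b)
assert np.isclose(np.sum((a - b) ** 2), 2 - 2 * a.dot(b))
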
Example #24
    if len(boxes)>0:
        scores = []
        for box in boxes:
            scores.append(box[4])
        scores = np.array(scores)
        best_face_index = np.argmax(scores)
        best_face_box = boxes[best_face_index]
        cv2.rectangle(frame, (int(best_face_box[0]),int(best_face_box[1])), (int(best_face_box[2]),int(best_face_box[3])), (0,0,255), 3)
        warped_face2 = faces[best_face_index]
#        warped_face2 = np.array(warped_face)[...,::-1]
    else:
        print("NO face detect in Image2")

    embed1 = learner.model(conf.test_transform(warped_face1).to(conf.device).unsqueeze(0))
    embed2 = learner.model(conf.test_transform(warped_face2).to(conf.device).unsqueeze(0))
    embed1 = l2_norm(embed1)
    embed2 = l2_norm(embed2)
    
    pdist = torch.nn.PairwiseDistance(p=2)
    dist1 = pdist(embed1,embed2)
    dist1 = dist1.detach().cpu()
    
    
    embed1 = embed1.detach().cpu().numpy()
    embed2 = embed2.detach().cpu().numpy()
    
    dist = np.sqrt(np.sum(np.square(embed1 - embed2)))
    print(dist)
    print(dist1)

Example #25
    def evaluate_child(self, conf, carray, issame, nrof_folds=10, tta=True):
        self.model.eval()
        self.growup.eval()
        self.discriminator.eval()
        idx = 0
        embeddings1 = np.zeros([len(carray) // 2, conf.embedding_size])
        embeddings2 = np.zeros([len(carray) // 2, conf.embedding_size])

        carray1 = carray[::2, ]
        carray2 = carray[1::2, ]

        with torch.no_grad():
            while idx + conf.batch_size <= len(carray1):
                batch = torch.tensor(carray1[idx:idx + conf.batch_size])
                if tta:
                    flipped = hflip_batch(batch)
                    emb_batch = self.growup(self.model(batch.to(conf.device))).cpu() + \
                                self.growup(self.model(flipped.to(conf.device))).cpu()
                    embeddings1[idx:idx +
                                conf.batch_size] = l2_norm(emb_batch).cpu()
                else:
                    embeddings1[idx:idx + conf.batch_size] = self.growup(
                        self.model(batch.to(conf.device))).cpu()
                idx += conf.batch_size
            if idx < len(carray1):
                batch = torch.tensor(carray1[idx:])
                if tta:
                    flipped = hflip_batch(batch)
                    emb_batch = self.growup(self.model(batch.to(conf.device))).cpu() + \
                                self.growup(self.model(flipped.to(conf.device))).cpu()
                    embeddings1[idx:] = l2_norm(emb_batch).cpu()
                else:
                    embeddings1[idx:] = self.growup(
                        self.model(batch.to(conf.device))).cpu()

            idx = 0  # reset the offset before embedding the second half
            while idx + conf.batch_size <= len(carray2):
                batch = torch.tensor(carray2[idx:idx + conf.batch_size])
                if tta:
                    flipped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)).cpu() + \
                                self.model(flipped.to(conf.device)).cpu()
                    embeddings2[idx:idx +
                                conf.batch_size] = l2_norm(emb_batch).cpu()
                else:
                    embeddings2[idx:idx + conf.batch_size] = self.model(
                        batch.to(conf.device)).cpu()
                idx += conf.batch_size
            if idx < len(carray2):
                batch = torch.tensor(carray2[idx:])
                if tta:
                    flipped = hflip_batch(batch)
                    emb_batch = self.model(batch.to(conf.device)).cpu() + \
                                self.model(flipped.to(conf.device)).cpu()
                    embeddings2[idx:] = l2_norm(emb_batch).cpu()
                else:
                    embeddings2[idx:] = self.model(batch.to(conf.device)).cpu()

        tpr, fpr, accuracy, best_thresholds = evaluate_child(
            embeddings1, embeddings2, issame, nrof_folds)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = transforms.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
Example #26
def prepare_facebank(conf,
                     model,
                     mtcnn,
                     tta=True,
                     load_from_custom_dir=False,
                     dir_name_dict=None):
    '''
    :param conf: configurations
    :param model: face embedding model
    :param mtcnn: face detector used to align inputs
    :param tta: test time augmentation
    :param load_from_custom_dir: whether to load data from a custom directory
    :param dir_name_dict: mapping from each person's name to the folder
    containing all their photos
    :return: embeddings of faces and their names (both lists)
    '''
    model.eval()
    embeddings = []
    names = [conf.unknown_constant]

    if load_from_custom_dir:
        data_dirs_dict = dir_name_dict
    else:
        data_dirs_dict = {
            f: os.path.join(conf.facebank_path, f)
            for f in os.listdir(conf.facebank_path)
        }
    # for path in conf.facebank_path.iterdir():
    for name in data_dirs_dict:
        path = data_dirs_dict[name]
        if os.path.isfile(path):
            continue
        else:
            embs = []
            files = [os.path.join(path, f) for f in os.listdir(path)]
            for file in files:
                if not os.path.isfile(file):
                    continue
                else:
                    try:
                        img = Image.open(file)
                    except:
                        continue
                    if img.size != (112, 112):
                        img = mtcnn.align(img)
                    with torch.no_grad():
                        if tta:
                            mirror = trans.functional.hflip(img)
                            emb = model(
                                conf.test_transform(img).to(
                                    conf.device).unsqueeze(0))
                            emb_mirror = model(
                                conf.test_transform(mirror).to(
                                    conf.device).unsqueeze(0))
                            embs.append(l2_norm(emb + emb_mirror))
                        else:
                            embs.append(
                                model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0)))
        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        embeddings.append(embedding)
        names.append(name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path + '/' + 'facebank.pth')
    np.save(conf.facebank_path + '/' + 'names', names)
    return embeddings, names
Example #27
def prepare_facebank(conf, model, mtcnn, user_id, tta=True):
    model.eval()
    try:
        embeddings = torch.load(conf.model_path / 'overall.pth')
        #embeddings=embeddings.tolist()
        #print(embeddings[0])
        names = np.load(conf.model_path / 'names.npy')
        names = names.tolist()
    except Exception:
        embeddings = None  # no saved facebank yet
        names = ['unknown']
    for path in conf.facebank_path.iterdir():
        if (path == conf.facebank_path / user_id):
            if path.is_file():
                continue
            else:
                embs = []
                for file in path.iterdir():
                    if not file.is_file():
                        continue
                    else:
                        try:
                            img = Image.open(file)
                        except:
                            continue
                        if img.size != (112, 112):
                            img = mtcnn.align(img)
                        with torch.no_grad():
                            if tta:
                                mirror = trans.functional.hflip(img)
                                emb = model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0))
                                emb_mirror = model(
                                    conf.test_transform(mirror).to(
                                        conf.device).unsqueeze(0))
                                embs.append(l2_norm(emb + emb_mirror))
                            else:
                                embs.append(
                                    model(
                                        conf.test_transform(img).to(
                                            conf.device).unsqueeze(0)))
        else:
            continue

        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        name = path.name
        torch.save(embedding, str(conf.model_path) + '/' + name + '.pth')
        if (path.name in names):
            print("User Already Registered")
        else:
            #embeddings=np.append(embeddings, embedding)
            #embeddings.append(embedding)
            #names=np.append(names,path.name)
            embeddings = embedding if embeddings is None else torch.cat(
                (embeddings, embedding), 0)
            names.append(path.name)
            print(names)
    #embeddings=torch.FloatTensor(embeddings)
    #embeddings = torch.cat(embeddings)
    print(embeddings.size())
    names = np.array(names)
    torch.save(embeddings, conf.model_path / 'overall.pth')
    np.save(conf.model_path / 'names', names)
    return embeddings, names
Example #28
    def analyze_angle(self, conf, name):
        '''
        Only works on age labeled vgg dataset, agedb dataset
        '''

        angle_table = [{
            0: set(),
            1: set(),
            2: set(),
            3: set(),
            4: set(),
            5: set(),
            6: set(),
            7: set()
        } for i in range(self.class_num)]
        # batch = 0
        # _angle_table = torch.zeros(self.class_num, 8, len(self.loader)//conf.batch_size).to(conf.device)
        if conf.resume_analysis:
            self.loader = []  # skip recomputation; the table is loaded from disk below
        for imgs, labels, ages in tqdm(iter(self.loader)):

            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            ages = ages.to(conf.device)

            embeddings = self.model(imgs)
            if conf.use_dp:
                kernel_norm = l2_norm(self.head.module.kernel, axis=0)
                cos_theta = torch.mm(embeddings, kernel_norm)
                cos_theta = cos_theta.clamp(-1, 1)
            else:
                cos_theta = self.head.get_angle(embeddings)

            thetas = torch.abs(torch.rad2deg(torch.acos(cos_theta)))

            for i in range(len(thetas)):
                age_bin = 7
                if ages[i] < 26:
                    age_bin = 0 if ages[i] < 13 else 1 if ages[i] < 19 else 2
                elif ages[i] < 66:
                    age_bin = int(((ages[i] + 4) // 10).item())
                angle_table[labels[i]][age_bin].add(
                    thetas[i][labels[i]].item())

        if conf.resume_analysis:
            with open('analysis/angle_table.pkl', 'rb') as f:
                angle_table = pickle.load(f)
        else:
            with open('analysis/angle_table.pkl', 'wb') as f:
                pickle.dump(angle_table, f)

        count, avg_angle = [], []
        for i in range(self.class_num):
            count.append(
                [len(single_set) for single_set in angle_table[i].values()])
            avg_angle.append([
                sum(list(single_set)) / len(single_set)
                if len(single_set) else 0  # if set() size is zero, avg is zero
                for single_set in angle_table[i].values()
            ])

        count_df = pd.DataFrame(count)
        avg_angle_df = pd.DataFrame(avg_angle)

        with pd.ExcelWriter('analysis/analyze_angle_{}_{}.xlsx'.format(
                conf.data_mode, name)) as writer:
            count_df.to_excel(writer, sheet_name='count')
            avg_angle_df.to_excel(writer, sheet_name='avg_angle')
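
The age-bin branching in analyze_angle maps ages into eight bins. Restated as a standalone function with the implied bin boundaries (same arithmetic as above, just easier to read):

def age_bin(age):
    # 0: <13, 1: 13-18, 2: 19-25, 3: 26-35, 4: 36-45,
    # 5: 46-55, 6: 56-65, 7: 66+
    if age < 26:
        return 0 if age < 13 else 1 if age < 19 else 2
    if age < 66:
        return int((age + 4) // 10)
    return 7

assert [age_bin(a) for a in (12, 18, 25, 26, 35, 36, 65, 66)] == [0, 1, 2, 3, 3, 4, 6, 7]
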
Example #29
def prepare_facebank(conf, model, mtcnn, tta=True):
    model.eval()
    ctx = mx.gpu(0)
    symbol_file_path = 'face_detection/symbol_farm/symbol_10_320_20L_5scales_v2_deploy.json'
    model_file_path = 'face_detection/saved_model/configuration_10_320_20L_5scales_v2/train_10_320_20L_5scales_v2_iter_1800000.params'
    face_detector = predict.Predict(
        mxnet=mx,
        symbol_file_path=symbol_file_path,
        model_file_path=model_file_path,
        ctx=ctx,
        receptive_field_list=cfg.param_receptive_field_list,
        receptive_field_stride=cfg.param_receptive_field_stride,
        bbox_small_list=cfg.param_bbox_small_list,
        bbox_large_list=cfg.param_bbox_large_list,
        receptive_field_center_start=cfg.param_receptive_field_center_start,
        num_output_scales=cfg.param_num_output_scales)
    embeddings = []
    names = ['Unknown']
    for path in conf.facebank_path.iterdir():
        if path.is_file():
            continue
        else:
            embs = []
            for filename in path.iterdir():
                if not filename.is_file():
                    continue
                else:
                    try:
                        print(filename)
                        image = Image.open(filename)
                        img = image
                        # img = np.array(image)
                        # img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

                        # faces, infer_time = face_detector.predict(img, resize_scale=0.5, score_threshold=0.4, top_k=10000, \
                        #                     NMS_threshold=0.2, NMS_flag=True, skip_scale_branch_list=[])
                        # img_size = 112
                        # print(len(faces))
                        # margin = 0

                        # img_h, img_w, _ = np.shape(image)

                        # for i, bbox in enumerate(faces):
                        #     x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
                        #     xw1 = max(int(x1 - margin ), 0)
                        #     yw1 = max(int(y1 - margin ), 0)
                        #     xw2 = min(int(x2 + margin ), img_w - 1)
                        #     yw2 = min(int(y2 + margin ), img_h - 1)
                        #     face =  cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
                        #     # img = Image.fromarray(face[...,::-1])
                        #     img = face
                        #     break
                    except Exception as e:
                        print(e)
                        continue

                    if img.size != (112, 112):
                        img = mtcnn.align(img)
                    print(type(img))
                    # cv2.imshow('window', img)
                    # img.show()
                    # if cv2.waitKey() == ord('q'):
                    # break
                    with torch.no_grad():
                        if tta:
                            img = trans.functional.to_grayscale(
                                img, num_output_channels=3)
                            mirror = trans.functional.hflip(img)
                            emb = model(
                                conf.test_transform(img).to(
                                    conf.device).unsqueeze(0))
                            emb_mirror = model(
                                conf.test_transform(mirror).to(
                                    conf.device).unsqueeze(0))

                            # the two vertical-flip embeddings below are
                            # computed but never used (see the commented-out
                            # line further down); only emb + emb_mirror counts
                            v_mirror = trans.functional.vflip(mirror)
                            v_emb_mirror = model(
                                conf.test_transform(v_mirror).to(
                                    conf.device).unsqueeze(0))

                            v_img = trans.functional.vflip(img)
                            v_img_mirror = model(
                                conf.test_transform(v_img).to(
                                    conf.device).unsqueeze(0))

                            embs.append(l2_norm(emb + emb_mirror))
                            # embs.append(l2_norm(emb + emb_mirror + v_emb_mirror + v_img_mirror))
                            # embs.append(emb)
                            # embs.append(emb_mirror)
                            # embs.append(v_emb_mirror)
                            # embs.append(v_img_mirror)
                        else:
                            embs.append(
                                model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0)))
        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        embeddings.append(embedding)
        names.append(path.name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path / 'facebank.pth')
    np.save(conf.facebank_path / 'names', names)
    return embeddings, names
Example #30
    def find_lr(self,
                conf,
                init_value=1e-8,
                final_value=10.,
                beta=0.98,
                exploding_scale=3.,
                num=None):
        if not num:
            num = len(self.loader)
        mult = (final_value / init_value)**(1 / num)
        lr = init_value
        for params in self.optimizer.param_groups:
            params['lr'] = lr
        self.model.train()
        avg_loss = 0.
        best_loss = 0.
        batch_num = 0
        losses = []
        log_lrs = []
        for i, (imgs, labels, ages, fns) in tqdm(enumerate(self.loader),
                                                 total=num):

            imgs = imgs.to(conf.device)
            labels = labels.to(conf.device)
            batch_num += 1

            self.optimizer.zero_grad()

            embeddings = self.model(imgs)
            thetas = self.head(l2_norm(embeddings), labels)
            loss = conf.ce_loss(thetas, labels) + conf.lam * conf.age_loss(
                torch.norm(embeddings, 2, 1, True),
                ages.to(conf.device).float().unsqueeze(-1))
            #Compute the smoothed loss
            avg_loss = beta * avg_loss + (1 - beta) * loss.item()
            self.writer.add_scalar('avg_loss', avg_loss, batch_num)
            smoothed_loss = avg_loss / (1 - beta**batch_num)
            self.writer.add_scalar('smoothed_loss', smoothed_loss, batch_num)
            #Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > exploding_scale * best_loss:
                print('exited with best_loss at {}'.format(best_loss))
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses
            #Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            #Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            self.writer.add_scalar('log_lr', math.log10(lr), batch_num)
            #Do the SGD step
            #Update the lr for the next step

            loss.backward()
            self.optimizer.step()

            lr *= mult
            for params in self.optimizer.param_groups:
                params['lr'] = lr
            if batch_num > num:
                plt.plot(log_lrs[10:-5], losses[10:-5])
                return log_lrs, losses
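
Two pieces of arithmetic drive find_lr: the per-step multiplier is chosen so the learning rate sweeps geometrically from init_value to final_value in num steps, and the loss is smoothed with the bias-corrected exponential moving average familiar from Adam. Both in isolation:

import math

init_value, final_value, num = 1e-8, 10., 200
mult = (final_value / init_value) ** (1 / num)
assert math.isclose(init_value * mult ** num, final_value, rel_tol=1e-6)

beta, avg = 0.98, 0.0
for t, loss in enumerate([2.0, 1.5, 1.2], start=1):
    avg = beta * avg + (1 - beta) * loss
    smoothed = avg / (1 - beta ** t)  # corrects the zero-initialization bias
print(smoothed)
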