Example #1
 def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
     self.model.eval()
     idx = 0
     embeddings = np.zeros([len(carray), conf.embedding_size])
     with torch.no_grad():
         while idx + conf.batch_size <= len(carray):
             batch = torch.tensor(carray[idx:idx + conf.batch_size])
             if tta:
                 flipped = batch.flip(-1)  # horizontal flip for TTA (the original author notes this branch is untested)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(flipped.to(conf.device))
                 embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:idx + conf.batch_size] = self.model(batch.to(conf.device)).cpu()
             idx += conf.batch_size
         if idx < len(carray):
             batch = torch.tensor(carray[idx:])
             if tta:
                 flipped = batch.flip(-1)
                 emb_batch = self.model(batch.to(conf.device)) + self.model(flipped.to(conf.device))
                 embeddings[idx:] = l2_norm(emb_batch).cpu()
             else:
                 embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
     # `evaluate` here is the verification helper (cf. `from verifacation import evaluate` in Example #10)
     tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
     buf = gen_plot(fpr, tpr)
     roc_curve = Image.open(buf)
     roc_curve_tensor = trans.ToTensor()(roc_curve)
     return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
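
Every snippet on this page calls an `l2_norm` helper that is imported but never shown. A minimal sketch, consistent with the call sites here (`l2_norm(emb)` and `l2_norm(x, axis=1, need_norm=True)`) but not taken verbatim from any one of these repositories:

import torch

def l2_norm(input, axis=1, need_norm=False):
    # L2-normalize `input` along `axis`; optionally also return the norms
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    if need_norm:
        return output, norm
    return output
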
Example #2
 def forward(self, img, normalize=True, return_norm=False):
     if img.shape[-1] == 112:
         with torch.no_grad():
             # upsample 112x112 inputs by 2x to the resolution this backbone expects
             img = nn.functional.interpolate(img, scale_factor=2, mode='bilinear', align_corners=True)
     out = self.mod1(img)
     out = self.mod2(self.pool2(out))
     out = self.mod3(self.pool3(out))
     out = self.mod4(self.pool4(out))
     out = self.mod5(self.pool5(out))
     out = self.mod6(self.pool6(out))
     out = self.mod7(out)
     out = self.bn_out(out)
     out = self.output_layer(out)
     if hasattr(self, "classifier"):
         out = self.classifier(out)
     x = out
     x_norm, norm = l2_norm(x, axis=1, need_norm=True)
     if normalize:
         if return_norm:
             return x_norm, norm
         else:
             return x_norm  # the default one
     else:
         if return_norm:
             return x, norm
         else:
             return x
Example #3
def face2fea(img):  # input img is in BGR order
    img = to_image(img)
    mirror = torchvision.transforms.functional.hflip(img)
    with torch.no_grad():
        fea = learner.model(conf.test_transform(img).cuda().unsqueeze(0))
        fea_mirror = learner.model(
            conf.test_transform(mirror).cuda().unsqueeze(0))
        fea = l2_norm(fea + fea_mirror).cpu().numpy().reshape(512)
    return fea
Example #4
 def logits(self, features, normalize=True, return_norm=False):
     x = self.output_layer(features)
     x_norm, norm = l2_norm(x, axis=1, need_norm=True)
     if normalize:
         if return_norm:
             return x_norm, norm
         else:
             return x_norm
     else:
         if return_norm:
             return x, norm
         else:
             return x
Example #5
def prepare_facebank(conf, model, mtcnn, tta=True):
    model.eval()
    embeddings = []
    names = ['Unknown']
    for path in conf.facebank_path.iterdir():
        if path.is_file():
            continue
        else:
            embs = []
            for file in path.iterdir():
                if not file.is_file():
                    continue
                else:
                    try:
                        img = Image.open(file)
                    except Exception:  # skip files that cannot be read as images
                        continue
                    if img.size != (112, 112):
                        img = mtcnn.align(img)
                    with torch.no_grad():
                        if tta:
                            mirror = trans.functional.hflip(img)
                            emb = model(
                                conf.test_transform(img).to(
                                    conf.device).unsqueeze(0))
                            emb_mirror = model(
                                conf.test_transform(mirror).to(
                                    conf.device).unsqueeze(0))
                            embs.append(l2_norm(emb + emb_mirror))
                        else:
                            embs.append(
                                model(
                                    conf.test_transform(img).to(
                                        conf.device).unsqueeze(0)))
        if len(embs) == 0:
            continue
        embedding = torch.cat(embs).mean(0, keepdim=True)
        embeddings.append(embedding)
        names.append(path.name)
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, conf.facebank_path / 'facebank.pth')
    np.save(conf.facebank_path / 'names', names)
    return embeddings, names
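
As a companion to prepare_facebank, here is a hypothetical sketch of loading the saved facebank and matching a single query embedding against it. The file names follow the torch.save/np.save calls above and the distance computation mirrors infer() in Example #6; the threshold value of 1.5 and the +1 name offset (index 0 is 'Unknown') are assumptions, not code from the original repository.

import numpy as np
import torch

def load_facebank(facebank_path):
    # load what prepare_facebank saved to disk
    embeddings = torch.load(facebank_path / 'facebank.pth')  # [n, 512]
    names = np.load(facebank_path / 'names.npy')             # ['Unknown', name_1, ...]
    return embeddings, names

def match_one(query_emb, target_embs, names, threshold=1.5):
    # squared L2 distance between one query [1, 512] and every facebank entry
    diff = query_emb.unsqueeze(-1) - target_embs.transpose(1, 0).unsqueeze(0)
    dist = torch.sum(torch.pow(diff, 2), dim=1)
    minimum, min_idx = torch.min(dist, dim=1)
    min_idx[minimum > threshold] = -1                  # -1 means "no match"
    return names[min_idx.item() + 1], minimum.item()  # names[0] is 'Unknown'
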
Example #6
    def infer(self, conf, faces, target_embs, tta=False):
        """
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        names : recorded names of faces in facebank
        tta : test time augmentation (hfilp, that's all)
        """
        faces = faces_preprocessing(faces, conf.device)
        if tta:
            faces_emb = self.model(faces)
            hflip_emb = self.model(faces.flip(-1))  # image horizontal flip
            embs = l2_norm((faces_emb + hflip_emb)/2)  # take mean
        else:
            embs = self.model(faces)

        diff = embs.unsqueeze(-1) - target_embs.transpose(1, 0).unsqueeze(0)
        dist = torch.sum(torch.pow(diff, 2), dim=1)
        print(dist)  # dist is [n_faces, n_targets]; .item() would fail for more than one element
        minimum, min_idx = torch.min(dist, dim=1)
        min_idx[minimum > self.threshold] = -1  # if no match, set idx to -1
        return min_idx, minimum
Example #7
 def forward(self, x, normalize=True, return_norm=False, mode='train'):
     if x.shape[-1] == 112:
         with torch.no_grad():
             x = nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
     
     if mode == 'finetune':
         # run the backbone without gradients so only the layers after bn_out are trained
         with torch.no_grad():
             x = self.mod1(x)
             x = self.mod2(x)
             x = self.tra2(x)
             x = self.mod3(x)
             x = self.tra3(x)
             x = self.mod4(x)
             x = self.tra4(x)
             x = self.mod5(x)
             x = self.bn_out(x)
     else:
         x = self.mod1(x)
         x = self.mod2(x)
         x = self.tra2(x)
         x = self.mod3(x)
         x = self.tra3(x)
         x = self.mod4(x)
         x = self.tra4(x)
         x = self.mod5(x)
         x = self.bn_out(x)
     x = self.output_layer(x)
     if hasattr(self, "classifier"):
         x = self.classifier(x)
     x_norm, norm = l2_norm(x, axis=1, need_norm=True)
     if normalize:
         if return_norm:
             return x_norm, norm
         else:
             return x_norm  # the default one
     else:
         if return_norm:
             return x, norm
         else:
             return x
Example #8
def update_facebank(conf, model, detector, tta=True):
    model.eval()
    faces_embs = torch.empty(0).to(conf.device)
    names = np.array(['Unknown'])
    for path in conf.facebank_path.iterdir():
        if path.is_file():
            continue
        faces = []
        for img_path in path.iterdir():
            face = cv2.imread(str(img_path))
            if face.shape[:2] != (112, 112):  # not yet an aligned 112x112 face crop
                face = detector.detect_align(face)[0]
                cv2.imwrite(str(img_path), face)  # cv2.imwrite expects a string path
            else:
                face = torch.tensor(face).unsqueeze(0)
            faces.append(face)

        faces = torch.cat(faces)
        if len(faces.shape) <= 3:
            continue

        with torch.no_grad():
            faces = faces_preprocessing(faces, device=conf.device)
            if tta:
                face_emb = model(faces)
                hflip_emb = model(faces.flip(-1))  # image horizontal flip
                face_embs = l2_norm(face_emb + hflip_emb)
            else:
                face_embs = model(faces)

        faces_embs = torch.cat((faces_embs, face_embs.mean(0, keepdim=True)))
        names = np.append(names, path.name)

    torch.save(faces_embs, conf.facebank_path/'facebank.pth')
    np.save(conf.facebank_path/'names', names)
    print('from recognizer: facebank updated')
    return faces_embs, names
Example #9
 def forward(self, img, normalize=True, return_norm=False):
     out = self.mod1(img)
     out = self.mod2(self.pool2(out))
     out = self.mod3(self.pool3(out))
     out = self.mod4(out)
     out = self.mod5(out)
     out = self.mod6(out)
     out = self.mod7(out)
     out = self.bn_out(out)
     
     if hasattr(self, "classifier"):
         out = self.classifier(out)
     x = out
     x_norm, norm = l2_norm(x, axis=1, need_norm=True)
     if normalize:
         if return_norm:
             return x_norm, norm
         else:
             return x_norm  # the default one
     else:
         if return_norm:
             return x, norm
         else:
             return x
Example #10
def evaluate_ori(model, path, name, nrof_folds=10, tta=True):
    from utils import ccrop_batch, hflip_batch
    from models.model import l2_norm
    from verifacation import evaluate
    # `bs` (batch size) and `use_mxnet` are module-level globals in the original script
    idx = 0
    from data.data_pipe import get_val_pair
    carray, issame = get_val_pair(path, name)
    carray = carray[:, ::-1, :, :]  # BGR -> RGB
    if use_mxnet:
        # map inputs from [-1, 1] back to the [0, 255] range the mxnet model expects
        carray *= 0.5
        carray += 0.5
        carray *= 255.
    embeddings = np.zeros([len(carray), 512])
    if not use_mxnet:
        with torch.no_grad():
            while idx + bs <= len(carray):
                batch = torch.tensor(carray[idx:idx + bs])
                if tta:
                    # batch = ccrop_batch(batch)
                    flipped = hflip_batch(batch)
                    emb_batch = model(batch.cuda()) + model(flipped.cuda())
                    emb_batch = emb_batch.cpu()
                    embeddings[idx:idx + bs] = l2_norm(emb_batch)
                else:
                    embeddings[idx:idx + bs] = model(batch.cuda()).cpu()
                idx += bs
            if idx < len(carray):
                batch = torch.tensor(carray[idx:])
                if tta:
                    # batch = ccrop_batch(batch)
                    flipped = hflip_batch(batch)
                    emb_batch = model(batch.cuda()) + model(flipped.cuda())
                    emb_batch = emb_batch.cpu()
                    embeddings[idx:] = l2_norm(emb_batch)
                else:
                    embeddings[idx:] = model(batch.cuda()).cpu()
    else:
        from sklearn.preprocessing import normalize
        while idx + bs <= len(carray):
            batch = torch.tensor(carray[idx:idx + bs])
            if tta:
                flipped = hflip_batch(batch)
                emb_batch = model(batch) + model(flipped)
                embeddings[idx:idx + bs] = normalize(emb_batch)
            else:
                embeddings[idx:idx + bs] = model(batch)
            idx += bs
        if idx < len(carray):
            batch = torch.tensor(carray[idx:])
            if tta:
                flipped = hflip_batch(batch)
                emb_batch = model(batch) + model(flipped)
                embeddings[idx:] = normalize(emb_batch)
            else:
                embeddings[idx:] = model(batch)
    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                   nrof_folds)
    roc_curve_tensor = None
    # buf = gen_plot(fpr, tpr)
    # roc_curve = Image.open(buf)
    # roc_curve_tensor = trans.ToTensor()(roc_curve)
    return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
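
A hypothetical usage of evaluate_ori, assuming the call sits in the same script where `bs` and `use_mxnet` are defined at module level, `model` is a loaded backbone in eval mode, and the dataset path and name are placeholders for whatever get_val_pair expects:

acc, best_thr, _ = evaluate_ori(model, 'data/faces_emore', 'lfw', nrof_folds=10, tta=True)
print('lfw: accuracy=%.4f, best threshold=%.4f' % (acc, best_thr))
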