import os
import logging

import numpy as np
import torch
from PIL import Image
from facenet_pytorch import MTCNN, InceptionResnetV1
import facenet_pytorch.models.utils as facenet_utils
import facenet_pytorch.models.utils.detect_face  # makes facenet_utils.detect_face resolvable
# `prewhiten` is assumed to match the older facenet-pytorch API; recent
# releases expose the equivalent as `fixed_image_standardization`.
from facenet_pytorch import fixed_image_standardization as prewhiten

logger = logging.getLogger(__name__)


def embed_faces(in_image_paths, save_embeddings=True, image_size=160, replace_images=False):
    """Crops face(faces) in image and return the cropped area(areas) along with an embedding(embeddings).

    
    
    Parameters
    ----------
    in_image_paths : list
        Path to images to crop
    save_embeddings : bool, optional
        Save the embeddings, by default True
    image_size : int, optional
        [description], by default 160
    replace_images : bool, optional
        [description], by default False
    
    Returns
    -------
    [type]
        [description]
    """    

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    mtcnn = MTCNN(image_size=image_size, keep_all=True, device=device)
    resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
    
    all_embeddings = []
    all_faces = []
    all_boxes = []
    for image_path in in_image_paths:
        try:
            image = Image.open(image_path)
            boxes, _ = mtcnn.detect(image)
            if boxes is None:  # no faces detected in this image
                continue

            for index, box in enumerate(boxes):
                if replace_images:
                    # Overwrite the original image file with the cropped face.
                    os.remove(image_path)
                    face = facenet_utils.detect_face.extract_face(image, box=box, save_path=image_path, image_size=image_size)
                else:
                    face = facenet_utils.detect_face.extract_face(image, box=box, image_size=image_size)
                
                face = prewhiten(face)                     # normalize pixel values
                aligned = torch.stack([face]).to(device)   # add batch dimension
                embedding = resnet(aligned).detach().cpu()

                if save_embeddings:
                    dir_path, file_name = os.path.split(image_path)
                    fname, _ = os.path.splitext(file_name)
                    out_embedding_path = os.path.join(dir_path.replace('images', 'embeddings'), fname + str(index) + '.npy')
                    np.save(out_embedding_path, embedding)
            
                all_embeddings.append(embedding.numpy()[0])  # already detached and on CPU
                all_faces.append(face)
                all_boxes.append(box)

        except Exception as e:
            logger.warning('Bad image {0}: {1}. Skipping...'.format(image_path, e))
    
    return all_embeddings, all_faces, all_boxes

    # Method excerpt from a class in the same module; assumes `self.img` (a PIL
    # image), `self.bbox` (a detected face box), and the module-level `device`
    # and `resnet` defined above.
    def get_embedding(self):

        tensor = facenet_utils.detect_face.extract_face(self.img,
                                                        box=self.bbox,
                                                        image_size=160)

        aligned = torch.stack([prewhiten(tensor)]).to(device)  # add batch dimension

        embedding = resnet(aligned).detach().cpu().numpy()[0]  # (512,) vector
        return embedding
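
A minimal usage sketch for embed_faces (hypothetical paths; each embedding is
also saved under an `embeddings/` sibling of the `images/` directory via the
path substitution above):

paths = ['data/images/a.jpg', 'data/images/b.jpg']
embeddings, faces, boxes = embed_faces(paths, save_embeddings=True)
print(len(embeddings), embeddings[0].shape)  # one (512,) vector per detected face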
Example #3
    # Method excerpt; assumes `self.device`, a module-level `input_image_size`
    # (e.g. 160), and `functional` = torchvision.transforms.functional.
    def pre_process(self, image):
        """
        Resizes and preprocesses an image for feature extraction.
        :param image: cv2 (BGR numpy) image
        :return: preprocessed img_tensor for feature extraction, or None if resizing fails
        """
        try:
            image = cv2.resize(image, (input_image_size, input_image_size),
                               interpolation=cv2.INTER_AREA)
        except cv2.error:
            return None
        img_tensor = functional.to_tensor(np.float32(image)).to(self.device)
        return prewhiten(img_tensor)
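
A hedged usage sketch for pre_process (the `extractor` object and image path
are hypothetical; assumes `input_image_size = 160` and that `self.device` was
set in the class constructor):

import cv2
img = cv2.imread('face.jpg')          # BGR uint8 array
tensor = extractor.pre_process(img)   # prewhitened CHW float tensor, or None on resize failure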
Example #4
import random
import string
from collections import defaultdict


def embed_faces(in_image_paths=None,
                out_paths=None,
                return_values=('embeddings', 'out_paths'),
                images=None):
    """Crops faces) in image and return the cropped area(areas) along with an embedding(embeddings).

    Parameters
    ----------
    in_image_paths : list
        Path to images to crop
    save_embeddings : bool, optional
        Save the embeddings, by default True
    image_size : int, optional
        [description] , by default 160
    replace_images : bool, optional
        [description], by default False
    
    Returns
    -------
    all_embeddings : list
        A list of embeddings for each face. Each embeddings is of size (512). We use IneptionResnetV1 pretrained on vggface2.
    all_faces : list
        A list of ROIs for each face. 
    all_boxes : list
        A list of bboxes for each face

    Warning
    -------
    Make sure that the input image only contains one face!
    """

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    logger.info('using: {0}'.format(device))
    resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)

    ret_dct = defaultdict(list)
    if not in_image_paths:
        in_image_paths = ['' for _ in images]
    if not out_paths:
        out_paths = ['' for _ in in_image_paths]
    assert len(in_image_paths) == len(out_paths)

    for i, image_path in enumerate(in_image_paths):
        out_path = out_paths[i]
        image = Image.open(image_path) if image_path != '' else images[i]

        # HWC -> CHW float tensor, then normalize; no face detection is run
        # here, hence the single-face warning in the docstring.
        face = np.array(image, dtype=np.float32)
        face = face.transpose((2, 0, 1))
        face = prewhiten(torch.tensor(face))
        aligned = torch.stack([face]).to(device)  # add batch dimension
        embedding = resnet(aligned).detach().cpu()

        if out_path:
            try:
                np.save(out_path, embedding)
            except OSError as e:
                # File name too long or otherwise unwritable: fall back to a
                # random 16-character name in the same directory.
                fname, _ = os.path.splitext(os.path.basename(out_path))
                random_string = ''.join(
                    random.choice(string.ascii_lowercase) for _ in range(16))
                fallback = out_path.replace(fname, random_string)
                np.save(fallback, embedding)
                out_path = fallback  # report the path actually written

                logger.warning('{0}\nGenerated new path {1}'.format(
                    e, fallback))

        if 'out_paths' in return_values:
            ret_dct['out_paths'].append(out_path)

        if 'embeddings' in return_values:
            ret_dct['embeddings'].append(embedding)

    return ret_dct
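
A minimal sketch of calling this variant with in-memory images (hypothetical
file names; each PIL image is assumed to be already cropped to a single face,
per the Warning above):

from PIL import Image
imgs = [Image.open('face0.png'), Image.open('face1.png')]
result = embed_faces(images=imgs, return_values=['embeddings'])
vectors = [e.numpy()[0] for e in result['embeddings']]  # each of shape (512,)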