Example #1
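The snippets below are baseline templates from bob.bio.face and are shown without their imports. A minimal import block they appear to rely on is sketched here; the exact module paths are inferred from the bob.bio.face / bob.bio.base package layout and may differ between versions.

# Assumed imports for the examples below (paths inferred, not shown in the snippets).
import torch

from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines import PipelineSimple
from bob.bio.face.annotator import MTCNN
from bob.bio.face.utils import (
    cropped_positions_arcface,
    dnn_default_cropping,
    embedding_transformer,
)

# Embedding classes such as VGG16_Oxford, AFFFE_2021 and OxfordVGG2Resnets are
# assumed to live under bob.bio.face.embeddings (e.g. its pytorch submodule).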
def facenet_template(embedding, annotation_type, fixed_positions=None):
    """
    Facenet baseline template.
    This one will crop the face to :math:`160 \\times 160` pixels.

    Parameters
    ----------

      embedding: obj
         Transformer that takes a cropped face and extracts the embedding

      annotation_type: str
         Type of the annotations (e.g. ``eyes-center``)

      fixed_positions: dict
         Set this if the faces in your images are registered to fixed positions
    """
    # DEFINE CROPPING
    cropped_image_size = (160, 160)

    if annotation_type == "eyes-center" or annotation_type == "bounding-box":
        # Hard coding eye positions for backward consistency
        cropped_positions = dnn_default_cropping(cropped_image_size,
                                                 annotation_type="eyes-center")
        if annotation_type == "bounding-box":
            # This will allow us to use `BoundingBoxAnnotatorCrop`
            cropped_positions.update({
                "topleft": (0, 0),
                "bottomright": cropped_image_size
            })

    else:
        cropped_positions = dnn_default_cropping(cropped_image_size,
                                                 annotation_type)

    annotator = MTCNN(min_size=40, factor=0.709, thresholds=(0.1, 0.2, 0.2))

    # ASSEMBLE TRANSFORMER
    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=embedding,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator=annotator,
    )

    algorithm = Distance()

    return PipelineSimple(transformer, algorithm)
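A hypothetical call of this template could look as follows; `my_embedding` is a placeholder for any transformer that maps a cropped 160x160 face to an embedding vector and is not defined in the snippet.

# Sketch only: `my_embedding` is a placeholder embedding transformer.
pipeline = facenet_template(
    embedding=my_embedding,
    annotation_type="eyes-center",
)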
Example #2
def arcface_template(embedding, annotation_type, fixed_positions=None):
    """
    ArcFace baseline template.
    This one will crop the face to :math:`112 \\times 112` pixels.
    """
    # DEFINE CROPPING
    cropped_image_size = (112, 112)

    if annotation_type == "eyes-center" or annotation_type == "bounding-box":
        # Hard coding eye positions for backward consistency
        cropped_positions = cropped_positions_arcface()
        if annotation_type == "bounding-box":
            # This will allow us to use `BoundingBoxAnnotatorCrop`
            cropped_positions.update(
                {"topleft": (0, 0), "bottomright": cropped_image_size}
            )

    elif isinstance(annotation_type, list):
        cropped_positions = cropped_positions_arcface(annotation_type)
    else:
        cropped_positions = dnn_default_cropping(
            cropped_image_size, annotation_type
        )

    annotator = MTCNN(min_size=40, factor=0.709, thresholds=(0.1, 0.2, 0.2))
    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=embedding,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator=annotator,
    )

    algorithm = Distance()

    return PipelineSimple(transformer, algorithm)
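The same template can also be driven by bounding-box annotations, in which case the crop positions are extended so that `BoundingBoxAnnotatorCrop` can be used. A sketch, again with a placeholder embedding:

# Hypothetical call: `my_embedding` stands for an ArcFace-style embedding
# transformer working on 112x112 crops.
pipeline = arcface_template(
    embedding=my_embedding,
    annotation_type="bounding-box",
)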
Example #3
def oxford_vgg2_resnets(
    model_name, annotation_type, fixed_positions=None, memory_demanding=False
):
    """
    Get the pipeline for the ResNet-based models from Oxford.
    All these models were trained on the VGG2 dataset.

    Models taken from: https://www.robots.ox.ac.uk/~albanie

    Parameters
    ----------
      model_name: str
         One of the four available models (``resnet50_scratch_dag``, ``resnet50_ft_dag``, ``senet50_ft_dag``, ``senet50_scratch_dag``).

      annotation_type: str
         Type of the annotations (e.g. ``eyes-center``)

      fixed_positions: dict
         Set this if the faces in your images are registered to fixed positions
    """

    # DEFINE CROPPING
    cropped_image_size = (224, 224)

    if annotation_type == "eyes-center":
        # Coordinates taken from : https://www.merlin.uzh.ch/contributionDocument/download/14240
        cropped_positions = {"leye": (100, 159), "reye": (100, 65)}
    else:
        cropped_positions = dnn_default_cropping(
            cropped_image_size, annotation_type
        )

    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=OxfordVGG2Resnets(
            model_name=model_name, memory_demanding=memory_demanding
        ),
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator="mtcnn",
    )

    algorithm = Distance()
    from bob.bio.base.pipelines import PipelineSimple

    return PipelineSimple(transformer, algorithm)
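A hypothetical call selecting one of the four model names listed in the docstring:

# Sketch: any of the four listed model names can be passed here.
pipeline = oxford_vgg2_resnets(
    model_name="resnet50_ft_dag",
    annotation_type="eyes-center",
)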
Example #4
def vgg16_oxford_baseline(annotation_type, fixed_positions=None):
    """
    Get the VGG16 pipeline, which will crop the face to :math:`224 \\times 224`
    pixels and use the :py:class:`VGG16_Oxford` embedding.

    Parameters
    ----------

      annotation_type: str
         Type of the annotations (e.g. ``eyes-center``)

      fixed_positions: dict
         Set this if the faces in your images are registered to fixed positions
    """

    # DEFINE CROPPING
    cropped_image_size = (224, 224)

    if annotation_type == "eyes-center" or annotation_type == "bounding-box":
        # Hard coding eye positions for backward consistency
        cropped_positions = {"reye": (112, 82), "leye": (112, 142)}
        if annotation_type == "bounding-box":
            # This will allow us to use `BoundingBoxAnnotatorCrop`
            cropped_positions.update(
                {"topleft": (0, 0), "bottomright": cropped_image_size}
            )
    else:
        cropped_positions = dnn_default_cropping(
            cropped_image_size, annotation_type
        )

    annotator = MTCNN(min_size=40, factor=0.709, thresholds=(0.1, 0.2, 0.2))
    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=VGG16_Oxford(),
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator=annotator,
    )

    algorithm = Distance()

    return PipelineSimple(transformer, algorithm)
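If the faces in a dataset are already registered, the annotator can be bypassed by passing fixed positions. A sketch with illustrative coordinates (the values below simply mirror the hard-coded crop positions and are not taken from any dataset):

# Illustrative only: fixed eye positions for pre-registered images.
pipeline = vgg16_oxford_baseline(
    annotation_type="eyes-center",
    fixed_positions={"reye": (112, 82), "leye": (112, 142)},
)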
Example #5
def afffe_baseline(
    annotation_type,
    fixed_positions=None,
    memory_demanding=False,
    device=torch.device("cpu"),
):
    """
    Get the AFFFE pipeline, which will crop the face to :math:`224 \\times 224`
    pixels and use the :py:class:`AFFFE_2021` embedding.

    Parameters
    ----------

      annotation_type: str
         Type of the annotations (e.g. ``eyes-center``)

      fixed_positions: dict
         Set this if the faces in your images are registered to fixed positions

      memory_demanding: bool
         If True, the extractor processes samples one by one to reduce memory usage

      device: torch.device
         Torch device on which the extractor runs (defaults to CPU)
    """

    # DEFINE CROPPING
    cropped_image_size = (224, 224)

    if annotation_type == "eyes-center":
        # Hard coding eye positions for backward consistency
        cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
    else:
        cropped_positions = dnn_default_cropping(
            cropped_image_size, annotation_type
        )

    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=AFFFE_2021(memory_demanding=memory_demanding, device=device),
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator="mtcnn",
    )

    algorithm = Distance()
    from bob.bio.base.pipelines import PipelineSimple

    return PipelineSimple(transformer, algorithm)
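A hypothetical call running the AFFFE extractor on GPU when one is available:

# Sketch: pick CUDA if available, otherwise fall back to CPU.
pipeline = afffe_baseline(
    annotation_type="eyes-center",
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)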