from datetime import datetime
import os

import cv2
import numpy as np
from keras import backend as K
from keras.layers import Input, Layer
from keras.models import Model, load_model

import config
from openface.align import AlignDlib
from openface.openface_model import create_model
from openface.preprocess_face_data import load_metadata
from openface.triplet_generator_data import triplet_generator

# Load the pre-trained model
print('load_model')
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')

# nn4_small2_pretrained.summary()
start = datetime.now()
# Load the custom dataset
metadata = load_metadata(config.faceImagesPath, num=50)
print('load_image')


def load_image(path):
    img = cv2.imread(path, 1)
    # OpenCV loads images with color channels in BGR order,
    # so reverse them to get RGB
    return img[..., ::-1]


# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')


def align_image(img):
    # Crop and align the largest detected face to 96x96 pixels;
    # returns None when no face is found
    return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)


def embeddingVectors(path):
    img = load_image(path)
    img = align_image(img)
    try:
        # scale RGB values to the interval [0, 1]
        img = (img / 255.).astype(np.float32)
    except TypeError:
        # align_image returns None when no face was detected
        print("No clear face was found, so no embedding could be extracted")
    else:
        # obtain the embedding vector for the image
        return nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
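

# A hedged usage sketch, not part of the original file: images of the same
# person should yield a smaller squared L2 distance between their embeddings
# than images of different people. The helper name below is hypothetical.
def embedding_distance(path1, path2):
    emb1 = embeddingVectors(path1)
    emb2 = embeddingVectors(path2)
    # embeddingVectors returns None when no face could be aligned
    if emb1 is None or emb2 is None:
        return None
    return np.sum(np.square(emb1 - emb2))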

nn4_small2 = create_model()

# Inputs for the anchor, positive and negative images
in_a = Input(shape=(96, 96, 3))
in_p = Input(shape=(96, 96, 3))
in_n = Input(shape=(96, 96, 3))

# Outputs for the anchor, positive and negative embedding vectors
# The nn4_small2 model instance is shared (Siamese network)
emb_a = nn4_small2(in_a)
emb_p = nn4_small2(in_p)
emb_n = nn4_small2(in_n)


class TripletLossLayer(Layer):
    def __init__(self, alpha, **kwargs):
        self.alpha = alpha
        super(TripletLossLayer, self).__init__(**kwargs)

    def triplet_loss(self, inputs):
        a, p, n = inputs
        # Squared Euclidean distances between the anchor-positive
        # and anchor-negative embedding pairs
        p_dist = K.sum(K.square(a - p), axis=1)
        n_dist = K.sum(K.square(a - n), axis=1)
        # Hinge on the margin alpha, summed over the batch
        return K.sum(K.maximum(p_dist - n_dist + self.alpha, 0), axis=0)

    def call(self, inputs):
        # Register the triplet loss with the layer so the model can be
        # compiled with loss=None
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss
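

# A minimal training sketch, not the original training code: wire the shared
# embedding outputs into the loss layer and train the Siamese network on
# triplets. The margin alpha=0.2 is an assumption, and the commented call to
# the imported triplet_generator assumes it takes no arguments and yields
# ([anchor, positive, negative], None) batches.
triplet_loss_layer = TripletLossLayer(alpha=0.2, name='triplet_loss_layer')([emb_a, emb_p, emb_n])
nn4_small2_train = Model([in_a, in_p, in_n], triplet_loss_layer)

# The loss is added inside the layer via add_loss, so compile with loss=None
nn4_small2_train.compile(loss=None, optimizer='adam')
# nn4_small2_train.fit_generator(triplet_generator(), epochs=10, steps_per_epoch=100)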