images_placeholder: images[batches[i - 1]:batches[i]],
                    phase_train_placeholder: False
                }
                # Use the FaceNet model to calculate embeddings for this batch
                embed = sess.run(embeddings, feed_dict=feed_dict)
                embed_array.extend(embed.tolist())

            # Save the accumulated embeddings to disk as a NumPy array
            np.save('embeddings.npy', embed_array)


if __name__ == '__main__':

    sess = tf.Session()
    # Load the models: MTCNN face-detection nets, the sentiment classifier, and YOLO
    pnet, rnet, onet = nets(sess, 'models/')
    sentiment_model = sent.Transfer_learning()
    yolo = YOLO()
    Video = 1  # set to 0 to skip the video-processing pipeline
    if Video == 1:
        print('Processing video')
        clip = VideoFileClip('Video/Video_1.mp4')
        fps = clip.fps
        print(fps)
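        # Run face detection and sentiment classification on the first 10 seconds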
        crops, new_frames, crop_idcs = process_video(
            clip.subclip(0, 10).iter_frames(), pnet, rnet, onet,
            sentiment_model)
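        # Overlay YOLO-based person tracking on the annotated frames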
        newer_frames = human_tracking(new_frames, yolo)
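        # Persist the face crops and compute their FaceNet embeddings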
        np.save('face_crops.npy', crops)
        get_embeddings(crops)
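        # Reassemble the tracked frames into an output video at the original fps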
        clip = ImageSequenceClip(newer_frames, fps=fps)
        clip.write_videofile('Video_Output/newvideo.mp4', fps=fps)