# Example 1 (scraper residue: original header "Esempio n. 1" / stray "0")
                return

            self.__in_pipe.pull_wait()
            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__session_runner.get_in_pipe().push(
                    SessionRunnable(self.__job, inference, run_on_thread=self.__run_session_on_thread))

    def __job(self, inference):
        """Execute the session op on the inference's paired inputs and publish the result.

        Feeds the two elements of ``inference.get_data()`` into the ``x``/``y``
        placeholders, runs ``z``, and pushes ``(result, inference)`` onto the
        output pipe so downstream consumers can match results to requests.
        """
        first_input, second_input = inference.get_data()[0], inference.get_data()[1]
        feeds = {self.__x: first_input, self.__y: second_input}
        result = self.__tf_sess.run(self.__z, feed_dict=feeds)
        self.__out_pipe.push((result, inference))

if __name__ == '__main__':
    # Start the shared runner that executes queued TF session jobs.
    session_runner = SessionRunner()
    session_runner.start()

    # Unit under test — presumably performs an addition on the GPU,
    # judging by the name; TODO confirm against SessionTest's definition.
    addOnGPU = SessionTest()

    # Pipes for feeding inference requests in and reading results out.
    ip = addOnGPU.get_in_pipe()
    op = addOnGPU.get_out_pipe()

    # Attach the runner and start consuming from the input pipe.
    addOnGPU.use_session_runner(session_runner)
    addOnGPU.run()


    def send():
        while True:
            ip.push_wait()
            inference = Inference([random.randint(0, 100), random.randint(0, 100)])
from threading import Thread
from time import sleep

import cv2
import imutils
from py_tensorflow_runner.session_utils import SessionRunner

from py_face_detection.comparator_api.embedding_generator import EmbeddingGenerator
import glob
from proj_data.py_face_detection.infy_images import path as images_path
from proj_data.py_face_detection.embeddings import path as emb_path
import pickle

# NOTE(review): never toggled anywhere in this chunk; verify the rest of the
# file actually reads/updates this flag.
stop_flag = True

# Wire an embedding generator onto a shared session runner.
# NOTE(review): unlike the other snippets in this file, session_runner.start()
# is never called here — confirm the generator starts it internally.
session_runner = SessionRunner()
generator = EmbeddingGenerator()
generator_ip = generator.get_in_pipe()
generator_op = generator.get_out_pipe()
generator.use_session_runner(session_runner)
generator.run()
#
# cap = cv2.VideoCapture('/home/developer/Downloads/video.mp4')
emb_dict = dict()
# Collect person images. With glob's default non-recursive mode, "**" matches
# exactly one directory level, i.e. <images_path>/<person>/<file>.JPG — the
# split("/")[-2] below relies on that fixed depth.
images_list = glob.glob(images_path.get() + "/**/*.JPG")
print(images_list)
image_dict = dict()
for images in images_list:
    person_name = images.split("/")[-2]
    if person_name not in image_dict.keys():
        image_dict[person_name] = list()
            ret, inference = detector.getOutPipe().pull(True)
        if ret:
            i_dets = inference.get_result()
            pipe.push(i_dets.get_annotated())


if __name__ == '__main__':

    # Serve the annotated camera feeds over HTTP.
    fs = FlaskMovie()
    fs.start("0.0.0.0", 5000)

    # Per-stream resources, keyed by stream index.
    session_runner = {}
    detector = {}
    cap = {}
    pipe = {}
    video_inputs = {0: 0, 1: 1}

    for stream_id in video_inputs.keys():
        # One dedicated session runner + detector per video input.
        runner = SessionRunner()
        runner.start()
        session_runner[stream_id] = runner

        obj_detector = TFObjectDetector(PRETRAINED_ssd_mobilenet_v1_coco_2017_11_17, LABELMAP_mscoco, None,
                                        'tf_api_' + str(stream_id), True)
        obj_detector.use_session_runner(runner)
        obj_detector.run()
        detector[stream_id] = obj_detector

        # Open the capture device, register its output feed, and start the
        # per-stream detection worker thread.
        cap[stream_id] = cv2.VideoCapture(video_inputs[stream_id])
        out_pipe = Pipe()
        pipe[stream_id] = out_pipe
        fs.create('feed_' + str(stream_id), out_pipe)
        Thread(target=detect_objects, args=(cap[stream_id], out_pipe, obj_detector, False)).start()