Code Example #1
# Pipe, TFObjectDetector and LABELMAP_mscoco come from this project's own
# modules; only the standard-library import is shown here.
from threading import Thread


def detect_objects(cap, pipe, detector, default):
    if not default:
        ret_pipe = Pipe()
    else:
        ret_pipe = None

    def start_cam():
        # Producer: read frames and queue them on the detector's input pipe.
        while True:
            ret, image = cap.read()
            if not ret:
                continue
            inference = TFObjectDetector.Inference(image.copy(), LABELMAP_mscoco, return_pipe=ret_pipe)
            detector.get_in_pipe().push_wait()
            detector.get_in_pipe().push(inference)

    Thread(target=start_cam).start()
    while True:
        if not default:
            # Results come back over the inference's dedicated return pipe.
            ret, inference = ret_pipe.pull(True)
            if not ret:
                ret_pipe.pull_wait()
            else:
                ret_pipe.flush()
        else:
            # Results come back over the detector's shared output pipe.
            detector.get_out_pipe().pull_wait()
            ret, inference = detector.get_out_pipe().pull(True)
        if ret:
            i_dets = inference.get_result()
            pipe.push(i_dets.get_annotated())
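
A minimal driver for detect_objects() might look like the sketch below. It assumes the Pipe, SessionRunner and TFObjectDetector classes shown later in this collection, with OpenCV as the frame source; the capture index and window handling are assumptions.

# Hypothetical wiring for detect_objects().
import cv2

cap = cv2.VideoCapture(0)
annotated_pipe = Pipe()

session_runner = SessionRunner()
session_runner.start()

detector = TFObjectDetector()
detector.use_session_runner(session_runner)
detector.run()

Thread(target=detect_objects, args=(cap, annotated_pipe, detector, True)).start()

while True:
    annotated_pipe.pull_wait()
    ret, annotated = annotated_pipe.pull()
    if ret:
        cv2.imshow("detections", annotated)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break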
Code Example #2
# Pipe and SessionRunnable come from this project's own modules.
import math
from threading import Thread

import tensorflow as tf


class SessionTest:

    def __init__(self, flush_pipe_on_read=False):
        self.__thread = None
        self.__flush_pipe_on_read = flush_pipe_on_read
        self.__run_session_on_thread = False
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

    def __in_pipe_process(self, inference):
        # Pre-processing on the way in: square both inputs for the add op.
        a, b = inference.get_input()
        inference.set_data([a * a, b * b])
        return inference

    def __out_pipe_process(self, result):
        # Post-processing on the way out: sqrt(a*a + b*b), the hypotenuse.
        result, inference = result
        inference.set_result(math.sqrt(result))
        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_threading(self, run_on_thread=True):
        self.__run_session_on_thread = run_on_thread

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        self.__tf_sess = session_runner.get_session()

        self.__x = tf.placeholder(tf.int32, None)
        self.__y = tf.placeholder(tf.int32, None)
        self.__z = tf.add(self.__x, self.__y)

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__out_pipe.close()
                return

            self.__in_pipe.pull_wait()
            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__session_runner.get_in_pipe().push(
                    SessionRunnable(self.__job, inference, run_on_thread=self.__run_session_on_thread))

    def __job(self, inference):
        self.__out_pipe.push(
            (self.__tf_sess.run(self.__z,
                                feed_dict={self.__x: inference.get_data()[0], self.__y: inference.get_data()[1]}),
             inference))
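
As a usage sketch (assuming the base Inference class wraps its constructor argument as the input, as the other components here do), pushing the pair (3, 4) through SessionTest squares the inputs on the way in, adds them in the TF graph, and takes the square root on the way out, so 5.0 should come back:

# Hypothetical driver for the toy pipeline above.
session_runner = SessionRunner()
session_runner.start()

test = SessionTest()
test.use_session_runner(session_runner)
test.run()

test.get_in_pipe().push(Inference((3, 4)))      # 3*3 + 4*4 = 25

test.get_out_pipe().pull_wait()
ret, inference = test.get_out_pipe().pull()
if ret:
    print(inference.get_result())               # expected: 5.0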
Code Example #3
# Pipe and SessionRunnable come from this project's own modules.
import threading
from os.path import dirname, realpath
from threading import Thread

import tensorflow as tf


class SessionRunner:
    __config = tf.ConfigProto(log_device_placement=False)
    __config.gpu_options.allow_growth = True
    __counter = 0

    def __init__(self, skip=False):
        self.__self_dir_path = dirname(realpath(__file__))
        self.__thread = None
        self.__pause_resume = None
        self.__tf_sess = tf.Session(config=self.__config)
        self.__in_pipe = Pipe()
        self.__skip = skip

    def get_in_pipe(self):
        return self.__in_pipe

    def get_session(self):
        return self.__tf_sess

    def start(self):
        if self.__thread is None:
            self.__pause_resume = threading.Event()
            self.__thread = Thread(target=self.__start)
            self.__thread.start()

    def stop(self):
        if self.__thread is not None:
            self.__thread = None

    def __start(self):
        while self.__thread:
            self.__in_pipe.pull_wait()
            ret, sess_runnable = self.__in_pipe.pull(self.__skip)
            if ret:
                if not isinstance(sess_runnable, SessionRunnable):
                    raise TypeError("Pipe elements must be SessionRunnable instances")
                sess_runnable.execute(self.__tf_sess)
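
SessionRunnable itself does not appear in this collection; a minimal reconstruction consistent with how it is used here (built from a job callable and an inference, executed against the shared session, optionally on its own thread) might be:

# Minimal sketch of SessionRunnable; the real class may differ.
class SessionRunnable:

    def __init__(self, job_fnc, inference, run_on_thread=False):
        self.__job_fnc = job_fnc
        self.__inference = inference
        self.__run_on_thread = run_on_thread

    def execute(self, tf_sess):
        # The job closes over its component's tensors and session, so the
        # runner only has to invoke it, inline or on a worker thread.
        if self.__run_on_thread:
            Thread(target=self.__job_fnc, args=(self.__inference,)).start()
        else:
            self.__job_fnc(self.__inference)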
Code Example #4
# Inference, Pipe, SessionRunnable, facenet, facenet_path and the pretrained
# model constant come from this project's own modules.
import os
from threading import Thread

import tensorflow as tf


class FNEmbeddingsGenerator:
    class Inference(Inference):
        def __init__(self, input, return_pipe=None, meta_dict=None):
            super().__init__(input, return_pipe, meta_dict)

    def __init__(self,
                 model_name=PRETRAINED_20180408_102900,
                 graph_prefix=None,
                 flush_pipe_on_read=False,
                 face_resize=160):

        facenet.load_model(facenet_path.get(model_name))
        self.__flush_pipe_on_read = flush_pipe_on_read

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

        self.__run_session_on_thread = False
        self.__face_resize = face_resize
        if not graph_prefix:
            self.__graph_prefix = ''
        else:
            self.__graph_prefix = graph_prefix + '/'

    def __in_pipe_process(self, inference):
        resized = inference.get_input()
        prewhitened = facenet.prewhiten(resized)
        reshaped = prewhitened.reshape(-1, self.__face_resize,
                                       self.__face_resize, 3)
        inference.set_data(reshaped)
        return inference

    def __out_pipe_process(self, result):
        result, inference = result
        inference.set_result(result)
        if inference.get_return_pipe():
            # Sentinel: with a dedicated return pipe set, the result is
            # delivered there and the shared out pipe gets '\0' instead.
            return '\0'

        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_threading(self, run_on_thread=True):
        self.__run_session_on_thread = run_on_thread

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        self.__tf_sess = session_runner.get_session()

        self.__images_placeholder = self.__tf_sess.graph.get_tensor_by_name(
            self.__graph_prefix + "input:0")
        self.__embeddings = self.__tf_sess.graph.get_tensor_by_name(
            self.__graph_prefix + "embeddings:0")
        self.__phase_train_placeholder = self.__tf_sess.graph.get_tensor_by_name(
            self.__graph_prefix + "phase_train:0")
        self.__embedding_size = self.__embeddings.get_shape()[1]

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__out_pipe.close()
                return

            self.__in_pipe.pull_wait()
            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__session_runner.get_in_pipe().push(
                    SessionRunnable(
                        self.__job,
                        inference,
                        run_on_thread=self.__run_session_on_thread))

    def __job(self, inference):
        self.__out_pipe.push(
            (self.__tf_sess.run(self.__embeddings,
                                feed_dict={
                                    self.__images_placeholder:
                                    inference.get_data(),
                                    self.__phase_train_placeholder: False
                                }), inference))

    def stop(self):
        self.__thread = None

    def save_for_serving(self, save_path, model_version):
        path = os.path.join(save_path, str(model_version))
        builder = tf.saved_model.builder.SavedModelBuilder(path)

        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={
                'images':
                tf.saved_model.utils.build_tensor_info(
                    self.__images_placeholder),
                'phase':
                tf.saved_model.utils.build_tensor_info(
                    self.__phase_train_placeholder)
            },
            outputs={
                'embeddings':
                tf.saved_model.utils.build_tensor_info(self.__embeddings)
            },
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')

        builder.add_meta_graph_and_variables(
            self.__tf_sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'calculate_embeddings': prediction_signature,
            },
            legacy_init_op=legacy_init_op)

        builder.save()
        print("model saved successfullt")
Code Example #5
# Inference, Pipe, FNEmbeddingsGenerator and FaceDetectorMTCNN come from this
# project's own modules.
from threading import Thread

import imutils
import numpy as np


class EmbeddingGenerator:
    class Inference(Inference):

        def __init__(self, input, return_pipe=None, meta_dict=None):
            super().__init__(input, return_pipe, meta_dict)

    def __init__(self, face_size=160):

        self.generator = FNEmbeddingsGenerator(face_resize=face_size)
        self.generator.use_threading()
        self.generator_ip = self.generator.get_in_pipe()
        self.generator_op = self.generator.get_out_pipe()

        self.detector = FaceDetectorMTCNN(face_resize=face_size)
        self.detector.use_threading()
        self.detector_ip = self.detector.get_in_pipe()
        self.detector_op = self.detector.get_out_pipe()

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

        self.__run_session_on_thread = False
        self.__face_size = face_size


    def __in_pipe_process(self, inference):
        return inference

    def __out_pipe_process(self, result):
        result, inference = result
        inference.set_result(result)
        if inference.get_return_pipe():
            return '\0'

        return inference


    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_session_runner(self, session_runner):
        self.session_runner = session_runner
        self.generator.use_session_runner(session_runner)
        self.detector.use_session_runner(session_runner)


    def step_1(self):
        while self.__thread:
            self.detector_ip.push_wait()
            self.__in_pipe.pull_wait()
            ret, inf = self.__in_pipe.pull()
            if not ret:
                continue
            image = inf.get_input()
            # width=None keeps the frame at its original size; set a width
            # here to downscale frames before detection.
            image = imutils.resize(image, width=None)
            inference = FaceDetectorMTCNN.Inference(image)
            inference.set_meta('EmbeddingGenerator.Inference', inf)
            self.detector_ip.push(inference)
        self.detector.stop()

    def step_2(self):
        while self.__thread:
            self.generator_ip.push_wait()
            self.detector_op.pull_wait()
            ret, inference = self.detector_op.pull(True)
            if ret:
                faces = inference.get_result()
                # print("faces: ", faces)
                inf = inference.get_meta('EmbeddingGenerator.Inference')
                inf.set_meta('bbox', list())
                if faces:
                    face_imgs = np.empty((len(faces), self.__face_size, self.__face_size, 3))
                    for i in range(len(faces)):
                        inf.get_meta('bbox').append(faces[i]['rect'])
                        face_imgs[i, :, :, :] = faces[i]['face']
                    # Only the first crop is stored in the meta dict; the full
                    # batch still goes to the embeddings generator below.
                    inf.set_meta('face_image', face_imgs[0])

                    inference = FNEmbeddingsGenerator.Inference(input=face_imgs)
                    inference.set_meta('EmbeddingGenerator.Inference', inf)
                    self.generator_ip.push(inference)
                else:
                    self.__out_pipe.push((None, inf))
        self.detector.stop()
        self.generator.stop()


    def step_3(self):
        while self.__thread:
            self.generator_op.pull_wait()
            ret, inference = self.generator_op.pull(True)
            embedding = None
            if ret:
                embedding = inference.get_result()
                inference = inference.get_meta('EmbeddingGenerator.Inference')
                # print("shape emb",embedding.shape)
            self.__out_pipe.push((embedding, inference))
        self.generator.stop()

    def __run(self):
        self.session_runner.start()
        self.generator.run()
        self.detector.run()
        Thread(target=self.step_1).start()
        Thread(target=self.step_2).start()
        Thread(target=self.step_3).start()

    def run(self):
        self.__thread = Thread(target=self.__run)
        self.__thread.start()

    def stop(self):
        self.__thread = None
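
The composite generator is then driven with whole frames; detection and embedding happen across the three internal stages. A sketch (the frame source is an assumption; run() also starts the session runner):

# Hypothetical end-to-end usage.
import cv2

embedder = EmbeddingGenerator()
embedder.use_session_runner(SessionRunner())
embedder.run()

frame = cv2.imread('people.jpg')
embedder.get_in_pipe().push(EmbeddingGenerator.Inference(frame))

embedder.get_out_pipe().pull_wait()
ret, inference = embedder.get_out_pipe().pull()
if ret:
    embeddings = inference.get_result()   # None when no face was detected
    boxes = inference.get_meta('bbox')    # one rect per detected face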
Code Example #6
# Inference, Pipe, detect_face and align_path come from this project's own
# modules; minsize, threshold, factor and margin are module-level MTCNN
# constants (see the note after this example).
import os
from threading import Thread

import cv2
import numpy as np
import tensorflow as tf


class FaceDetectorMTCNN:
    class Inference(Inference):
        def __init__(self, input, return_pipe=None, meta_dict=None):
            super().__init__(input, return_pipe, meta_dict)

    def __init__(self,
                 graph_prefix=None,
                 flush_pipe_on_read=False,
                 face_prob_score=0.835,
                 face_resize=160):
        self.__face_prob_score = face_prob_score
        self.__flush_pipe_on_read = flush_pipe_on_read

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)
        self.__face_resize = face_resize

        self.__run_session_on_thread = False

        if not graph_prefix:
            self.__graph_prefix = ''
        else:
            self.__graph_prefix = graph_prefix + '/'

    def __in_pipe_process(self, inference):
        img = inference.get_input()
        inference.set_data(img)
        return inference

    def __out_pipe_process(self, result):
        result, inference = result
        inference.set_result(result)
        if inference.get_return_pipe():
            return '\0'

        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_threading(self, run_on_thread=True):
        self.__run_session_on_thread = run_on_thread

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        self.__tf_sess = session_runner.get_session()
        self.__pnet, self.__rnet, self.__onet = detect_face.create_mtcnn(
            self.__tf_sess, align_path.get())

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__out_pipe.close()
                return

            self.__in_pipe.pull_wait()
            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__job(inference)

    def __job(self, inference):
        img = inference.get_data()
        faces = []
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = detect_face.detect_face(img, minsize, self.__pnet,
                                                    self.__rnet, self.__onet,
                                                    threshold, factor)
        if len(bounding_boxes) > 0:
            for face in bounding_boxes:
                if face[4] > self.__face_prob_score:
                    det = np.squeeze(face[0:4])
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    resized = cv2.resize(
                        cropped, (self.__face_resize, self.__face_resize),
                        interpolation=cv2.INTER_CUBIC)
                    faces.append({
                        'face': resized,
                        'rect': [bb[0], bb[1], bb[2], bb[3]]
                    })
        # print("len of faces", len(faces))
        self.__out_pipe.push((faces, inference))

    def stop(self):
        self.__thread = None

    def save_for_serving(self, save_path, model_version):
        # NOTE: mirrors FNEmbeddingsGenerator.save_for_serving; the tensor
        # attributes referenced below are never defined on this class, so the
        # method needs adapting to the MTCNN graph before it can run.
        path = os.path.join(save_path, str(model_version))
        builder = tf.saved_model.builder.SavedModelBuilder(path)

        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={
                'images':
                tf.saved_model.utils.build_tensor_info(
                    self.__images_placeholder),
                'phase':
                tf.saved_model.utils.build_tensor_info(
                    self.__phase_train_placeholder)
            },
            outputs={
                'embeddings':
                tf.saved_model.utils.build_tensor_info(self.__embeddings)
            },
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')

        builder.add_meta_graph_and_variables(
            self.__tf_sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'calculate_embeddings': prediction_signature,
            },
            legacy_init_op=legacy_init_op)

        builder.save()
        print("model saved successfullt")
Code Example #7
# Pipe comes from this project's own modules.
from threading import Thread

import numpy as np
import pyrealsense2 as rs


class RealSenseCamera(object):

    def __init__(self, serial_no, fps=30, hxw=(640, 480)):
        self.pipe = Pipe(limit=1)
        self.serial_no = serial_no
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.fps = fps
        self.hxw = hxw
        self.obj = None

    def __start(self):
        # initialise camera
        print('ser = ', self.serial_no)
        self.config.enable_device(self.serial_no)
        # Despite the name, hxw is passed as (width, height) to enable_stream.
        self.config.enable_stream(rs.stream.depth, self.hxw[0], self.hxw[1], rs.format.z16, self.fps)
        self.config.enable_stream(rs.stream.color, self.hxw[0], self.hxw[1], rs.format.bgr8, self.fps)


        self.pipeline.start(self.config)

        align_to = rs.stream.color
        align = rs.align(align_to)

        try:
            while True:
                # Wait for a coherent pair of frames: depth and color
                frames = self.pipeline.wait_for_frames()
                aligned_frames = align.process(frames)

                depth_frame = aligned_frames.get_depth_frame().as_depth_frame()
                color_frame = frames.get_color_frame()

                # Validate that both frames are valid
                if not depth_frame or not color_frame:
                    continue

                # Convert images to numpy arrays
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                self.pipe.push_wait()
                self.pipe.push({"color_image": color_image, "depth_image": depth_image})

        finally:
            # Stop streaming
            self.pipeline.stop()

    def start(self):
        Thread(target=self.__start).start()

    def get_feed(self):
        # Returns Pipe.pull()'s (ret, feed) tuple; feed holds the image dict.
        self.pipe.pull_wait()
        return self.pipe.pull()

    def stop_feed(self):
        self.pipeline.stop()

    def skip_Nframes(self, count):
        # Drop `count` frames, e.g. to let auto-exposure settle after start().
        for _ in range(count):
            self.get_feed()
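
A consumption sketch (the serial number is a placeholder):

# Hypothetical usage; substitute a real device serial number.
cam = RealSenseCamera(serial_no='012345678901')
cam.start()
cam.skip_Nframes(30)                  # let auto-exposure settle for ~1 s

ret, feed = cam.get_feed()
if ret:
    color = feed['color_image']       # HxWx3 uint8, BGR
    depth = feed['depth_image']       # HxW uint16, aligned to the color frame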
Code Example #8
# Inference, InferedDetections, Pipe, SessionRunnable, label_map_util,
# vis_util, utils_ops, pretrained_path, data_path and the pretrained model
# constant come from this project's own modules.
import os
import tarfile
import urllib.request
from os import path
from os.path import dirname, realpath
from shutil import copy2
from threading import Thread

import numpy as np
import tensorflow as tf


class TFObjectDetector:
    class Inference(Inference):
        def __init__(self, input, label_map, return_pipe=None, meta_dict=None):
            super().__init__(input, return_pipe, meta_dict)
            self.__label_map = label_map

        def get_label_map(self):
            return self.__label_map

    @staticmethod
    def __get_dir_path():
        return dirname(realpath(__file__))

    @staticmethod
    def __download_model(model_path, download_base, model_file):

        print("downloading model...", model_path)
        try:
            os.mkdir(model_path)
        except:
            pass

        opener = urllib.request.URLopener()
        opener.retrieve(download_base + model_file, model_path + model_file)
        print("finished downloading. extracting...")
        tar_file = tarfile.open(model_path + model_file)
        for file in tar_file.getmembers():
            file_name = os.path.basename(file.name)
            if 'frozen_inference_graph.pb' in file_name:
                tar_file.extract(file, model_path)
        print("finished extracting.")

    @staticmethod
    def __fetch_model_path(model_name):
        dir_path = pretrained_path.get()
        model_path = dir_path + '/'
        model_file = model_name + '.tar.gz'
        download_base = 'http://download.tensorflow.org/models/object_detection/'
        path_to_frozen_graph = model_path + model_name + '/frozen_inference_graph.pb'
        if not path.exists(path_to_frozen_graph):
            TFObjectDetector.__download_model(model_path, download_base,
                                              model_file)
        return path_to_frozen_graph

    @staticmethod
    def __fetch_category_indices(label_map):
        label_map += '.pbtxt'
        path_to_labels = os.path.join(data_path.get(), label_map)
        class_count = 90
        label_map = label_map_util.load_labelmap(path_to_labels)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=class_count, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        category_dict = {}
        for item in category_index.values():
            category_dict[item['id']] = item['name']
            category_dict[item['name']] = item['id']

        return category_index, category_dict

    @staticmethod
    def upload_label_map(label_map_path, force=False):
        # Copy a user-supplied label map into the shared data directory.
        path_to_labels = os.path.join(data_path.get(),
                                      os.path.basename(label_map_path))
        if path.exists(path_to_labels) and not force:
            raise Exception(
                "label map exists; use force=True to overwrite.")

        copy2(label_map_path, path_to_labels)

    def __init__(self,
                 model_name=PRETRAINED_ssd_mobilenet_v1_coco_2017_11_17,
                 label_map='mscoco_label_map',
                 image_shape=None,
                 graph_prefix=None,
                 flush_pipe_on_read=False):

        self.__category_index, self.__category_dict = self.__fetch_category_indices(
            label_map)
        self.__path_to_frozen_graph = self.__fetch_model_path(model_name)
        self.__flush_pipe_on_read = flush_pipe_on_read
        self.__image_shape = image_shape

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

        self.__run_session_on_thread = False

        if not graph_prefix:
            self.__graph_prefix = ''
        else:
            self.__graph_prefix = graph_prefix + '/'

    def __in_pipe_process(self, inference):
        images = inference.get_input()
        images = np.array(images)
        if len(images.shape) == 3:
            data = np.expand_dims(images, axis=0)
            inference.set_meta('expand_dims', True)
        else:
            data = images
            inference.set_meta('expand_dims', False)

        inference.set_data(data)
        return inference

    def __out_pipe_process(self, result):
        result, inference = result

        if inference.get_meta('expand_dims'):
            num_detections = int(result['num_detections'][0])
            detection_classes = result['detection_classes'][
                0][:num_detections].astype(np.uint8)
            detection_boxes = result['detection_boxes'][0][:num_detections]
            detection_scores = result['detection_scores'][0][:num_detections]
            if 'detection_masks' in result:
                detection_masks = result['detection_masks'][0][:num_detections]
            else:
                detection_masks = None

            results = InferedDetections(inference.get_input(),
                                        inference.get_label_map(),
                                        num_detections,
                                        detection_boxes,
                                        detection_classes,
                                        detection_scores,
                                        masks=detection_masks,
                                        is_normalized=True,
                                        get_category_fnc=self.get_category,
                                        annotator=self.annotate)

        else:
            results = []
            for i in range(len(result['num_detections'])):
                num_detections = int(result['num_detections'][i])
                detection_classes = result['detection_classes'][
                    i][:num_detections].astype(np.uint8)
                detection_boxes = result['detection_boxes'][i][:num_detections]
                detection_scores = result['detection_scores'][
                    i][:num_detections]
                if 'detection_masks' in result:
                    detection_masks = result['detection_masks'][
                        i][:num_detections]
                else:
                    detection_masks = None

                results.append(
                    InferedDetections(inference.get_input()[i],
                                      inference.get_label_map(),
                                      num_detections,
                                      detection_boxes,
                                      detection_classes,
                                      detection_scores,
                                      masks=detection_masks,
                                      is_normalized=True,
                                      get_category_fnc=self.get_category,
                                      annotator=self.annotate))

        inference.set_result(results)
        if inference.get_return_pipe():
            return '\0'

        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_threading(self, run_on_thread=True):
        self.__run_session_on_thread = run_on_thread

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        self.__tf_sess = session_runner.get_session()
        with self.__tf_sess.graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.__path_to_frozen_graph, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name=self.__graph_prefix)

        tf_default_graph = self.__tf_sess.graph

        self.__image_tensor = tf_default_graph.get_tensor_by_name(
            self.__graph_prefix + 'image_tensor:0')
        tensor_names = {
            output.name
            for op in tf_default_graph.get_operations()
            for output in op.outputs
        }
        self.__tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_classes',
                'detection_scores', 'detection_masks'
        ]:
            tensor_name = self.__graph_prefix + key + ':0'
            if tensor_name in tensor_names:
                self.__tensor_dict[key] = tf_default_graph.get_tensor_by_name(
                    tensor_name)
        if 'detection_masks' in self.__tensor_dict:
            # The following processing is only for single image
            detection_boxes = tf.squeeze(self.__tensor_dict['detection_boxes'],
                                         [0])
            detection_masks = tf.squeeze(self.__tensor_dict['detection_masks'],
                                         [0])
            # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
            real_num_detection = tf.cast(
                self.__tensor_dict['num_detections'][0], tf.int32)
            detection_boxes = tf.slice(detection_boxes, [0, 0],
                                       [real_num_detection, -1])
            detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                       [real_num_detection, -1, -1])

            detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                detection_masks, detection_boxes, self.__image_shape[0],
                self.__image_shape[1])
            detection_masks_reframed = tf.cast(
                tf.greater(detection_masks_reframed, 0.5), tf.uint8)
            # Follow the convention by adding back the batch dimension
            self.__tensor_dict['detection_masks'] = tf.expand_dims(
                detection_masks_reframed, 0)

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__out_pipe.close()
                return

            self.__in_pipe.pull_wait()
            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__session_runner.get_in_pipe().push(
                    SessionRunnable(
                        self.__job,
                        inference,
                        run_on_thread=self.__run_session_on_thread))

    def __job(self, inference):
        self.__out_pipe.push((self.__tf_sess.run(
            self.__tensor_dict,
            feed_dict={self.__image_tensor: inference.get_data()}), inference))

    def get_category(self, category):
        return self.__category_dict[category]

    @staticmethod
    def annotate(inferred_detections):
        annotated = inferred_detections.image.copy()
        vis_util.visualize_boxes_and_labels_on_image_array(
            annotated,
            inferred_detections.get_boxes_tlbr(),
            inferred_detections.get_classes().astype(np.int32),
            inferred_detections.get_scores(),
            TFObjectDetector.__fetch_category_indices(
                inferred_detections.get_label_map())[0],
            instance_masks=inferred_detections.get_masks(),
            use_normalized_coordinates=True,
            line_thickness=1)
        return annotated
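
Finally, a single-image sketch for the detector without the camera loop from Code Example #1 (the image path is an assumption; the label-map name matches the constructor default):

# Hypothetical single-image usage.
import cv2

session_runner = SessionRunner()
session_runner.start()

detector = TFObjectDetector()
detector.use_session_runner(session_runner)
detector.run()

image = cv2.imread('street.jpg')
detector.get_in_pipe().push(
    TFObjectDetector.Inference(image, 'mscoco_label_map'))

detector.get_out_pipe().pull_wait()
ret, inference = detector.get_out_pipe().pull(True)
if ret:
    i_dets = inference.get_result()
    cv2.imwrite('annotated.jpg', i_dets.get_annotated())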