def prepare_input_stream(self):
    """
    prepares the input stream
    stream types: 'video', 'image', 'ros'
    gets called by prepare_model
    """
    if self.input_type == 'video':
        self._is_videoD = True
        self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                         self.config.WIDTH,
                                         self.config.HEIGHT).start()
        self.stream_height = self._input_stream.real_height
        self.stream_width = self._input_stream.real_width
    elif self.input_type == 'image':
        self._is_imageD = True
        self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                         self.config.LIMIT_IMAGES,
                                         (self.config.WIDTH, self.config.HEIGHT)).start()
        self.stream_height = self.config.HEIGHT
        self.stream_width = self.config.WIDTH
    elif self.input_type == 'ros':
        self.prepare_ros('detection_node')
    # Timeliner for image detection
    if self.config.WRITE_TIMELINE:
        self.prepare_timeliner()
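# NOTE: VideoStream and ImageStream are imported from the project's input helpers and are not
# shown in this snippet. The sketch below is a minimal, hypothetical stand-in for VideoStream
# only, assuming it is a threaded wrapper around cv2.VideoCapture exposing start()/read()/stop()
# and the actual capture size (real_width/real_height) used above. The real implementation in
# the source project may differ.
import threading
import cv2


class VideoStreamSketch(object):
    def __init__(self, src, width, height):
        self._cap = cv2.VideoCapture(src)
        # Request a capture size; the camera may ignore it, hence the "real_*" attributes.
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.real_width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.real_height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self._frame = None
        self._stopped = False

    def start(self):
        # Read frames in a daemon thread so sess.run() never waits on camera I/O.
        t = threading.Thread(target=self._update)
        t.daemon = True
        t.start()
        return self

    def _update(self):
        while not self._stopped:
            _, self._frame = self._cap.read()

    def read(self):
        return self._frame

    def stop(self):
        self._stopped = True
        self._cap.release()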
def prepare_model(self, input_type):
    """
    prepares the DeepLab model
    input_type: must be 'image' or 'video'
    """
    assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
    super(DeepLabModel, self).prepare_model()
    self.input_type = input_type
    # fixed input sizes as model needs resize either way
    # Input configurations
    self.category_index = None
    if self.input_type == 'video':
        self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                         self.config.WIDTH,
                                         self.config.HEIGHT).start()
    elif self.input_type == 'image':
        self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                         self.config.LIMIT_IMAGES).start()
        if self.config.WRITE_TIMELINE:
            self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            self._run_metadata = tf.RunMetadata()
            self._timeliner = TimeLiner()
    print("> Building Graph")
    with self.detection_graph.as_default():
        with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
            return self
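# TimeLiner is also external to this snippet. Under the assumption that it simply wraps
# tensorflow.python.client.timeline to dump the step_stats gathered via RunOptions.FULL_TRACE
# into a Chrome-trace JSON file (viewable at chrome://tracing), a minimal sketch could look
# like this; the real class may additionally merge traces across multiple runs.
from tensorflow.python.client import timeline


class TimeLinerSketch(object):
    def write_timeline(self, step_stats, file_path):
        # Convert the profiling data of one session.run() into Chrome trace format.
        fetched_timeline = timeline.Timeline(step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        with open(file_path, 'w') as f:
            f.write(chrome_trace)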
def prepare_input_stream(self):
    if self.input_type == 'video':
        self._is_videoD = True
        self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                         self.config.WIDTH,
                                         self.config.HEIGHT).start()
    elif self.input_type == 'image':
        self._is_imageD = True
        self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                         self.config.LIMIT_IMAGES).start()
        if self.config.WRITE_TIMELINE:
            self.prepare_timeliner()
    elif self.input_type == 'ros':
        self.prepare_ros('deeplab_node')
class DeepLabModel(Model):
    def __init__(self, config):
        super(DeepLabModel, self).__init__(config)

    def prepare_input_stream(self):
        if self.input_type == 'video':
            self._is_videoD = True
            self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                             self.config.WIDTH,
                                             self.config.HEIGHT).start()
        elif self.input_type == 'image':
            self._is_imageD = True
            self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                             self.config.LIMIT_IMAGES).start()
            if self.config.WRITE_TIMELINE:
                self.prepare_timeliner()
        elif self.input_type == 'ros':
            self.prepare_ros('deeplab_node')

    def prepare_model(self, input_type):
        """
        prepares the DeepLab model
        input_type: must be 'image', 'video' or 'ros'
        """
        assert input_type in ['image', 'video', 'ros'], "only 'image', 'video' or 'ros' input possible"
        super(DeepLabModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            self.prepare_tracker()
        # Input Stream
        self.category_index = None
        self.prepare_input_stream()
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
                return self

    def detect(self):
        """
        DeepLab detection function
        """
        if not (self.config.USE_TRACKER and self._track):
            self.frame = self._input_stream.read()
            height, width, _ = self.frame.shape
            resize_ratio = 1.0 * 513 / max(self._input_stream.real_width,
                                           self._input_stream.real_height)
            target_size = (int(resize_ratio * self._input_stream.real_width),
                           int(resize_ratio * self._input_stream.real_height))  # e.g. (513, 342) or (513, 384), depending on aspect ratio
            self.frame = self._visualizer.resize_image(self.frame, target_size)
            batch_seg_map = self._sess.run(
                'SemanticPredictions:0',
                feed_dict={'ImageTensor:0': [self._visualizer.convertRGB_image(self.frame)]},
                options=self._run_options,
                run_metadata=self._run_metadata)
            if self.config.WRITE_TIMELINE:
                self._timeliner.write_timeline(
                    self._run_metadata.step_stats,
                    '{}/timeline_{}.json'.format(self.config.RESULT_PATH,
                                                 self.config.DISPLAY_NAME))
            seg_map = batch_seg_map[0]
            self.boxes = []
            self.labels = []
            self.ids = []
            if self.config.BBOX:
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > self.config.MINAREA:
                        box = region.bbox
                        id = seg_map[tuple(region.coords[0])]
                        label = self.config.LABEL_NAMES[id]
                        self.boxes.append(box)
                        self.labels.append(label)
                        self.ids.append(id)
            # deeplab workaround
            self.num = len(self.boxes)
            self.classes = self.ids
            self.scores = self.labels
            self.masks = seg_map
            # Activate Tracker
            if self.config.USE_TRACKER and not self._is_imageD:
                self.activate_tracker()
        else:
            self.run_tracker()
        # publish ros
        if self._is_rosD:
            self._ros_publisher.publish(self.boxes, self.labels, self.masks,
                                        self.frame.shape, self.fps.fps_local())
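# The BBOX branch above derives pseudo bounding boxes from the DeepLab segmentation map via
# connected components. A self-contained illustration of that idea on a synthetic seg_map
# (the label names and the area threshold are made-up values, not taken from the project config):
import numpy as np
from skimage import measure

seg_map = np.zeros((342, 513), dtype=np.uint8)
seg_map[50:120, 60:200] = 15     # pretend class id 15 ("person") was predicted here
seg_map[200:210, 300:305] = 7    # tiny blob that should be filtered out

LABEL_NAMES = {15: 'person', 7: 'car'}  # hypothetical subset
MINAREA = 500

boxes, labels = [], []
map_labeled = measure.label(seg_map, connectivity=1)
for region in measure.regionprops(map_labeled):
    if region.area > MINAREA:
        class_id = seg_map[tuple(region.coords[0])]
        boxes.append(region.bbox)               # (min_row, min_col, max_row, max_col)
        labels.append(LABEL_NAMES[class_id])

print(boxes, labels)  # -> [(50, 60, 120, 200)] ['person']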
class ObjectDetectionModel(Model):
    """
    object_detection model class
    """
    def __init__(self, config):
        super(ObjectDetectionModel, self).__init__(config)

    def prepare_input_stream(self):
        """
        prepares the input stream
        stream types: 'video', 'image', 'ros'
        gets called by prepare_model
        """
        if self.input_type == 'video':
            self._is_videoD = True
            self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                             self.config.WIDTH,
                                             self.config.HEIGHT).start()
            self.stream_height = self._input_stream.real_height
            self.stream_width = self._input_stream.real_width
        elif self.input_type == 'image':
            self._is_imageD = True
            self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                             self.config.LIMIT_IMAGES,
                                             (self.config.WIDTH, self.config.HEIGHT)).start()
            self.stream_height = self.config.HEIGHT
            self.stream_width = self.config.WIDTH
        elif self.input_type == 'ros':
            self.prepare_ros('detection_node')
        # Timeliner for image detection
        if self.config.WRITE_TIMELINE:
            self.prepare_timeliner()

    def prepare_model(self, input_type):
        """
        prepares the Object_Detection model
        input_type: must be 'image', 'video' or 'ros'
        """
        assert input_type in ['image', 'video', 'ros'], "only 'image', 'video' and 'ros' input possible"
        super(ObjectDetectionModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            self.prepare_tracker()
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
                # Prepare Input Stream
                self.prepare_input_stream()
                # Define Input and Output tensors
                self._tensor_dict = self.get_tensor_dict(['num_detections', 'detection_boxes',
                                                          'detection_scores', 'detection_classes',
                                                          'detection_masks'])
                self._image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                # Mask Transformations
                if 'detection_masks' in self._tensor_dict:
                    # Reframe is required to translate the masks from box coordinates
                    # to image coordinates and fit the image size.
                    detection_boxes = tf.squeeze(self._tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(self._tensor_dict['detection_masks'], [0])
                    real_num_detection = tf.cast(self._tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, self.stream_height, self.stream_width)
                    detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    self._tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
                if self.config.SPLIT_MODEL:
                    self._score_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[1]))
                    self._score_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                    # Threading
                    self._gpu_worker = SessionWorker("GPU", self.detection_graph, self._tf_config)
                    self._cpu_worker = SessionWorker("CPU", self.detection_graph, self._tf_config)
                    self._gpu_opts = [self._score_out, self._expand_out]
                    self._cpu_opts = [self._tensor_dict['detection_boxes'],
                                      self._tensor_dict['detection_scores'],
                                      self._tensor_dict['detection_classes'],
                                      self._tensor_dict['num_detections']]
                return self

    def run_default_sess(self):
        """
        runs the default session
        """
        # default session
        self.frame = self._input_stream.read()
        output_dict = self._sess.run(
            self._tensor_dict,
            feed_dict={self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)},
            options=self._run_options,
            run_metadata=self._run_metadata)
        self.num = output_dict['num_detections'][0]
        self.classes = output_dict['detection_classes'][0]
        self.boxes = output_dict['detection_boxes'][0]
        self.scores = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            self.masks = output_dict['detection_masks'][0]

    def run_thread_sess(self):
        """
        runs separate GPU and CPU session threads
        """
        if self._gpu_worker.is_sess_empty():
            # put new queue
            self.frame = self._input_stream.read()
            gpu_feeds = {self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)}
            if self.config.VISUALIZE:
                gpu_extras = self.frame  # for visualization frame
            else:
                gpu_extras = None
            self._gpu_worker.put_sess_queue(self._gpu_opts, gpu_feeds, gpu_extras)
        g = self._gpu_worker.get_result_queue()
        if g is None:
            # gpu thread has no output queue. ok skip, let's check cpu thread.
            pass
        else:
            # gpu thread has output queue.
            score, expand, self._frame = g["results"][0], g["results"][1], g["extras"]
            if self._cpu_worker.is_sess_empty():
                # When cpu thread has no next queue, put new queue.
                # else, drop gpu queue.
                cpu_feeds = {self._score_in: score, self._expand_in: expand}
                cpu_extras = self.frame
                self._cpu_worker.put_sess_queue(self._cpu_opts, cpu_feeds, cpu_extras)
        c = self._cpu_worker.get_result_queue()
        if c is None:
            # cpu thread has no output queue. ok, nothing to do. continue
            self._wait_thread = True
            return  # If CPU RESULT has not been set yet, no fps update
        else:
            self._wait_thread = False
            self.boxes, self.scores, self.classes, self.num, self.frame = \
                c["results"][0], c["results"][1], c["results"][2], c["results"][3], c["extras"]

    def run_split_sess(self):
        """
        runs the split session WITHOUT threading
        optional: timeline writer
        """
        self.frame = self._input_stream.read()
        # GPU Session
        score, expand = self._sess.run(
            self._gpu_opts,
            feed_dict={self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)},
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM1.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))
        # CPU Session
        self.boxes, self.scores, self.classes, self.num = self._sess.run(
            self._cpu_opts,
            feed_dict={self._score_in: score, self._expand_in: expand},
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM2.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))

    def reformat_detection(self):
        """
        reformats the detection output
        """
        self.num = int(self.num)
        self.boxes = np.squeeze(self.boxes)
        self.classes = np.squeeze(self.classes).astype(np.uint8)
        self.scores = np.squeeze(self.scores)

    def detect(self):
        """
        Object_Detection detection function
        optional: multi-threaded split session, timeline writer
        """
        if not (self.config.USE_TRACKER and self._track):
            if self.config.SPLIT_MODEL:
                if self.config.MULTI_THREADING:
                    self.run_thread_sess()
                    if self._wait_thread:  # checks if thread has output
                        return
                else:
                    self.run_split_sess()
            else:
                self.run_default_sess()
                if self.config.WRITE_TIMELINE:
                    self.timeliner.write_timeline(
                        self._run_metadata.step_stats,
                        '{}/timeline_{}.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))
            self.reformat_detection()
            # Activate Tracker
            if self.config.USE_TRACKER and not self._is_imageD:
                self.activate_tracker()
        # Tracking
        else:
            self.run_tracker()
        # Publish ROS Message
        if self._is_rosD:
            self._ros_publisher.publish(self.boxes, self.scores, self.classes, self.num,
                                        self.category_index, self.frame.shape, self.masks,
                                        self.fps.fps_local())
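# SessionWorker comes from the project's threading helpers and is not shown in this snippet.
# The threaded split path above only relies on four calls (construction with a tag, graph and
# tf config, is_sess_empty, put_sess_queue, get_result_queue), so a minimal, hypothetical
# version can be built from one worker thread and two single-slot queues. This is a sketch of
# the interface as it is used above, not the project's actual implementation.
import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2
import tensorflow as tf


class SessionWorkerSketch(object):
    def __init__(self, tag, graph, tf_config):
        self.tag = tag
        self._in_queue = queue.Queue(maxsize=1)
        self._out_queue = queue.Queue(maxsize=1)
        self._sess = tf.Session(graph=graph, config=tf_config)
        t = threading.Thread(target=self._loop)
        t.daemon = True
        t.start()

    def is_sess_empty(self):
        # True when the worker is ready to accept a new job.
        return self._in_queue.empty()

    def put_sess_queue(self, opts, feeds, extras=None):
        self._in_queue.put({"opts": opts, "feeds": feeds, "extras": extras})

    def get_result_queue(self):
        # Non-blocking: the caller polls and simply carries on when nothing is ready yet.
        try:
            return self._out_queue.get_nowait()
        except queue.Empty:
            return None

    def _loop(self):
        while True:
            job = self._in_queue.get()
            results = self._sess.run(job["opts"], feed_dict=job["feeds"])
            self._out_queue.put({"results": results, "extras": job["extras"]})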
class DeepLabModel(Model):
    def __init__(self, config):
        super(DeepLabModel, self).__init__(config)

    def prepare_model(self, input_type):
        """
        prepares the DeepLab model
        input_type: must be 'image' or 'video'
        """
        assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
        super(DeepLabModel, self).prepare_model()
        self.input_type = input_type
        # fixed input sizes as model needs resize either way
        # Input configurations
        self.category_index = None
        if self.input_type == 'video':
            self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                             self.config.WIDTH,
                                             self.config.HEIGHT).start()
        elif self.input_type == 'image':
            self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                             self.config.LIMIT_IMAGES).start()
            if self.config.WRITE_TIMELINE:
                self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                self._run_metadata = tf.RunMetadata()
                self._timeliner = TimeLiner()
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
                return self

    def detect(self):
        """
        DeepLab detection function
        """
        self.frame = self._input_stream.read()
        height, width, _ = self.frame.shape
        resize_ratio = 1.0 * 513 / max(self._input_stream.real_width,
                                       self._input_stream.real_height)
        target_size = (int(resize_ratio * self._input_stream.real_width),
                       int(resize_ratio * self._input_stream.real_height))  # e.g. (513, 342) or (513, 384), depending on aspect ratio
        self.frame = self._visualizer.resize_image(self.frame, target_size)
        batch_seg_map = self._sess.run(
            'SemanticPredictions:0',
            feed_dict={'ImageTensor:0': [self._visualizer.convertRGB_image(self.frame)]},
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE and self.input_type == 'image':
            self._timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}.json'.format(self.config.RESULT_PATH,
                                             self.config.DISPLAY_NAME))
        seg_map = batch_seg_map[0]
        self.boxes = []
        self.labels = []
        self.ids = []
        if self.config.BBOX:
            map_labeled = measure.label(seg_map, connectivity=1)
            for region in measure.regionprops(map_labeled):
                if region.area > self.config.MINAREA:
                    box = region.bbox
                    id = seg_map[tuple(region.coords[0])]
                    label = self.config.LABEL_NAMES[id]
                    self.boxes.append(box)
                    self.labels.append(label)
                    self.ids.append(id)
        # workaround
        self.num = len(self.boxes)
        self.classes = self.ids
        self.scores = self.labels
        self.masks = seg_map
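# Quick check of the 513-pixel resize used in detect() above: DeepLab expects its longer input
# side to be 513, so for an assumed 640x480 stream the ratio and target size work out as
# follows (pure arithmetic, no project code involved).
real_width, real_height = 640, 480            # hypothetical capture size
resize_ratio = 1.0 * 513 / max(real_width, real_height)
target_size = (int(resize_ratio * real_width), int(resize_ratio * real_height))
print(resize_ratio, target_size)              # -> 0.8015625 (513, 384)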
def prepare_model(self, input_type):
    """
    prepares the Object_Detection model
    input_type: must be 'image' or 'video'
    """
    assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
    super(ObjectDetectionModel, self).prepare_model()
    self.input_type = input_type
    # Tracker
    if self.config.USE_TRACKER:
        sys.path.append(os.getcwd() + '/rod/kcf')
        import KCF
        self._tracker = KCF.kcftracker(False, True, False, False)
        self._tracker_counter = 0
        self._track = False
    print("> Building Graph")
    with self.detection_graph.as_default():
        with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
            # Input Configuration
            if self.input_type == 'video':
                self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                                 self.config.WIDTH,
                                                 self.config.HEIGHT).start()
                height = self._input_stream.real_height
                width = self._input_stream.real_width
            elif self.input_type == 'image':
                self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                                 self.config.LIMIT_IMAGES,
                                                 (self.config.WIDTH, self.config.HEIGHT)).start()
                height = self.config.HEIGHT
                width = self.config.WIDTH
            # Timeliner for image detection
            if self.config.WRITE_TIMELINE:
                self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                self._run_metadata = tf.RunMetadata()
                self.timeliner = TimeLiner()
            # Define Input and Output tensors
            self._tensor_dict = self.get_tensordict(['num_detections', 'detection_boxes',
                                                     'detection_scores', 'detection_classes',
                                                     'detection_masks'])
            self._image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in self._tensor_dict:
                # Reframe is required to translate the masks from box coordinates
                # to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(self._tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(self._tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(self._tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, height, width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                self._tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if self.config.SPLIT_MODEL:
                self._score_out = self.detection_graph.get_tensor_by_name(
                    '{}:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_out = self.detection_graph.get_tensor_by_name(
                    '{}:0'.format(self.config.SPLIT_NODES[1]))
                self._score_in = self.detection_graph.get_tensor_by_name(
                    '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_in = self.detection_graph.get_tensor_by_name(
                    '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                # Threading
                self._gpu_worker = SessionWorker("GPU", self.detection_graph, self._tf_config)
                self._cpu_worker = SessionWorker("CPU", self.detection_graph, self._tf_config)
                self._gpu_opts = [self._score_out, self._expand_out]
                self._cpu_opts = [self._tensor_dict['detection_boxes'],
                                  self._tensor_dict['detection_scores'],
                                  self._tensor_dict['detection_classes'],
                                  self._tensor_dict['num_detections']]
            return self
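# The squeeze/slice chain above exists because the exported detection graph pads its outputs to
# a fixed maximum number of detections; only the first num_detections rows are real. A toy TF1
# run with dummy tensors (shapes chosen arbitrarily) shows the effect of the trimming:
import numpy as np
import tensorflow as tf

with tf.Graph().as_default(), tf.Session() as sess:
    boxes = tf.constant(np.random.rand(1, 100, 4), tf.float32)   # [1, max_detections, 4]
    num_detections = tf.constant([3.0])                          # detector reports 3 real hits
    boxes = tf.squeeze(boxes, [0])                               # drop the batch dim -> [100, 4]
    real_num = tf.cast(num_detections[0], tf.int32)
    trimmed = tf.slice(boxes, [0, 0], [real_num, -1])            # keep only the first 3 rows
    print(sess.run(trimmed).shape)                               # -> (3, 4)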
class ObjectDetectionModel(Model):
    """
    object_detection model class
    """
    def __init__(self, config):
        super(ObjectDetectionModel, self).__init__(config)

    def prepare_model(self, input_type):
        """
        prepares the Object_Detection model
        input_type: must be 'image' or 'video'
        """
        assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
        super(ObjectDetectionModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            sys.path.append(os.getcwd() + '/rod/kcf')
            import KCF
            self._tracker = KCF.kcftracker(False, True, False, False)
            self._tracker_counter = 0
            self._track = False
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
                # Input Configuration
                if self.input_type == 'video':
                    self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                                     self.config.WIDTH,
                                                     self.config.HEIGHT).start()
                    height = self._input_stream.real_height
                    width = self._input_stream.real_width
                elif self.input_type == 'image':
                    self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                                     self.config.LIMIT_IMAGES,
                                                     (self.config.WIDTH, self.config.HEIGHT)).start()
                    height = self.config.HEIGHT
                    width = self.config.WIDTH
                # Timeliner for image detection
                if self.config.WRITE_TIMELINE:
                    self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    self._run_metadata = tf.RunMetadata()
                    self.timeliner = TimeLiner()
                # Define Input and Output tensors
                self._tensor_dict = self.get_tensordict(['num_detections', 'detection_boxes',
                                                         'detection_scores', 'detection_classes',
                                                         'detection_masks'])
                self._image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                # Mask Transformations
                if 'detection_masks' in self._tensor_dict:
                    # Reframe is required to translate the masks from box coordinates
                    # to image coordinates and fit the image size.
                    detection_boxes = tf.squeeze(self._tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(self._tensor_dict['detection_masks'], [0])
                    real_num_detection = tf.cast(self._tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, height, width)
                    detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    self._tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
                if self.config.SPLIT_MODEL:
                    self._score_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[1]))
                    self._score_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                    # Threading
                    self._gpu_worker = SessionWorker("GPU", self.detection_graph, self._tf_config)
                    self._cpu_worker = SessionWorker("CPU", self.detection_graph, self._tf_config)
                    self._gpu_opts = [self._score_out, self._expand_out]
                    self._cpu_opts = [self._tensor_dict['detection_boxes'],
                                      self._tensor_dict['detection_scores'],
                                      self._tensor_dict['detection_classes'],
                                      self._tensor_dict['num_detections']]
                return self

    def run_default_sess(self):
        """
        runs the default session
        """
        # default session
        self.frame = self._input_stream.read()
        output_dict = self._sess.run(
            self._tensor_dict,
            feed_dict={self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)},
            options=self._run_options,
            run_metadata=self._run_metadata)
        self.num = output_dict['num_detections'][0]
        self.classes = output_dict['detection_classes'][0]
        self.boxes = output_dict['detection_boxes'][0]
        self.scores = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            self.masks = output_dict['detection_masks'][0]

    def run_thread_sess(self):
        """
        runs separate GPU and CPU session threads
        """
        if self._gpu_worker.is_sess_empty():
            # put new queue
            self.frame = self._input_stream.read()
            gpu_feeds = {self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)}
            if self.config.VISUALIZE:
                gpu_extras = self.frame  # for visualization frame
            else:
                gpu_extras = None
            self._gpu_worker.put_sess_queue(self._gpu_opts, gpu_feeds, gpu_extras)
        g = self._gpu_worker.get_result_queue()
        if g is None:
            # gpu thread has no output queue. ok skip, let's check cpu thread.
            pass
        else:
            # gpu thread has output queue.
            score, expand, self._frame = g["results"][0], g["results"][1], g["extras"]
            if self._cpu_worker.is_sess_empty():
                # When cpu thread has no next queue, put new queue.
                # else, drop gpu queue.
                cpu_feeds = {self._score_in: score, self._expand_in: expand}
                cpu_extras = self.frame
                self._cpu_worker.put_sess_queue(self._cpu_opts, cpu_feeds, cpu_extras)
        c = self._cpu_worker.get_result_queue()
        if c is None:
            # cpu thread has no output queue. ok, nothing to do. continue
            self._wait_thread = True
            return  # If CPU RESULT has not been set yet, no fps update
        else:
            self._wait_thread = False
            self.boxes, self.scores, self.classes, self.num, self.frame = \
                c["results"][0], c["results"][1], c["results"][2], c["results"][3], c["extras"]

    def run_split_sess(self):
        """
        runs the split session WITHOUT threading
        optional: timeline writer
        """
        self.frame = self._input_stream.read()
        # GPU Session
        score, expand = self._sess.run(
            self._gpu_opts,
            feed_dict={self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)},
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM1.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))
        # CPU Session
        self.boxes, self.scores, self.classes, self.num = self._sess.run(
            self._cpu_opts,
            feed_dict={self._score_in: score, self._expand_in: expand},
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM2.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))

    def run_tracker(self):
        """
        runs the KCF tracker on the VideoStream frame
        !does not work on images, obviously!
        """
        self.frame = self._input_stream.read()
        if self._first_track:
            self._trackers = []
            self._tracker_boxes = self.boxes
            for box in self.boxes[~np.all(self.boxes == 0, axis=1)]:
                self._tracker.init(
                    conv_detect2track(box, self._input_stream.real_width,
                                      self._input_stream.real_height),
                    self.tracker_frame)
                self._trackers.append(self._tracker)
            self._first_track = False
        for idx, self._tracker in enumerate(self._trackers):
            tracker_box = self._tracker.update(self.frame)
            self._tracker_boxes[idx, :] = conv_track2detect(
                tracker_box, self._input_stream.real_width, self._input_stream.real_height)
        self._tracker_counter += 1
        self.boxes = self._tracker_boxes
        # Deactivate Tracker
        if self._tracker_counter >= self.config.TRACKER_FRAMES:
            self._track = False
            self._tracker_counter = 0

    def reformat_detection(self):
        """
        reformats the detection output
        """
        self.num = int(self.num)
        self.boxes = np.squeeze(self.boxes)
        self.classes = np.squeeze(self.classes).astype(np.uint8)
        self.scores = np.squeeze(self.scores)

    def detect(self):
        """
        Object_Detection detection function
        optional: multi-threaded split session, timeline writer
        """
        if not (self.config.USE_TRACKER and self._track):
            if self.config.SPLIT_MODEL:
                if self.config.MULTI_THREADING:
                    self.run_thread_sess()
                    if self._wait_thread:  # checks if thread has output
                        return
                else:
                    self.run_split_sess()
            else:
                self.run_default_sess()
                if self.config.WRITE_TIMELINE and self.input_type == 'image':
                    self.timeliner.write_timeline(
                        self._run_metadata.step_stats,
                        '{}/timeline_{}.json'.format(self.config.RESULT_PATH, self.config.DISPLAY_NAME))
            self.reformat_detection()
            # Activate Tracker
            if self.config.USE_TRACKER and self.num <= self.config.NUM_TRACKERS and self.input_type == 'video':
                self.tracker_frame = self.frame
                self._track = True
                self._first_track = True
        # Tracking
        else:
            self.run_tracker()
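# conv_detect2track and conv_track2detect are imported from the project's helpers and are not
# shown in this snippet. Based on how run_tracker() calls them above, they presumably translate
# between the detector's normalized [ymin, xmin, ymax, xmax] boxes and the pixel [x, y, w, h]
# format a KCF tracker works with. The functions below are purely hypothetical illustrations of
# that assumed conversion; the real helpers may differ (e.g. add padding or clipping).
import numpy as np

def conv_detect2track_sketch(box, width, height):
    ymin, xmin, ymax, xmax = box
    x, y = int(xmin * width), int(ymin * height)
    w, h = int((xmax - xmin) * width), int((ymax - ymin) * height)
    return [x, y, w, h]

def conv_track2detect_sketch(box, width, height):
    x, y, w, h = box
    return np.array([y / float(height), x / float(width),
                     (y + h) / float(height), (x + w) / float(width)])

print(conv_detect2track_sketch([0.25, 0.5, 0.75, 1.0], 640, 480))  # -> [320, 120, 320, 240]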