Example no. 1
    def _step(self):
        """Annotate the incoming frame with demo shapes and push it out.

        Grabs the 'image' input, draws two diagonals and a square on a PIL
        copy (purely to prove the image round-trip works), then pushes the
        modified frame to 'out_image'.
        """
        print("[DEBUG] ----- start step")

        # Fetch the image container from the input port via traits,
        # then pull the image out of the container.
        img_container = self.grab_input_using_trait('image')
        vital_img = img_container.image()

        # Work on a PIL view of the frame.
        frame = get_pil_image(vital_img)

        # Draw on the image to prove we can do it.
        offset = 37
        import PIL.ImageDraw
        painter = PIL.ImageDraw.Draw(frame)
        painter.line((0, 0) + frame.size, fill=128, width=5)
        painter.line((0, frame.size[1], frame.size[0], 0),
                     fill=32768,
                     width=5)
        #                   x0      y0      x1            y1
        painter.rectangle([offset, offset, offset + 100, offset + 100],
                          outline=125)
        del painter

        # Wrap the edited frame back up and push it downstream.
        self.push_to_port_using_trait('out_image',
                                      ImageContainer(from_pil(frame)))

        self._base_step()
Example no. 2
    def _step(self):
        """Compute a descriptor for the input image and push it to 'vector'.

        In test mode (``apply_descriptor_test_mode`` truthy) a constant fake
        descriptor is pushed instead, so the pipeline can run without smqtk.

        Fixes over the original: removed the dead ``pix = np.array(...)``
        local (never read) and the duplicated push/base-step calls in both
        branches.
        """
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')

        if not apply_descriptor_test_mode:
            # Get image from container
            in_img = in_img_c.image()

            # convert generic image to PIL image
            pil_image = get_pil_image(in_img)

            # get image in acceptable format
            # TBD use in memory transfer
            pil_image.save( "file.png" )
            test_data = DataFileElement("file.png")

            result = self.generator.compute_descriptor(test_data, self.factory)
            desc_list = result.vector().tolist()
        else:
            # Create fake descriptor in test mode
            desc_list = 4096 * [0.223]

        # push list to output port
        self.push_to_port_using_trait('vector', desc_list)

        self._base_step()
Example no. 3
    def _step(self):
        """Attach appearance-feature descriptors to detections and push them.

        Reads 'image', 'timestamp' and 'detected_object_set' inputs, runs
        the appearance feature extractor over detections above the selection
        threshold, stores each feature vector as that detection's descriptor,
        and pushes the resulting set to 'detected_object_set'.

        Fixes over the original: removed unused locals (``bbox``, ``fid``,
        ``ts``, and the ``d_obj = item`` alias), hoisted the repeated
        ``.numpy()`` conversion, and corrected the misspelled log message.
        """
        try:
            # Grab inputs from ports using traits.  The timestamp is pulled
            # (and logged) even though it is otherwise unused, so the port
            # protocol stays satisfied.
            in_img_c = self.grab_input_using_trait('image')
            timestamp = self.grab_input_using_trait('timestamp')
            dos_ptr = self.grab_input_using_trait('detected_object_set')
            print('timestamp = {!r}'.format(timestamp))

            # Get current frame and give it to app feature extractor
            im = get_pil_image(in_img_c.image())
            self._app_feature_extractor.frame = im

            # Keep only detections above the configured threshold
            dos = dos_ptr.select(self._select_threshold)
            bbox_num = dos.size()

            det_obj_set = DetectedObjectSet()

            if bbox_num == 0:
                print(
                    '!!! No bbox is provided on this frame and skip this frame !!!'
                )
            else:
                # appearance features (format: pytorch tensor)
                app_f_begin = timer()
                pt_app_features = self._app_feature_extractor(dos, False)
                app_f_end = timer()
                print('%%%app feature elapsed time: {}'.format(app_f_end -
                                                               app_f_begin))

                # store each appearance feature on its detection
                for idx, item in enumerate(dos):
                    feature = pt_app_features[idx].numpy()
                    app_f = new_descriptor(feature.size)
                    app_f[:] = feature
                    item.set_descriptor(app_f)
                    det_obj_set.add(item)

            # push detection set to output port
            self.push_to_port_using_trait('detected_object_set', det_obj_set)
            self._base_step()

        except BaseException as e:
            print(repr(e))
            import traceback
            print(traceback.format_exc())
            sys.stdout.flush()
            raise
Example no. 4
    def detect(self, in_img_c):
        """Run the TF detector on one image container.

        Optionally normalizes the raw image (per ``self.norm_image_type``)
        before inference, thresholds detections by confidence, converts
        relative box coordinates to pixels, and returns a DetectedObjectSet.

        Fixes over the original: replaced the index-based
        ``for i in range(0, len(scores))`` loop with idiomatic ``zip``
        iteration and dropped the ``good_boxes`` list that was only ever
        used for its length.
        """
        import tensorflow as tf
        import humanfriendly

        image_height = in_img_c.height()
        image_width = in_img_c.width()

        if (self.norm_image_type and self.norm_image_type != "none"):
            print("Normalizing input image")

            in_img = in_img_c.image().asarray().astype("uint16")

            bottom, top = self.get_scaling_values(self.norm_image_type, in_img,
                                                  image_height)
            in_img = self.lin_normalize_image(in_img, bottom, top)

            # replicate single channel into three so the net sees RGB
            in_img = np.tile(in_img, (1, 1, 3))
        else:
            in_img = np.array(get_pil_image(in_img_c.image()).convert("RGB"))

        start_time = time.time()
        boxes, scores, classes = self.generate_detection(
            self.detection_graph, in_img)
        elapsed = time.time() - start_time
        print("Done running detector in {}".format(
            humanfriendly.format_timespan(elapsed)))

        detections = DetectedObjectSet()
        detected_count = 0

        for bbox, score in zip(boxes, scores):
            if score < self.confidence_thresh:
                continue
            detected_count += 1

            # bbox layout is (top, left, bottom, right) in relative coords;
            # scale to absolute pixel coordinates.
            xmin = bbox[1] * image_width
            ymin = bbox[0] * image_height
            xmax = bbox[3] * image_width
            ymax = bbox[2] * image_height

            dot = DetectedObjectType(self.category_name, score)
            detections.add(DetectedObject(BoundingBoxD(xmin, ymin, xmax, ymax),
                                          score, dot))

        print("Detected {}".format(detected_count))
        return detections
Example no. 5
    def _step(self):
        """Pass the input frame through, or emit a random solid-color frame.

        When the incoming object_track_set is empty, the output image is a
        uniformly random-colored frame of the same size; otherwise the input
        frame is forwarded unchanged.

        Fix: the original called ``pil_image.new(...)`` — but ``pil_image``
        is not defined in this scope (elsewhere in this file it is a local
        variable name, not a module alias), so the empty-tracks branch raised
        NameError. Use ``PIL.Image.new`` explicitly instead.
        """
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')
        tracks = self.grab_input_using_trait('object_track_set')

        # Get python image from container (just for show)
        in_img = get_pil_image(in_img_c.image()).convert('RGB')

        if len(tracks.tracks()) == 0:
            # Fill image with a random solid color
            import PIL.Image
            in_img = PIL.Image.new(mode='RGB',
                                   size=in_img.size,
                                   color=(randint(0, 255), randint(0, 255),
                                          randint(0, 255)))

        # push dummy image object (same as input) to output port
        self.push_to_port_using_trait('image',
                                      ImageContainer(from_pil(in_img)))

        self._base_step()
Example no. 6
    def _step(self):
        """Advance the multi-target tracker by one frame.

        Per frame: grab image, timestamp and detections; build interaction
        (grid) and appearance features per detection; terminate stale
        tracks; optionally pre-match with the IOU tracker; score
        track/detection similarity with the SRNN matcher and solve the
        assignment with the Hungarian algorithm; finally push the updated
        object_track_set and detected_object_set downstream.

        NOTE(review): assumes ``self._track_set``, ``self._m_bbox``, the
        threshold attributes, and ``self._step_id`` were initialized in the
        process's configure step — not visible in this chunk; verify.
        """
        try:
            def timing(desc, f):
                """Return f(), printing a message about how long it took"""
                start = timer()
                result = f()
                end = timer()
                print('%%%', desc, ' elapsed time: ', end - start, sep='')
                return result

            print('step', self._step_id)

            # grab image container from port using traits
            in_img_c = self.grab_input_using_trait('image')
            timestamp = self.grab_input_using_trait('timestamp')
            dos_ptr = self.grab_input_using_trait('detected_object_set')
            print('timestamp =', repr(timestamp))

            # Get current frame
            im = get_pil_image(in_img_c.image()).convert('RGB')

            # Get detection bbox
            if self._gtbbox_flag:
                # ground-truth mode: boxes come from a preloaded list
                # indexed by step id instead of the input port
                dos = self._m_bbox[self._step_id]
                bbox_num = len(dos)
            else:
                dos = dos_ptr.select(self._select_threshold)
                bbox_num = dos.size()
            #print('bbox list len is', dos.size())

            det_obj_set = DetectedObjectSet()
            if bbox_num == 0:
                print('!!! No bbox is provided on this frame.  Skipping this frame !!!')
            else:
                # interaction features
                grid_feature_list = timing('grid feature', lambda:
                                           self._grid(im.size, dos, self._gtbbox_flag))

                # appearance features (format: pytorch tensor)
                pt_app_features = timing('app feature', lambda:
                                         self._app_feature_extractor(im, dos, self._gtbbox_flag))

                track_state_list = []
                next_track_id = int(self._track_set.get_max_track_id()) + 1

                # get new track state from new frame and detections
                for idx, item in enumerate(dos):
                    if self._gtbbox_flag:
                        # in GT mode there is no timestamp; use the step id
                        # for both frame id and time
                        bbox = item
                        fid = self._step_id
                        ts = self._step_id
                        d_obj = DetectedObject(bbox=item, confidence=1.0)
                    else:
                        bbox = item.bounding_box()
                        fid = timestamp.get_frame()
                        ts = timestamp.get_time_usec()
                        d_obj = item

                    if self._add_features_to_detections:
                        # store app feature to detected_object
                        app_f = new_descriptor(g_config.A_F_num)
                        app_f[:] = pt_app_features[idx].numpy()
                        d_obj.set_descriptor(app_f)
                    det_obj_set.add(d_obj)

                    # build track state for current bbox for matching
                    cur_ts = track_state(frame_id=self._step_id,
                                        bbox_center=bbox.center(),
                                        interaction_feature=grid_feature_list[idx],
                                        app_feature=pt_app_features[idx],
                                        bbox=[int(bbox.min_x()), int(bbox.min_y()),
                                              int(bbox.width()), int(bbox.height())],
                                        detected_object=d_obj,
                                        sys_frame_id=fid, sys_frame_time=ts)
                    track_state_list.append(cur_ts)

                # if there are no tracks, generate new tracks from the track_state_list
                if not self._track_flag:
                    next_track_id = self._track_set.add_new_track_state_list(next_track_id,
                                    track_state_list, self._track_initialization_threshold)
                    self._track_flag = True
                else:
                    # check whether we need to terminate a track
                    for track in list(self._track_set.iter_active()):
                        # terminating a track based on readin_frame_id or original_frame_id gap
                        # NOTE(review): `fid` here holds the value from the
                        # last iteration of the detection loop above
                        if (self._step_id - track[-1].frame_id > self._terminate_track_threshold
                            or fid - track[-1].sys_frame_id > self._sys_terminate_track_threshold):
                            self._track_set.deactivate_track(track)


                    # call IOU tracker
                    if self._IOU_flag:
                        self._track_set, track_state_list = timing('IOU tracking', lambda: (
                            self._iou_tracker(self._track_set, track_state_list)
                        ))

                    #print('***track_set len', len(self._track_set))
                    #print('***track_state_list len', len(track_state_list))

                    # estimate similarity matrix
                    similarity_mat, track_idx_list = timing('SRNN association', lambda: (
                        self._srnn_matching(self._track_set, track_state_list, self._ts_threshold)
                    ))

                    # reset update_flag
                    self._track_set.reset_updated_flag()

                    # Hungarian algorithm
                    row_idx_list, col_idx_list = timing('Hungarian algorithm', lambda: (
                        sp.optimize.linear_sum_assignment(similarity_mat)
                    ))

                    for i in range(len(row_idx_list)):
                        r = row_idx_list[i]
                        c = col_idx_list[i]

                        # similarity_mat holds negated similarities (so the
                        # assignment solver can minimize); negate back here
                        if -similarity_mat[r, c] < self._similarity_threshold:
                            # initialize a new track
                            if (track_state_list[c].detected_object.confidence()
                                   >= self._track_initialization_threshold):
                                self._track_set.add_new_track_state(next_track_id,
                                        track_state_list[c])
                                next_track_id += 1
                        else:
                            # add to existing track
                            self._track_set.update_track(track_idx_list[r], track_state_list[c])

                    # for the remaining unmatched track states, we initialize new tracks
                    if len(track_state_list) - len(col_idx_list) > 0:
                        for i in range(len(track_state_list)):
                            if (i not in col_idx_list
                                and (track_state_list[i].detected_object.confidence()
                                     >= self._track_initialization_threshold)):
                                self._track_set.add_new_track_state(next_track_id,
                                        track_state_list[i])
                                next_track_id += 1

                print('total tracks', len(self._track_set))

            # push track set to output port
            ot_list = ts2ot_list(self._track_set)
            ots = ObjectTrackSet(ot_list)

            self.push_to_port_using_trait('object_track_set', ots)
            self.push_to_port_using_trait('detected_object_set', det_obj_set)

            self._step_id += 1

            self._base_step()

        except BaseException as e:
            print( repr( e ) )
            import traceback
            print( traceback.format_exc() )
            sys.stdout.flush()
            raise
    def _step(self):
        """Extract positive descriptors from detections and mine negatives.

        Runs the appearance feature extractor over thresholded detections to
        produce positive descriptors, then (if histogram search is enabled)
        queries the histogram KD-tree for nearby entries to use as negative
        samples. Pushes descriptor and id lists on four output ports.

        Fixes over the original: guard against ZeroDivisionError when no
        detections (hence no positive descriptors) were produced, and
        corrected the misspelled log message.
        """
        try:
            # Grab image container from port using traits
            in_img_c = self.grab_input_using_trait('image')
            dos_ptr = self.grab_input_using_trait('detected_object_set')

            # Declare outputs
            new_positive_descriptors = []
            new_positive_ids = []
            new_negative_descriptors = []
            new_negative_ids = []

            # Get detection bbox
            dos = dos_ptr.select(self._select_threshold)
            bbox_num = dos.size()

            # Make sure we have at least some detections
            app_f_begin = timer()
            if bbox_num != 0:
                im = get_pil_image(in_img_c.image())
                self._app_feature_extractor.frame = im
                pt_app_features = self._app_feature_extractor(dos)
                for item in pt_app_features:
                    new_positive_descriptors.append(item.numpy())
                    new_positive_ids.append(self._get_uid("scale_and_shift"))

            app_f_end = timer()
            print('%%%aug app feature elapsed time: {}'.format(app_f_end -
                                                               app_f_begin))

            # Mine negatives only when there is at least one positive;
            # the original divided by len(new_positive_descriptors)
            # unconditionally and crashed on empty frames.
            if new_positive_descriptors:
                # Negatives per positive via distance function,
                # at least one each
                neg_per_pos = max(1, int(self._negative_sample_count /
                                         len(new_positive_descriptors)))

                if self._use_hist:
                    for nd in new_positive_descriptors:
                        # Do distance func in database
                        _dists, indxs = self._hist_tree.query(
                            np.reshape(nd, np.size(nd)), k=neg_per_pos)
                        # Make new negative entries
                        for ind in indxs:
                            new_negative_descriptors.append(self._hist_data[ind])
                            new_negative_ids.append(self._get_uid("db_neg"))

            # push outputs
            vital_pos_descriptors = to_vital(new_positive_descriptors)
            vital_neg_descriptors = to_vital(new_negative_descriptors)

            self.push_to_port_using_trait('new_positive_descriptors',
                                          vital_pos_descriptors)
            self.push_to_port_using_trait('new_positive_ids',
                                          datum.VectorString(new_positive_ids))
            self.push_to_port_using_trait('new_negative_descriptors',
                                          vital_neg_descriptors)
            self.push_to_port_using_trait('new_negative_ids',
                                          datum.VectorString(new_negative_ids))
            self._base_step()

        except BaseException as e:
            print(repr(e))
            import traceback
            print(traceback.format_exc())
            sys.stdout.flush()
            raise