Example #1
    def test_new(self):
        # Attempt construction using a bunch of random, non-zero integers
        random.seed(0)
        for i in range(100):
            n = random.randint(1, 4096)
            new_descriptor(n, 'd')
            new_descriptor(n, 'f')
Example #2
    def test_new_with_descriptors(self):
        # Try creating a descriptor set with multiple descriptors as input.
        descriptor_list = [
            new_descriptor(1),
            new_descriptor(1),
            new_descriptor(1),
        ]
        ds = DescriptorSet(descriptor_list)
Example #3
    def test_num_bytes(self):
        # Even without calling the C function, nbytes should still be correct.
        random.seed(0)
        for i in range(100):
            n = random.randint(1, 4096)
            print(n, end=' ')

            nose.tools.assert_equal(new_descriptor(n, 'd').nbytes, 8 * n)
            nose.tools.assert_equal(new_descriptor(n, 'f').nbytes, 4 * n)
Example #4
    def test_operators(self):
        d = new_descriptor(10)
        b = new_descriptor(10)
        c = new_descriptor(5)
        d[:] = 1
        b[:] = 1
        nose.tools.ok_(d == b)
        nose.tools.ok_(c != b)
        nose.tools.ok_(not c != c)
Example #5
    def _step(self):
        object_tracks = self.grab_input_using_trait('object_track_set')

        for object_track in object_tracks.tracks():
            for track_state in object_track:
                if track_state.frame_id == self.current_idx:
                    cur = self.conn.cursor()
                    cur.execute("SELECT track_descriptor.uid FROM track_descriptor "
                                "INNER JOIN track_descriptor_track ON track_descriptor.uid = track_descriptor_track.uid "
                                "INNER JOIN track_descriptor_history ON track_descriptor.uid = track_descriptor_history.uid "
                                "WHERE track_descriptor.video_name = %(video_name)s AND track_descriptor_history.frame_number = %(frame_number)s AND track_descriptor_track.track_id = %(track_id)s",
                                {
                                    "video_name": self.video_name,
                                    "frame_number": track_state.frame_id,
                                    "track_id": object_track.id,
                                })
                    rows = list(cur.fetchall())
                    if len(rows) != 1:
                        raise RuntimeError("Could not get track descriptor")
                    uid = rows[0][0]

                    smqtk_descriptor = self.smqtk_descriptor_index.get_descriptor(uid)

                    vital_descriptor = new_descriptor(len(smqtk_descriptor.vector()), "d")
                    vital_descriptor[:] = smqtk_descriptor.vector()

                    track_state.detection.set_descriptor(vital_descriptor)

                    print("Finished track state: %i %i" % (object_track.id, track_state.frame_id))

        self.push_to_port_using_trait('object_track_set', object_tracks)

        self.current_idx += 1

        self._base_step()
Example #6
    def test_tobytearray(self):
        # Expect a 0-valued descriptor to have a 0-valued byte array of the
        # appropriate size.
        d = new_descriptor(64)
        d[:] = 0
        b = d.tobytearray()
        nose.tools.assert_equal(len(b), d.nbytes)
        nose.tools.assert_equal(sum(b), 0)
Example #7
    def test_size_multiple(self):
        # Check that size accurately reports the number of descriptors the
        # set was constructed with.
        d_list = [
            new_descriptor(),
        ]
        ds = DescriptorSet(d_list)
        self.assertEqual(ds.size(), 1)
        self.assertEqual(len(ds), 1)
Example #8
    def test_raw_data(self):
        d = new_descriptor(64)
        d[:] = 1
        nose.tools.assert_equal(d.sum(), 64)

        # Check that slicing the array data yields an array with the same
        # values.
        d2 = d[:]
        numpy.testing.assert_equal(d.todoublearray(), d2)
Example #9
def to_vital(raw_data):
    if len(raw_data) == 0:
        return DescriptorSet()
    vital_descriptors = []
    for item in raw_data:
        new_desc = new_descriptor(item.size)
        new_desc[:] = item
        vital_descriptors.append(new_desc)
    return DescriptorSet(vital_descriptors)
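
The to_vital helper in Example #9 accepts any iterable of array-like items that expose a .size attribute and support slice assignment into a descriptor; numpy arrays satisfy both. A minimal usage sketch with hypothetical input data, assuming the same DescriptorSet API shown in the other examples:

import numpy as np

# Hypothetical feature vectors of differing lengths.
raw = [
    np.arange(4, dtype='float64'),
    np.ones(8, dtype='float64'),
    np.zeros(2, dtype='float64'),
]

descriptor_set = to_vital(raw)
print(descriptor_set.size())  # expected: 3
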
Example #10
    def _step(self):
        try:
            # Grab image container from port using traits
            in_img_c = self.grab_input_using_trait('image')
            timestamp = self.grab_input_using_trait('timestamp')
            dos_ptr = self.grab_input_using_trait('detected_object_set')
            print('timestamp = {!r}'.format(timestamp))

            # Get current frame and give it to app feature extractor
            im = get_pil_image(in_img_c.image())
            self._app_feature_extractor.frame = im

            bbox_num = 0

            # Get detection bbox
            dos = dos_ptr.select(self._select_threshold)
            bbox_num = dos.size()

            det_obj_set = DetectedObjectSet()

            if bbox_num == 0:
                print(
                    '!!! No bbox is provided on this frame; skipping this frame !!!'
                )
            else:
                # appearance features (format: pytorch tensor)
                app_f_begin = timer()
                pt_app_features = self._app_feature_extractor(dos, False)
                app_f_end = timer()
                print('%%%app feature elapsed time: {}'.format(app_f_end - app_f_begin))

                # get new track state from new frame and detections
                for idx, item in enumerate(dos):
                    bbox = item.bounding_box()
                    fid = timestamp.get_frame()
                    ts = timestamp.get_time_usec()
                    d_obj = item

                    # store the appearance feature in the DetectedObject
                    app_f = new_descriptor(pt_app_features[idx].numpy().size)
                    app_f[:] = pt_app_features[idx].numpy()
                    # print( pt_app_features[idx].numpy() )
                    d_obj.set_descriptor(app_f)
                    det_obj_set.add(d_obj)

            # push track set to output port
            self.push_to_port_using_trait('detected_object_set', det_obj_set)
            self._base_step()

        except BaseException as e:
            print(repr(e))
            import traceback
            print(traceback.format_exc())
            sys.stdout.flush()
            raise
Example #11
    def test_get_descriptors_multiple(self):
        # Test getting descriptors given to the set in its constructor.
        d_list = [
            new_descriptor(1),
            new_descriptor(2),
            new_descriptor(3),
        ]
        d_list[0][:] = [0]
        d_list[1][:] = [1, 2]
        d_list[2][:] = [3, 4, 5]

        ds = DescriptorSet(d_list)

        ds_descriptors = ds.descriptors()
        self.assertEqual(len(ds), 3)
        self.assertEqual(len(ds_descriptors), 3)
        numpy.testing.assert_array_equal(ds_descriptors[0], d_list[0])
        numpy.testing.assert_array_equal(ds_descriptors[1], d_list[1])
        numpy.testing.assert_array_equal(ds_descriptors[2], d_list[2])
Example #12
    def _step(self):
        #
        # Grab input values from ports using traits.
        #
        # Vector of UIDs for vector of descriptors in descriptor_set.
        #
        #: :type: list[str]
        string_tuple = self.grab_input_using_trait('string_vector')

        descriptors = self.smqtk_descriptor_index.get_many_descriptors(
            string_tuple)
        vital_descriptors = []

        for desc in descriptors:
            vector = desc.vector()
            vital_desc = new_descriptor(len(vector), "d")
            vital_desc[:] = vector
            vital_descriptors.append(vital_desc)

        vital_descriptor_set = DescriptorSet(vital_descriptors)
        self.push_to_port_using_trait('descriptor_set', vital_descriptor_set)

        self._base_step()
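
The core conversion in Examples #5 and #12 is the same two steps: allocate a double ('d') descriptor of the vector's length, then slice-assign the values. A standalone sketch of just that step, using a plain numpy vector as a hypothetical stand-in for desc.vector():

import numpy as np

vector = np.random.rand(128)                    # hypothetical stand-in for desc.vector()
vital_desc = new_descriptor(len(vector), "d")   # allocate a descriptor of matching length
vital_desc[:] = vector                          # copy the values in via slice assignment
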
Example #13
    def _step(self):
        try:
            def timing(desc, f):
                """Return f(), printing a message about how long it took"""
                start = timer()
                result = f()
                end = timer()
                print('%%%', desc, ' elapsed time: ', end - start, sep='')
                return result

            print('step', self._step_id)

            # grab image container from port using traits
            in_img_c = self.grab_input_using_trait('image')
            timestamp = self.grab_input_using_trait('timestamp')
            dos_ptr = self.grab_input_using_trait('detected_object_set')
            print('timestamp =', repr(timestamp))

            # Get current frame
            im = get_pil_image(in_img_c.image()).convert('RGB')

            # Get detection bbox
            if self._gtbbox_flag:
                dos = self._m_bbox[self._step_id]
                bbox_num = len(dos)
            else:
                dos = dos_ptr.select(self._select_threshold)
                bbox_num = dos.size()
            #print('bbox list len is', dos.size())

            det_obj_set = DetectedObjectSet()
            if bbox_num == 0:
                print('!!! No bbox is provided on this frame.  Skipping this frame !!!')
            else:
                # interaction features
                grid_feature_list = timing('grid feature', lambda:
                                           self._grid(im.size, dos, self._gtbbox_flag))

                # appearance features (format: pytorch tensor)
                pt_app_features = timing('app feature', lambda:
                                         self._app_feature_extractor(im, dos, self._gtbbox_flag))

                track_state_list = []
                next_track_id = int(self._track_set.get_max_track_id()) + 1

                # get new track state from new frame and detections
                for idx, item in enumerate(dos):
                    if self._gtbbox_flag:
                        bbox = item
                        fid = self._step_id
                        ts = self._step_id
                        d_obj = DetectedObject(bbox=item, confidence=1.0)
                    else:
                        bbox = item.bounding_box()
                        fid = timestamp.get_frame()
                        ts = timestamp.get_time_usec()
                        d_obj = item

                    if self._add_features_to_detections:
                        # store the appearance feature in the detected object
                        app_f = new_descriptor(g_config.A_F_num)
                        app_f[:] = pt_app_features[idx].numpy()
                        d_obj.set_descriptor(app_f)
                    det_obj_set.add(d_obj)

                    # build track state for current bbox for matching
                    cur_ts = track_state(frame_id=self._step_id,
                                        bbox_center=bbox.center(),
                                        interaction_feature=grid_feature_list[idx],
                                        app_feature=pt_app_features[idx],
                                        bbox=[int(bbox.min_x()), int(bbox.min_y()),
                                              int(bbox.width()), int(bbox.height())],
                                        detected_object=d_obj,
                                        sys_frame_id=fid, sys_frame_time=ts)
                    track_state_list.append(cur_ts)

                # if there are no tracks, generate new tracks from the track_state_list
                if not self._track_flag:
                    next_track_id = self._track_set.add_new_track_state_list(next_track_id,
                                    track_state_list, self._track_initialization_threshold)
                    self._track_flag = True
                else:
                    # check whether we need to terminate a track
                    for track in list(self._track_set.iter_active()):
                        # terminate a track based on the read-in frame_id or original frame_id gap
                        if (self._step_id - track[-1].frame_id > self._terminate_track_threshold
                            or fid - track[-1].sys_frame_id > self._sys_terminate_track_threshold):
                            self._track_set.deactivate_track(track)


                    # call IOU tracker
                    if self._IOU_flag:
                        self._track_set, track_state_list = timing('IOU tracking', lambda: (
                            self._iou_tracker(self._track_set, track_state_list)
                        ))

                    #print('***track_set len', len(self._track_set))
                    #print('***track_state_list len', len(track_state_list))

                    # estimate similarity matrix
                    similarity_mat, track_idx_list = timing('SRNN association', lambda: (
                        self._srnn_matching(self._track_set, track_state_list, self._ts_threshold)
                    ))

                    # reset update_flag
                    self._track_set.reset_updated_flag()

                    # Hungarian algorithm
                    row_idx_list, col_idx_list = timing('Hungarian algorithm', lambda: (
                        sp.optimize.linear_sum_assignment(similarity_mat)
                    ))

                    for i in range(len(row_idx_list)):
                        r = row_idx_list[i]
                        c = col_idx_list[i]

                        if -similarity_mat[r, c] < self._similarity_threshold:
                            # initialize a new track
                            if (track_state_list[c].detected_object.confidence()
                                   >= self._track_initialization_threshold):
                                self._track_set.add_new_track_state(next_track_id,
                                        track_state_list[c])
                                next_track_id += 1
                        else:
                            # add to existing track
                            self._track_set.update_track(track_idx_list[r], track_state_list[c])

                    # for the remaining unmatched track states, we initialize new tracks
                    if len(track_state_list) - len(col_idx_list) > 0:
                        for i in range(len(track_state_list)):
                            if (i not in col_idx_list
                                and (track_state_list[i].detected_object.confidence()
                                     >= self._track_initialization_threshold)):
                                self._track_set.add_new_track_state(next_track_id,
                                        track_state_list[i])
                                next_track_id += 1

                print('total tracks', len(self._track_set))

            # push track set to output port
            ot_list = ts2ot_list(self._track_set)
            ots = ObjectTrackSet(ot_list)

            self.push_to_port_using_trait('object_track_set', ots)
            self.push_to_port_using_trait('detected_object_set', det_obj_set)

            self._step_id += 1

            self._base_step()

        except BaseException as e:
            print(repr(e))
            import traceback
            print(traceback.format_exc())
            sys.stdout.flush()
            raise
Example #14
    def test_size(self):
        # Check that size reports the number of elements the descriptor was
        # constructed with.
        random.seed(0)
        for i in range(100):
            n = random.randint(1, 4096)
            nose.tools.assert_equal(new_descriptor(n).size, n)
Example #15
    def setUp(self):
        self.f1 = FeatureF([1, 1], 1, 2, 1)
        self.desc = new_descriptor(33, 'd')
        self.ts = TrackState(0)
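
Taken together, the tests above exercise a small surface: construct a descriptor of a given size and element type, fill it by slice assignment, inspect size, nbytes and sum, and group descriptors into a DescriptorSet. A minimal end-to-end sketch under those same assumptions (imports of new_descriptor and DescriptorSet from the library under test are assumed, as in the examples):

import numpy as np

# Two descriptors of length 4 holding doubles ('d'), as in Examples #1 and #15.
d1 = new_descriptor(4, 'd')
d2 = new_descriptor(4, 'd')
d1[:] = [0.0, 1.0, 2.0, 3.0]
d2[:] = np.ones(4)

assert d1.size == 4          # element count (Example #14)
assert d1.nbytes == 8 * 4    # 8 bytes per double element (Example #3)
assert d2.sum() == 4         # slice-assigned values are visible (Example #8)

# Group them into a set and check its reported size (Examples #2 and #7).
ds = DescriptorSet([d1, d2])
assert ds.size() == 2
assert len(ds) == 2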