def play(self, repeat=True, pause=30):
     # pause should be equivalent to the ghost tail length in liveview, if that's what you're using this for
     
     repetitions = 1
     if repeat is True:
         repetitions = 1000000
     elif repeat is False:
         repetitions = 1
     else:
         repetitions = repeat
         
     rep = 0
     while rep < repetitions and not rospy.is_shutdown():
         rep += 1
     
         r = rospy.Rate(self.fps)        
         for frame in range(self.trajectory.length):
             if rospy.is_shutdown():        
                 break
             acquire_stamp = rospy.Time.now()
             position = self.trajectory.positions[frame,:]
             velocity = self.trajectory.velocities[frame,:]
             
             flydra_object = msgs.flydra_object( self.trajectory.obj_id, 
                                                 geometry_msgs.Point(position[0], position[1], position[2]), 
                                                 geometry_msgs.Vector3(velocity[0], velocity[1], velocity[2]), 
                                                 self.posvel_covariance_diagonal)
             flydra_objects = [flydra_object]
             
             framenumber = frame
             reconstruction_stamp = rospy.Time.now()
             objects = flydra_objects
             flydra_mainbrain_packet = msgs.flydra_mainbrain_packet(framenumber, reconstruction_stamp, acquire_stamp, objects)
             flydra_mainbrain_super_packet = msgs.flydra_mainbrain_super_packet([flydra_mainbrain_packet])
             print rep, frame
             self.pub.publish(flydra_mainbrain_super_packet)
             r.sleep()
         for i in range(pause):
             if rospy.is_shutdown():        
                 break
             acquire_stamp = rospy.Time.now()
             position = self.trajectory.positions[-1,:]
             velocity = self.trajectory.velocities[-1,:]
             
             flydra_object = msgs.flydra_object( self.trajectory.obj_id, 
                                                 geometry_msgs.Point(position[0], position[1], position[2]), 
                                                 geometry_msgs.Vector3(velocity[0], velocity[1], velocity[2]), 
                                                 self.posvel_covariance_diagonal)
             flydra_objects = [flydra_object]
             
             framenumber = frame
             reconstruction_stamp = rospy.Time.now()
             objects = flydra_objects
             flydra_mainbrain_packet = msgs.flydra_mainbrain_packet(framenumber, reconstruction_stamp, acquire_stamp, objects)
             flydra_mainbrain_super_packet = msgs.flydra_mainbrain_super_packet([flydra_mainbrain_packet])
             print rep, frame
             self.pub.publish(flydra_mainbrain_super_packet)
             r.sleep()
# Example #2
    def enqueue_finished_tracked_object(self, tracked_object):
        # This is called from within the realtime coords thread.
        # If data saving is active, queue the object's full history
        # (frames, state estimates, covariances, observations) for storage.
        if self.main_brain.is_saving_data():
            record = (
                tracked_object.obj_id,
                tracked_object.frames,
                tracked_object.xhats,
                tracked_object.Ps,
                tracked_object.timestamps,
                tracked_object.observations_frames,
                tracked_object.MLE_position,
                tracked_object.observations_2d,
                tracked_object.MLE_Lcoords,
            )
            self.main_brain.queue_data3d_kalman_estimates.put(record)

        # Announce over ROS that this object is dead; a NaN position marks it.
        dead_object = flydra_object(
            obj_id=tracked_object.obj_id,
            position=Point(numpy.nan, numpy.nan, numpy.nan),
        )
        try:
            reconstruction_stamp = rospy.Time.now()
            acquire_stamp = rospy.Time.from_sec(0)
        except rospy.exceptions.ROSInitException:
            # ROS isn't initialized -- best-effort publish, skip silently.
            pass
        else:
            packet = flydra_mainbrain_packet(
                framenumber=tracked_object.current_frameno,
                reconstruction_stamp=reconstruction_stamp,
                acquire_stamp=acquire_stamp,
                objects=[dead_object])
            self.realtime_ros_packets_pub.publish(
                flydra_mainbrain_super_packet([packet]))

        if self.debug_level.isSet():
            LOG.debug('killing obj_id %d:' % tracked_object.obj_id)
# Example #3
    def get_objects(self):
        """Advance the dummy simulation one frame and publish the result.

        Randomly births a new object (bounded by self.max_num_objects) and
        randomly kills one (always keeping at least one alive), packages all
        object states into a flydra_mainbrain_super_packet, publishes it on
        self.pub, and returns the super-packet.
        """
        acquire_stamp = rospy.Time.now()
        self.framenumber = self.framenumber + 1
        time.sleep(self.latency)  # simulate acquisition/processing latency

        # Possibly spawn a new object. Note: the random draw happens
        # unconditionally so the RNG sequence matches the original code.
        birth_check = np.random.random()
        if birth_check < self.prob_birth and len(self.objects) < self.max_num_objects:
            self.newest_object = self.newest_object + 1
            self.objects.append(DummyObject(self.newest_object))

        # Possibly kill a random object, but never the last one.
        death_check = np.random.random()
        if death_check < self.prob_death and len(self.objects) > 1:
            del self.objects[np.random.randint(0, len(self.objects))]

        # Package with mainbrain message format.
        flydra_objects = []
        for obj in self.objects:
            obj_id, position, velocity, posvel_covariance_diagonal = obj.get_state()
            flydra_objects.append(msgs.flydra_object(
                obj_id,
                geometry_msgs.Point(position[0], position[1], position[2]),
                geometry_msgs.Vector3(velocity[0], velocity[1], velocity[2]),
                posvel_covariance_diagonal))

        reconstruction_stamp = rospy.Time.now()
        flydra_mainbrain_packet = msgs.flydra_mainbrain_packet(
            self.framenumber, reconstruction_stamp, acquire_stamp, flydra_objects)
        flydra_mainbrain_super_packet = msgs.flydra_mainbrain_super_packet(
            [flydra_mainbrain_packet])

        self.pub.publish(flydra_mainbrain_super_packet)
        return flydra_mainbrain_super_packet
# Example #4
    def get_objects(self):
        """Advance the dummy point simulation one frame and publish it.

        Randomly births a new point (bounded by self.max_num_objects) and
        randomly kills one (always keeping at least one alive), packages all
        point states into a flydra_mainbrain_super_packet, publishes it on
        self.pub, and returns the super-packet.
        """
        acquire_stamp = rospy.Time.now()
        self.framenumber = self.framenumber + 1
        time.sleep(self.latency)  # simulate acquisition/processing latency

        # Possibly spawn a new point. The random draw happens unconditionally
        # so the RNG sequence matches the original code.
        birth_check = np.random.random()
        if birth_check < self.prob_birth and len(self.point_list) < self.max_num_objects:
            if verbose_mode:
                print("\nGenerating new object...")
            self.newest_object = self.newest_object + 1
            self.point_list.append(DummyPoint(self.newest_object))
            if verbose_mode:
                print("Now %d total objects." % len(self.point_list))

        # Possibly kill a random point, but never the last one.
        death_check = np.random.random()
        if death_check < self.prob_death and len(self.point_list) > 1:
            ind_to_kill = np.random.randint(0, len(self.point_list))
            if verbose_mode:
                print("\nKilling object %d (at index %d)..." % (self.point_list[ind_to_kill].get_id(), ind_to_kill))
            del self.point_list[ind_to_kill]

        # Package with mainbrain message format.
        flydra_objects = []
        for point in self.point_list:
            obj_id, position, velocity, posvel_covariance_diagonal = point.get_state()
            flydra_objects.append(msgs.flydra_object(
                obj_id,
                geometry_msgs.Point(position[0], position[1], position[2]),
                geometry_msgs.Vector3(velocity[0], velocity[1], velocity[2]),
                posvel_covariance_diagonal))

        reconstruction_stamp = rospy.Time.now()
        flydra_mainbrain_packet = msgs.flydra_mainbrain_packet(
            self.framenumber, reconstruction_stamp, acquire_stamp, flydra_objects)
        flydra_mainbrain_super_packet = msgs.flydra_mainbrain_super_packet(
            [flydra_mainbrain_packet])

        self.pub.publish(flydra_mainbrain_super_packet)
        return flydra_mainbrain_super_packet
# Example #5
    def _process_parsed_data(
        self,
        cam_idx,
        camn_received_time,
        absolute_cam_no,
        n_pts,
        cam_id,
        raw_framenumber,
        new_data_framenumbers,
        points_in_pluecker_coords_meters,
        points_distorted,
        points_undistorted,
        deferred_2d_data,
    ):
        """Ingest one camera's parsed 2D point data for one frame and, once
        all cameras have reported for a frame, run 3D reconstruction and
        Kalman tracking, then publish the results over ROS.

        Side effects: mutates the per-frame bookkeeping dicts
        (realtime_coord_dict, timestamp_check_dict,
        realtime_kalman_coord_dict, oldest_timestamp_by_corrected_framenumber),
        appends to deferred_2d_data and new_data_framenumbers, updates
        self.main_brain state, and publishes ROS packets/errors.

        Note: this must be called with self.all_data_lock acquired.
        """
        # Note: this must be called with self.all_data_lock acquired.

        # Local aliases for frequently-used instance attributes.
        timestamp_check_dict = self.timestamp_check_dict
        convert_format = flydra_kalman_utils.convert_format  # shorthand
        realtime_coord_dict = self.realtime_coord_dict
        oldest_timestamp_by_corrected_framenumber = self.oldest_timestamp_by_corrected_framenumber
        realtime_kalman_coord_dict = self.realtime_kalman_coord_dict

        # NOTE(review): the "if 1:" ladder below appears to preserve
        # historical indentation (e.g. from removed loops or locks) --
        # left untouched to keep the diff against history readable.
        if 1:
            if 1:
                if 1:
                    if 1:
                        # Use camn_received_time to determine sync
                        # info. This avoids 2 potential problems:
                        #  * using raw_timestamp can fail if the
                        #    camera drivers don't provide useful data
                        #  * using time.time() can fail if the network
                        #    latency jitter is on the order of the
                        #    inter frame interval.
                        corrected_framenumber, did_frame_offset_change = \
                          self.register_frame(cam_id,raw_framenumber)

                        trigger_timestamp = self.main_brain.trigger_device.framestamp2timestamp(
                            corrected_framenumber)
                        if did_frame_offset_change:
                            self.OnSynchronize(cam_idx, cam_id,
                                               raw_framenumber,
                                               trigger_timestamp)
                            new_data_framenumbers.clear()

                        self.last_timestamps[cam_idx] = trigger_timestamp
                        self.last_framenumbers_delay[cam_idx] = raw_framenumber
                        self.main_brain.framenumber = corrected_framenumber

                        if self.main_brain.is_saving_data():
                            for point_tuple in points_distorted:
                                # Save 2D data (even when no point found) to allow
                                # temporal correlation of movie frames to 2D data.
                                frame_pt_idx = point_tuple[
                                    PT_TUPLE_IDX_FRAME_PT_IDX]
                                cur_val = point_tuple[PT_TUPLE_IDX_CUR_VAL_IDX]
                                mean_val = point_tuple[
                                    PT_TUPLE_IDX_MEAN_VAL_IDX]
                                sumsqf_val = point_tuple[
                                    PT_TUPLE_IDX_SUMSQF_VAL_IDX]
                                if corrected_framenumber is None:
                                    # don't bother saving if we don't know when it was from
                                    continue
                                deferred_2d_data.append((
                                    absolute_cam_no,  # defer saving to later
                                    corrected_framenumber,
                                    trigger_timestamp,
                                    camn_received_time) + point_tuple[:5] +
                                                        (frame_pt_idx, cur_val,
                                                         mean_val, sumsqf_val))
                        # save new frame data

                        # First camera to report this frame creates its
                        # per-frame bookkeeping entries.
                        if corrected_framenumber not in realtime_coord_dict:
                            realtime_coord_dict[corrected_framenumber] = {}
                            timestamp_check_dict[corrected_framenumber] = {}

                        # For hypothesis testing: attempt 3D reconstruction of 1st point from each 2D view
                        realtime_coord_dict[corrected_framenumber][
                            cam_id] = points_undistorted[0]
                        #timestamp_check_dict[corrected_framenumber][cam_id]= camn_received_time
                        timestamp_check_dict[corrected_framenumber][
                            cam_id] = trigger_timestamp

                        if len(points_in_pluecker_coords_meters):
                            # save all 3D Pluecker coordinates for Kalman filtering
                            realtime_kalman_coord_dict[corrected_framenumber][
                                absolute_cam_no] = (
                                    points_in_pluecker_coords_meters)

                        # Count this camera toward the frame only if it
                        # actually detected points.
                        if n_pts:
                            inc_val = 1
                        else:
                            inc_val = 0

                        # Track the oldest trigger timestamp seen for this
                        # frame (used later for latency estimates).
                        if corrected_framenumber in oldest_timestamp_by_corrected_framenumber:
                            orig_timestamp, n = oldest_timestamp_by_corrected_framenumber[
                                corrected_framenumber]
                            if orig_timestamp is None:
                                oldest = trigger_timestamp  # this may also be None, but eventually won't be
                            else:
                                oldest = min(trigger_timestamp, orig_timestamp)
                            oldest_timestamp_by_corrected_framenumber[
                                corrected_framenumber] = (oldest, n + inc_val)
                            del oldest, n, orig_timestamp
                        else:
                            oldest_timestamp_by_corrected_framenumber[
                                corrected_framenumber] = trigger_timestamp, inc_val

                        new_data_framenumbers.add(
                            corrected_framenumber)  # insert into set

                finished_corrected_framenumbers = []  # for quick deletion

                ########################################################################

                # Now we've grabbed all data waiting on network. Now it's
                # time to calculate 3D info.

                # XXX could go for latest data first to minimize latency
                # on that data.

                ########################################################################

                for corrected_framenumber in new_data_framenumbers:
                    oldest_camera_timestamp, n = oldest_timestamp_by_corrected_framenumber[
                        corrected_framenumber]
                    if oldest_camera_timestamp is None:
                        #LOG.info('no latency estimate available -- skipping 3D reconstruction')
                        continue
                    if (time.time() - oldest_camera_timestamp
                        ) > self.max_reconstruction_latency_sec:
                        #LOG.info('maximum reconstruction latency exceeded -- skipping 3D reconstruction')
                        continue

                    data_dict = realtime_coord_dict[corrected_framenumber]
                    if len(data_dict) == len(
                            self.cam_ids):  # all camera data arrived

                        if self.debug_level.isSet():
                            LOG.debug('frame %d' % (corrected_framenumber))

                        # mark for deletion out of data queue
                        finished_corrected_framenumbers.append(
                            corrected_framenumber)

                        if self.reconstructor is None:
                            # can't do any 3D math without calibration information
                            self.main_brain.best_realtime_data = None
                            continue

                        if not self.main_brain.trigger_device.have_estimate():
                            # acquire_stamp (the proximate error, however latency estimates for the same reason)
                            # cannot be calculated unless the triggerbox has a clock model
                            continue

                        if 1:
                            with self.tracker_lock:
                                if self.tracker is None:  # tracker isn't instantiated yet...
                                    self.main_brain.best_realtime_data = None
                                    continue

                                pluecker_coords_by_camn = realtime_kalman_coord_dict[
                                    corrected_framenumber]

                                if self.save_profiling_data:
                                    dumps = pickle.dumps(
                                        pluecker_coords_by_camn)
                                    self.data_dict_queue.append(
                                        ('gob', (corrected_framenumber, dumps,
                                                 self.camn2cam_id)))
                                # Update existing tracked objects; returns the
                                # 2D data not consumed by any tracker.
                                pluecker_coords_by_camn = self.tracker.calculate_a_posteriori_estimates(
                                    corrected_framenumber,
                                    pluecker_coords_by_camn,
                                    self.camn2cam_id,
                                    debug2=self.debug_level.isSet())

                                if self.debug_level.isSet():
                                    LOG.debug(
                                        '%d live objects:' %
                                        self.tracker.how_many_are_living())
                                    results = self.tracker.get_most_recent_data(
                                    )
                                    for result in results:
                                        if result is None:
                                            continue
                                        obj_id, last_xhat, P = result
                                        Pmean = numpy.sqrt(
                                            numpy.sum([
                                                P[i, i]**2 for i in range(3)
                                            ]))
                                        LOG.debug(
                                            'obj_id %d: (%.3f, %.3f, %.3f), Pmean: %.3f'
                                            % (obj_id, last_xhat[0],
                                               last_xhat[1], last_xhat[2],
                                               Pmean))

                                if self.save_profiling_data:
                                    self.data_dict_queue.append(
                                        ('ntrack',
                                         self.tracker.how_many_are_living()))

                                now = time.time()
                                if self.show_overall_latency.isSet():
                                    oldest_camera_timestamp, n = oldest_timestamp_by_corrected_framenumber[
                                        corrected_framenumber]
                                    if n > 0:
                                        if 0:
                                            LOG.info(
                                                'overall latency %d: %.1f msec (oldest: %s now: %s)'
                                                % (
                                                    n,
                                                    (now -
                                                     oldest_camera_timestamp) *
                                                    1e3,
                                                    repr(
                                                        oldest_camera_timestamp
                                                    ),
                                                    repr(now),
                                                ))
                                        else:

                                            LOG.info(
                                                'overall latency (%d camera detected 2d points): %.1f msec (note: may exclude camera->camera computer latency)'
                                                % (
                                                    n,
                                                    (now -
                                                     oldest_camera_timestamp) *
                                                    1e3,
                                                ))

                                if 1:
                                    # The above calls
                                    # self.enqueue_finished_tracked_object()
                                    # when a tracked object is no longer
                                    # tracked.

                                    # Now, tracked objects have been updated (and their 2D data points
                                    # removed from consideration), so we can use old flydra
                                    # "hypothesis testing" algorithm on remaining data to see if there
                                    # are new objects.

                                    results = self.tracker.get_most_recent_data(
                                    )
                                    Xs = []
                                    for result in results:
                                        if result is None:
                                            continue
                                        obj_id, last_xhat, P = result
                                        X = last_xhat[0], last_xhat[
                                            1], last_xhat[2]
                                        Xs.append(X)
                                    if len(Xs):
                                        self.main_brain.best_realtime_data = Xs, 0.0
                                    else:
                                        self.main_brain.best_realtime_data = None

                                # Convert to format accepted by find_best_3d()
                                found_data_dict, first_idx_by_cam_id = convert_format(
                                    pluecker_coords_by_camn,
                                    self.camn2cam_id,
                                    area_threshold=0.0,
                                    only_likely=True)

                                if len(found_data_dict) >= 2:
                                    # Can't do any 3D math without at least 2 cameras giving good
                                    # data.
                                    max_error = \
                                        self.tracker.kalman_model['hypothesis_test_max_acceptable_error']
                                    with_water = self.reconstructor.wateri is not None

                                    try:
                                        (
                                            this_observation_3d,
                                            this_observation_Lcoords,
                                            cam_ids_used, min_mean_dist
                                        ) = ru.hypothesis_testing_algorithm__find_best_3d(
                                            self.reconstructor,
                                            found_data_dict,
                                            max_error,
                                            max_n_cams=self.
                                            max_N_hypothesis_test,
                                            with_water=with_water,
                                        )
                                    except ru.NoAcceptablePointFound:
                                        pass
                                    else:
                                        this_observation_camns = [
                                            self.cam_id2cam_no[cam_id]
                                            for cam_id in cam_ids_used
                                        ]
                                        this_observation_idxs = [
                                            first_idx_by_cam_id[cam_id]
                                            for cam_id in cam_ids_used
                                        ]
                                        ####################################
                                        #  Now join found point into Tracker
                                        if self.save_profiling_data:
                                            self.data_dict_queue.append(
                                                ('join',
                                                 (corrected_framenumber,
                                                  this_observation_3d,
                                                  this_observation_Lcoords,
                                                  this_observation_camns,
                                                  this_observation_idxs)))
                                        # test for novelty
                                        believably_new = self.tracker.is_believably_new(
                                            this_observation_3d)
                                        if believably_new:
                                            self.tracker.join_new_obj(
                                                corrected_framenumber,
                                                this_observation_3d,
                                                this_observation_Lcoords,
                                                this_observation_camns,
                                                this_observation_idxs)
                                if 1:
                                    if self.tracker.how_many_are_living():
                                        #publish state to ROS
                                        results = self.tracker.get_most_recent_data(
                                        )
                                        ros_objects = []
                                        for result in results:
                                            if result is None:
                                                continue
                                            obj_id, xhat, P = result
                                            this_ros_object = flydra_object(
                                                obj_id=obj_id,
                                                position=Point(*xhat[:3]),
                                                velocity=Vector3(*xhat[3:6]),
                                                posvel_covariance_diagonal=numpy
                                                .diag(P)[:6].tolist())
                                            ros_objects.append(this_ros_object)
                                        ros_packet = flydra_mainbrain_packet(
                                            framenumber=corrected_framenumber,
                                            reconstruction_stamp=rospy.Time.
                                            from_sec(now),
                                            acquire_stamp=rospy.Time.from_sec(
                                                oldest_camera_timestamp),
                                            objects=ros_objects)
                                        self.realtime_ros_packets_pub.publish(
                                            flydra_mainbrain_super_packet(
                                                [ros_packet]))

                # Drop per-frame bookkeeping for the frames reconstructed above.
                for finished in finished_corrected_framenumbers:
                    #check that timestamps are in reasonable agreement (low priority)
                    diff_from_start = []
                    for cam_id, tmp_trigger_timestamp in timestamp_check_dict[
                            finished].iteritems():
                        diff_from_start.append(tmp_trigger_timestamp)
                    timestamps_by_cam_id = numpy.array(diff_from_start)

                    if self.show_sync_errors:
                        if len(timestamps_by_cam_id):
                            if numpy.max(
                                    abs(timestamps_by_cam_id -
                                        timestamps_by_cam_id[0])) > 0.005:
                                LOG.warn(
                                    'timestamps off by more than 5 msec -- synchronization error'
                                )
                                self.main_brain.error_ros_msgs_pub.publish(
                                    FlydraError(FlydraError.CAM_TIMESTAMPS_OFF,
                                                ""))

                    del realtime_coord_dict[finished]
                    del timestamp_check_dict[finished]
                    try:
                        del realtime_kalman_coord_dict[finished]
                    except KeyError:
                        pass

                # Clean up old frame records to save RAM.
                # This is only needed when multiple cameras are not
                # synchronized, (When camera-camera frame
                # correspondences are unknown.)
                if len(realtime_coord_dict) > 100:
                    #dont spam the console at startup (i.e. before a sync has been attemted)
                    if self.ever_synchronized:
                        LOG.warn(
                            'Cameras not synchronized or network dropping packets -- unmatched 2D data accumulating'
                        )
                        self.main_brain.error_ros_msgs_pub.publish(
                            FlydraError(FlydraError.NOT_SYNCHRONIZED, ""))

                    k = realtime_coord_dict.keys()
                    k.sort()

                    # get one sample
                    corrected_framenumber = k[0]
                    data_dict = realtime_coord_dict[corrected_framenumber]
                    this_cam_ids = data_dict.keys()
                    missing_cam_id_guess = list(
                        set(self.cam_ids) - set(this_cam_ids))
                    if len(missing_cam_id_guess) and self.ever_synchronized:
                        delta = list(set(self.cam_ids) - set(this_cam_ids))
                        LOG.warn('a guess at missing cam_id(s): %r' % delta)
                        for d in delta:
                            self.main_brain.error_ros_msgs_pub.publish(
                                FlydraError(FlydraError.MISSING_DATA, d))

                    # Keep only the newest 50 unmatched frames.
                    for ki in k[:-50]:
                        del realtime_coord_dict[ki]
                        del timestamp_check_dict[ki]

                if len(realtime_kalman_coord_dict) > 100:
                    LOG.warn(
                        'deleting unused 3D data (this should be a rare occurrance)'
                    )
                    self.main_brain.error_ros_msgs_pub.publish(
                        FlydraError(FlydraError.UNUSED_3D_DATA, ""))
                    k = realtime_kalman_coord_dict.keys()
                    k.sort()
                    for ki in k[:-50]:
                        del realtime_kalman_coord_dict[ki]

                if len(oldest_timestamp_by_corrected_framenumber) > 100:
                    k = oldest_timestamp_by_corrected_framenumber.keys()
                    k.sort()
                    for ki in k[:-50]:
                        del oldest_timestamp_by_corrected_framenumber[ki]

                # Flush the 2D data accumulated above to the save queue.
                if len(deferred_2d_data):
                    self.main_brain.queue_data2d.put(deferred_2d_data)