Example 1
    def begin_collision_detection(self,
                                  dynamic_detection=False,
                                  callback=None,
                                  static_collision_type=None,
                                  debug=False):

        if not self.perception_started:
            err("Perception hasn't been started!")
            return False
        # if not self.detect_ready:
        #     err("ready_collision_detection hasn't been called")
        #     return False

        self.is_dynamic = dynamic_detection
        self.callback = callback
        if not dynamic_detection:
            self._setup_collision_type(static_collision_type)
        self.collision_detected = False
        self.collision_finished = False
        self.debug = debug
        self.collision_type = "No collision"
        self.cur_iter = 0
        self.cur_inds = {}

        for k in self.perceptions:
            self.cur_inds[k] = 0

        self.i_data_buffers = {}
        self.t_data_buffers = {}
        for k in self.perceptions:
            self.i_data_buffers[k] = [
                np.zeros(
                    (self.impact_fgc.filter_len, self.impact_fgc.filter_len))
                for i in range(self.perception_lengths[k])
            ]
            self.t_data_buffers[k] = [
                np.zeros((self.type_fgc.filter_len, self.type_fgc.filter_len))
                for i in range(self.perception_lengths[k])
            ]
        self.end_monitor_lock = threading.Lock()

        self.data_thread_spawner_lock1 = threading.Lock()
        self.data_thread_spawner_lock2 = threading.Lock()
        self.data_thread_spawner = RateCaller(self._data_spawn_thread,
                                              self.SAMPLING_RATE)
        self.data_thread_spawner.run()

        self.beg_time = rospy.Time.now().to_sec()

        self.t_sum, self.t_num = 0., 0.
        self.normal_dict_counts = []
        self.temp_dict = {}
        for k in self.perceptions:
            self.temp_dict[k] = [[] for i in range(self.perception_lengths[k])]

        return True
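A minimal usage sketch for the method above, assuming an already-sensing
ArmPerceptionMonitor instance named `apm` and a placeholder callback; the
"pressure" value mirrors one of the static_collision_type options handled by
_setup_collision_type further below:

def on_collision():
    print "collision callback fired"        # placeholder callback

if apm.begin_collision_detection(dynamic_detection=False,
                                 callback=on_collision,
                                 static_collision_type="pressure"):
    rospy.sleep(5.)
    apm.end_collision_detection()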
Example 2
    def get_zeros(self, time=4.):
        self._zeros = {}
        for k in self.perceptions:
            self._zeros[k] = None
        self._n = 0
            
        monitor = RateCaller(self._sum_values, self.rate)
        monitor.run()
        rospy.sleep(time)
        monitor.stop()

        for k in self.perceptions:
            self._zeros[k] /= self._n
        return self._zeros
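get_zeros relies on _sum_values (shown in a later example) to add one reading
per perception at self.rate and then divides each running sum by the sample
count. A self-contained sketch of that baseline-averaging pattern, using
made-up accelerometer-style readings:

import numpy as np

readings = [np.array([0.02, -0.01, 9.81]),
            np.array([0.01,  0.00, 9.79]),
            np.array([0.03, -0.02, 9.80])]
zero, n = None, 0
for r in readings:
    zero = np.copy(r) if zero is None else zero + r
    n += 1
zero /= n        # per-axis baseline, analogous to self._zeros[k] / self._n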
Example 3
    def start_data_capture(self, duration=None):
        if not self.perception_started:
            err("Perception hasn't been started!")
            return False

        self.logger = RateCaller(self._gather_perception, self.SAMPLING_RATE)

        for k in self.perceptions:
            self.datasets[k] += [[]]
        self.logger.run()

        if duration is not None:
            # threading.Timer takes (interval, function); start it so capture
            # stops automatically after `duration` seconds
            threading.Timer(duration, self.stop_data_capture).start()

        return True
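The duration handling above depends on the standard-library
threading.Timer(interval, function) signature; a tiny self-contained
illustration of that one-shot pattern:

import threading

def stop():
    print "stopping capture"          # stand-in for self.stop_data_capture

timer = threading.Timer(3.0, stop)    # fires once, 3 seconds after start()
timer.start()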
Example 4
    def begin_monitoring(self, models=None, model_zeros=None,
                               std_dev_dict=None, noise_dev_dict=None, duration=None,
                               std_dev_default=2.0, noise_dev_default=0.25,
                               tol_thresh_dict=None,
                               contingency=None, window_size=70, current_zeros=None,
                               sampling_rate = 1,
                               only_pressure=False,
                               transform_dict=None, verbose=True, collide=True):
        if self.active:
            log("Perception already active.")
            return
        self.active = True


        self.setup_monitoring(models, 
                               std_dev_dict=std_dev_dict, noise_dev_dict=noise_dev_dict,
                               duration=duration, std_dev_default=std_dev_default, 
                               noise_dev_default=noise_dev_default,                         
                               tol_thresh_dict=tol_thresh_dict,
                               contingency=contingency, window_size=window_size, 
                               sampling_rate=sampling_rate,
                               current_zeros=current_zeros, transform_dict=transform_dict, 
                               verbose=verbose, collide=collide)
        # if models is not None:
        #     self.models = models
        if model_zeros is not None:
            self.model_zeros = model_zeros
        self.cur_pt = 0
        self.collision_times = {}
        self.collision_sums = {}
        self.cur_col_time = 0
        self.current_zeros = {}
        self.z_sum = {}
        self.z_rsum = {}
        self.z_list = {}
        self.min_prob = 100000.0
        self.cur_mals = {}
        self.mals = None
        self.only_pressure = only_pressure

        self.monitor = RateCaller(self._monitor_data, self.rate * self.sampling_rate)
        self.monitor.run()
        # self.sumcalls = {}
        # self.sumsave = {}

        if duration is not None:
            # threading.Timer takes (interval, function)
            self.dur_timer = threading.Timer(duration, self.end_monitoring)
            self.dur_timer.start()
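A rough usage sketch for begin_monitoring, assuming previously generated (or
loaded) models and an instance named `apm`; the parameter values are
placeholders, not recommendations:

apm.begin_monitoring(models=apm.models,
                     std_dev_dict={"joint_efforts": 3.0},
                     window_size=70,
                     duration=10.,     # auto-stops via the end_monitoring timer
                     verbose=True)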
Example 5
    def begin_collision_detection(self, dynamic_detection=False, 
                                  callback=None, static_collision_type=None,
                                  debug=False):

        if not self.perception_started:
            err("Perception hasn't been started!")
            return False
        # if not self.detect_ready:
        #     err("ready_collision_detection hasn't been called")
        #     return False

        self.is_dynamic = dynamic_detection
        self.callback = callback
        if not dynamic_detection:
            self._setup_collision_type(static_collision_type)
        self.collision_detected = False
        self.collision_finished = False
        self.debug = debug
        self.collision_type = "No collision"
        self.cur_iter = 0
        self.cur_inds = {}

        for k in self.perceptions:
            self.cur_inds[k] = 0

        self.i_data_buffers = {}
        self.t_data_buffers = {}
        for k in self.perceptions:
            self.i_data_buffers[k] = [ np.zeros((self.impact_fgc.filter_len, self.impact_fgc.filter_len)) for i in range(self.perception_lengths[k])]
            self.t_data_buffers[k] = [ np.zeros((self.type_fgc.filter_len, self.type_fgc.filter_len)) for i in range(self.perception_lengths[k])]
        self.end_monitor_lock = threading.Lock()

        self.data_thread_spawner_lock1 = threading.Lock()
        self.data_thread_spawner_lock2 = threading.Lock()
        self.data_thread_spawner = RateCaller(self._data_spawn_thread, self.SAMPLING_RATE ) 
        self.data_thread_spawner.run()

        self.beg_time = rospy.Time.now().to_sec()

        self.t_sum, self.t_num = 0., 0.
        self.normal_dict_counts = []
        self.temp_dict = {}
        for k in self.perceptions:
            self.temp_dict[k] = [[] for i in range(self.perception_lengths[k])]

        return True
Example 6
class ArmPerceptionMonitor( ):

    ##
    # Initializes internal variables
    #
    # @param arm 0 if right, 1 if left
    # @param percept_mon_list list of perceptions to monitor; if None, do all
    def __init__(self, arm):
        self.load_parameters()
        ############## Classifiers #################
        self.impact_classifier = cfiers.classifiers_dict["small_refined_random_forest"]
        self.coll_type_classifier = cfiers.classifiers_dict["large_refined_random_forest"]

        log("Loading impact classifier")
        self.impact_classifier.load(self.I_RTREE_CLASSIFIER)
        log("Loading collision type classifier")
        self.coll_type_classifier.load(self.T_RTREE_CLASSIFIER)

        if arm == 0:
            self.armc = "r"
            self.is_right_arm = True
        else:
            self.armc = "l"
            self.is_right_arm = False

        self.perceptions = {}

        self.perception_names = [ "accelerometer",
                                  "joint_angles",
                                  "joint_velocities",
                                  "joint_efforts",
                                  "r_finger_periph_pressure",
                                  "r_finger_pad_pressure", 
                                  "l_finger_periph_pressure",
                                  "l_finger_pad_pressure",
                                  "gripper_pose"]
        self.perception_lengths = { "accelerometer" : 3,
                                  "joint_angles" : 7,
                                  "joint_velocities" : 7,
                                  "joint_efforts" : 7,
                                  "r_finger_periph_pressure" : 1,
                                  "r_finger_pad_pressure" : 1, 
                                  "l_finger_periph_pressure" : 1,
                                  "l_finger_pad_pressure" : 1,
                                  "gripper_pose" : 7}

        self.percept_mon_list = self.perception_names

        self.impact_fgc = FastGaussConvolve(self.SAMPLING_RATE, self.I_FILTER_BEG_DELAY,
                                     self.I_FILTER_CEN_DELAY, self.i_sigma_list)
        self.type_fgc = FastGaussConvolve(self.SAMPLING_RATE, self.T_FILTER_BEG_DELAY,
                                     self.T_FILTER_CEN_DELAY, self.t_sigma_list)

        self.perception_started = False
        # self.detect_ready = False
        self.fos = FileOperations()

    def load_parameters(self):
        self.SAMPLING_RATE = rospy.get_param("/overhead_grasping/data_sampling_rate")
        self.PERCEPTION_ORDER= rospy.get_param("/overhead_grasping/perception_order")

        # impact parameters
        self.I_FILTER_BEG_DELAY = rospy.get_param("/overhead_grasping/i_filter_beg_delay")
        self.I_FILTER_CEN_DELAY = rospy.get_param("/overhead_grasping/i_filter_cen_delay")
        self.I_MIN_SIGMA = rospy.get_param("/overhead_grasping/i_min_sigma")
        self.I_MAX_SIGMA = rospy.get_param("/overhead_grasping/i_max_sigma")
        self.I_NUM_SIGMA = rospy.get_param("/overhead_grasping/i_num_sigma")
        self.i_sigma_list = np.linspace(self.I_MIN_SIGMA,
                                        self.I_MAX_SIGMA,
                                        self.I_NUM_SIGMA)
        self.I_DEGREE_DICT = rospy.get_param("/overhead_grasping/i_degree_dict")
        self.i_instance_len = 0
        for k in self.PERCEPTION_ORDER:
            for coord in self.I_DEGREE_DICT[k]:
                for deg in coord:
                    self.i_instance_len += self.I_NUM_SIGMA
        self.I_RTREE_CLASSIFIER = rospy.get_param("/overhead_grasping/i_rtree_classifier")

        # collision type parameters
        self.T_FILTER_BEG_DELAY = rospy.get_param("/overhead_grasping/t_filter_beg_delay")
        self.T_FILTER_CEN_DELAY = rospy.get_param("/overhead_grasping/t_filter_cen_delay")
        self.T_MIN_SIGMA = rospy.get_param("/overhead_grasping/t_min_sigma")
        self.T_MAX_SIGMA = rospy.get_param("/overhead_grasping/t_max_sigma")
        self.T_NUM_SIGMA = rospy.get_param("/overhead_grasping/t_num_sigma")
        self.t_sigma_list = np.linspace(self.T_MIN_SIGMA,
                                        self.T_MAX_SIGMA,
                                        self.T_NUM_SIGMA)
        self.T_DEGREE_DICT = rospy.get_param("/overhead_grasping/t_degree_dict")
        self.t_instance_len = 0
        for k in self.PERCEPTION_ORDER:
            for coord in self.T_DEGREE_DICT[k]:
                for deg in coord:
                    self.t_instance_len += self.T_NUM_SIGMA
        self.T_RTREE_CLASSIFIER = rospy.get_param("/overhead_grasping/t_rtree_classifier")

        self.STATIC_SIGMA_INDEX = rospy.get_param("/overhead_grasping/static_sigma_index")
        self.STATIC_DERIV_THRESH = rospy.get_param("/overhead_grasping/static_deriv_thresh")

    ##
    # Initializes the listeners on the sensor topics.  This must
    # be started before any data can be collected from the arms.
    def activate_sensing(self, tf_listener=None):

        log("Initializing arm perception listeners")
        self.tf_listener = tf_listener

        if "accelerometer" in self.percept_mon_list:
            accel_listener = GenericListener("accel_mon_node", AccelerometerState, 
                                     "accelerometer/" + self.armc + "_gripper_motor",
                                     self.SAMPLING_RATE, accel_state_processor)
            self.perceptions["accelerometer"] = accel_listener.read

        if "joint_angles" in self.percept_mon_list:
            joint_angles_processor = ft.partial(joints_state_processor, 
                                                right_arm=self.is_right_arm, 
                                                angles_velocities_efforts=0)
            joint_angles_listener = GenericListener("joint_angles_mon_node", JointState, 
                                               "joint_states", self.SAMPLING_RATE, joint_angles_processor)
            self.perceptions["joint_angles"] = joint_angles_listener.read

        if "joint_velocities" in self.percept_mon_list:
            joint_velocities_processor = ft.partial(joints_state_processor, 
                                                right_arm=self.is_right_arm, 
                                                angles_velocities_efforts=1)
            joint_velocities_listener = GenericListener("joint_vels_mon_node", JointState, 
                                              "joint_states", self.SAMPLING_RATE, joint_velocities_processor)
            self.perceptions["joint_velocities"] = joint_velocities_listener.read

        if "joint_efforts" in self.percept_mon_list:
            joint_efforts_processor = ft.partial(joints_state_processor, 
                                                right_arm=self.is_right_arm, 
                                                angles_velocities_efforts=2)
            joint_efforts_listener = GenericListener("joint_efforts_mon_node", JointState, 
                                              "joint_states", self.SAMPLING_RATE, joint_efforts_processor)
            self.perceptions["joint_efforts"] = joint_efforts_listener.read

        if "r_finger_periph_pressure" in self.percept_mon_list:
            r_finger_periph_pressure_processor = ft.partial(pressure_state_sum_processor, 
                                            right_finger_tip=True, indicies=range(1,7))
            r_finger_periph_pressure_listener = GenericListener(self.armc + "_pressure_r_periph_mon_node", PressureState, 
                                                         "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE, 
                                                         r_finger_periph_pressure_processor)
            self.perceptions["r_finger_periph_pressure"] = r_finger_periph_pressure_listener.read

        if "r_finger_pad_pressure" in self.percept_mon_list:
            r_finger_pad_pressure_processor = ft.partial(pressure_state_sum_processor, 
                                            right_finger_tip=True, indicies=range(7,22))
            r_finger_pad_pressure_listener = GenericListener(self.armc + "_pressure_r_pad_mon_node", PressureState, 
                                                         "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE, 
                                                         r_finger_pad_pressure_processor)
            self.perceptions["r_finger_pad_pressure"] = r_finger_pad_pressure_listener.read

        if "l_finger_periph_pressure" in self.percept_mon_list:
            l_finger_periph_pressure_processor = ft.partial(pressure_state_sum_processor, 
                                            right_finger_tip=False, indicies=range(1,7))
            l_finger_periph_pressure_listener = GenericListener(self.armc + "_pressure_l_periph_mon_node", PressureState, 
                                                         "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE, 
                                                         l_finger_periph_pressure_processor)
            self.perceptions["l_finger_periph_pressure"] = l_finger_periph_pressure_listener.read

        if "l_finger_pad_pressure" in self.percept_mon_list:
            l_finger_pad_pressure_processor = ft.partial(pressure_state_sum_processor, 
                                            right_finger_tip=False, indicies=range(7,22))
            l_finger_pad_pressure_listener = GenericListener(self.armc + "_pressure_l_pad_mon_node", PressureState, 
                                                         "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE, 
                                                         l_finger_pad_pressure_processor)
            self.perceptions["l_finger_pad_pressure"] = l_finger_pad_pressure_listener.read

        for k in self.perceptions:
            # Make sure we have received a message first
            self.perceptions[k](allow_duplication=False, willing_to_wait=True)
            self.perceptions[k] = ft.partial(self.perceptions[k], willing_to_wait=False, 
                                             quiet=True,
                                             warn=False, allow_duplication=True)

        if "gripper_pose" in self.percept_mon_list:
            def gripper_pose_lookup():
                if self.tf_listener is not None:
                    lct = self.tf_listener.getLatestCommonTime("/torso_lift_link", "/" + self.armc + "_wrist_roll_link")
                    pos, quat = self.tf_listener.lookupTransform("/torso_lift_link", "/" + self.armc + "_wrist_roll_link", lct)
                    return (lct.to_sec(), np.array(pos + quat))
                else:
                    return (0., np.array([0.]*7))

            self.perceptions["gripper_pose"] = gripper_pose_lookup


        # all callbacks should return data in this format:
        # (t, np.array([x1, x2, ...]))
        # t is time in seconds
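        # Illustrative only (not part of the original class): a hypothetical
        # stand-in callback obeying this contract could look like
        #     def fake_percept():
        #         return (rospy.Time.now().to_sec(), np.array([0., 0., 0.]))
        # and be registered with self.perceptions["fake_percept"] = fake_percept.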

        self.clear_vars()
        self.perception_started = True
        
        log("Finished initialization")

    ##
    # Initialize variables
    def clear_vars(self):
        self.datasets = {}
        self.models = {}
        for k in self.perceptions:
            self.datasets[k] = []
            self.models[k] = {"mean" : None, "variance" : None}

    ##
    # Begin capturing perception data for all of the listeners
    #
    # @param duration If None, continue capturing until stop is called.
    #                 Else, stop capturing after duration seconds have passed.
    def start_data_capture(self, duration=None):
        if not self.perception_started:
            err("Perception hasn't been started!")
            return False

        self.logger = RateCaller(self._gather_perception, self.SAMPLING_RATE)

        for k in self.perceptions:
            self.datasets[k] += [[]]
        self.logger.run()

        if duration is not None:
            # threading.Timer takes (interval, function); start it so capture
            # stops automatically after `duration` seconds
            threading.Timer(duration, self.stop_data_capture).start()

        return True

    def _gather_perception(self):
        for k in self.perceptions:
            self.datasets[k][-1].append(self.perceptions[k]())

    ##
    # Stop capturing perception data.  Store output data in datasets list for
    # later statistics.
    def stop_data_capture(self):
        self.logger.stop()
        num_datapts = len(self.datasets[self.datasets.keys()[0]][-1])
        log("%d datapoints collected", num_datapts)
        return num_datapts


    ##
    # Process the signals in the current coordinate group.
    # sigs_proc contains three signal sets, one per derivative degree; each set
    # is a list of convolutions of the signals with that degree's derivative of
    # a Gaussian at several scales.
    def process_signals(self, datasets):
        
        models = {}

        sum, num = 0., 0.
        for p in datasets:
            models[p] = {}
            i_sigs_proc = [ [[] for x in range(self.I_NUM_SIGMA)] for y in range(3)]
            t_sigs_proc = [ [[] for x in range(self.T_NUM_SIGMA)] for y in range(3)]
            data_list = datasets[p]
            num_coords = len(data_list[0][0][1])
            times = None
            for coord in range(num_coords):
                stream = data_list[0]
                def conv(v):
                    if type(v) == type([]) or type(v) == type(np.array([])):
                        return v[0]
                    return v
                signal = [conv(v) for v in zip(*zip(*stream)[1])[coord]]

                time_ind = np.array(zip(*stream)[0], dtype=np.float64)

                # normalize time indicies by offsetting to first value
                time_ind_beg = time_ind[0]
                for i, v in enumerate(time_ind):
                    time_ind[i] = float(time_ind[i] - time_ind_beg) 

                times = time_ind
                for deg in range(3):
                    for sigma_ind, sigma in enumerate(self.i_sigma_list):
                        s = self.impact_fgc.convolve_signal(signal, deg, sigma_ind)
                        i_sigs_proc[deg][sigma_ind].append(s)
                    for sigma_ind, sigma in enumerate(self.t_sigma_list):
                        s = self.type_fgc.convolve_signal(signal, deg, sigma_ind)
                        t_sigs_proc[deg][sigma_ind].append(s)

            if rospy.is_shutdown():
                sys.exit()

            models[p]["impact_features"] = [ [[] for x in range(self.I_NUM_SIGMA)] for y in range(3)]
            models[p]["type_features"] = [ [[] for x in range(self.T_NUM_SIGMA)] for y in range(3)]
            for deg in range(3):
                for sigma in range(self.I_NUM_SIGMA):
                    models[p]["impact_features"][deg][sigma] = zip(*i_sigs_proc[deg][sigma])
                for sigma in range(self.T_NUM_SIGMA):
                    models[p]["type_features"][deg][sigma] = zip(*t_sigs_proc[deg][sigma])
            models[p]["time"] = times
            models[p]["i_sigma_list"] = self.i_sigma_list
            models[p]["t_sigma_list"] = self.t_sigma_list

        return models

    # def ready_collision_detection(self):
    #     # wait for the classifiers to finish loading
    #     self.impact_classifier.finish_loading()
    #     # self.coll_type_classifier.finish_loading()
    #     self.detect_ready = True

    ##
    # Begin monitoring perception data to make sure it doesn't deviate from
    # the model provided.
    #
    # TODO DOCS
    # @param duration If None, continue capturing until stop is called.
    #                 Else, stop capturing after duration seconds have passed.
    def begin_collision_detection(self, dynamic_detection=False, 
                                  callback=None, static_collision_type=None,
                                  debug=False):

        if not self.perception_started:
            err("Perception hasn't been started!")
            return False
        # if not self.detect_ready:
        #     err("ready_collision_detection hasn't been called")
        #     return False

        self.is_dynamic = dynamic_detection
        self.callback = callback
        if not dynamic_detection:
            self._setup_collision_type(static_collision_type)
        self.collision_detected = False
        self.collision_finished = False
        self.debug = debug
        self.collision_type = "No collision"
        self.cur_iter = 0
        self.cur_inds = {}

        for k in self.perceptions:
            self.cur_inds[k] = 0

        self.i_data_buffers = {}
        self.t_data_buffers = {}
        for k in self.perceptions:
            self.i_data_buffers[k] = [ np.zeros((self.impact_fgc.filter_len, self.impact_fgc.filter_len)) for i in range(self.perception_lengths[k])]
            self.t_data_buffers[k] = [ np.zeros((self.type_fgc.filter_len, self.type_fgc.filter_len)) for i in range(self.perception_lengths[k])]
        self.end_monitor_lock = threading.Lock()

        self.data_thread_spawner_lock1 = threading.Lock()
        self.data_thread_spawner_lock2 = threading.Lock()
        self.data_thread_spawner = RateCaller(self._data_spawn_thread, self.SAMPLING_RATE ) 
        self.data_thread_spawner.run()

        self.beg_time = rospy.Time.now().to_sec()

        self.t_sum, self.t_num = 0., 0.
        self.normal_dict_counts = []
        self.temp_dict = {}
        for k in self.perceptions:
            self.temp_dict[k] = [[] for i in range(self.perception_lengths[k])]

        return True

    def _setup_collision_type(self, static_collision_type):
        if static_collision_type == "all":
            static_dict = {"accelerometer" : range(3),
                           "joint_angles" : range(7),
                           "joint_velocities" : range(7),
                           "joint_efforts" : range(7),
                           "r_finger_periph_pressure" : range(1),
                           "r_finger_pad_pressure" : range(1), 
                           "l_finger_periph_pressure" : range(1),
                           "l_finger_pad_pressure" : range(1),
                           "gripper_pose" : range(7)}
        elif static_collision_type in self.perception_names:
            static_dict = {static_collision_type : range(
                                    self.perception_lengths[static_collision_type])}
        elif static_collision_type == "pressure":
            static_dict = {"r_finger_periph_pressure" : range(1),
                           "r_finger_pad_pressure" : range(1), 
                           "l_finger_periph_pressure" : range(1),
                           "l_finger_pad_pressure" : range(1)}
        elif type(static_collision_type) == type({}):
            static_dict = static_collision_type
        else:
            err("Bad static_collision_type")
            static_dict = {}  # fall back to an empty dict so the assignment below is defined
        self.static_dict = static_dict

    def _data_spawn_thread(self):
        # only one thread allowed to wait at a time
        # all other threads will pass through
        if self.data_thread_spawner_lock1.acquire(False):
            if self.data_thread_spawner_lock2.acquire(True):
                self.data_thread_spawner_lock1.acquire(False)
                self.data_thread_spawner_lock1.release()
                mt = self.DataThread(self)
                mt.start()

    class DataThread(threading.Thread):
        def __init__(self, apm):
            threading.Thread.__init__(self)
            self.apm = apm

        def run(self):
            self.apm._monitor_data_collector()
            self.apm.data_thread_spawner_lock2.acquire(False)
            self.apm.data_thread_spawner_lock2.release()

    def _monitor_data_collector(self):
        st_time = rospy.Time.now().to_sec()
        #########################################
        if rospy.is_shutdown():
            self._trigger_collision()
            self._finish_collision("rospy shutdown")
        add_data = []
        for k in self.perceptions:
            p_vals = self.perceptions[k]()[1]
            for i, v in enumerate(p_vals):
                # populate impact collision buffers
                for b in range(self.impact_fgc.filter_len - 1, -1, -1):
                    self.i_data_buffers[k][i][(
                        b - self.cur_iter) % self.impact_fgc.filter_len, b] = v
                    if self.debug:
                        if b == 0 and k == "joint_angles": # and ((i == 0) or (i == 1)):
                            print v,
                # populate collision type detection buffers
                for b in range(self.type_fgc.filter_len - 1, -1, -1):
                    self.t_data_buffers[k][i][(
                        b - self.cur_iter) % self.type_fgc.filter_len, b] = v

        if self.debug:
            print


        if self.is_dynamic:
            if not self.collision_detected:
                self._dynamic_collision_detection()
            else:
                self._dynamic_collision_classifying()
        else:
            self._static_collision_detection()
        self.cur_iter += 1
        #########################################
        end_time = rospy.Time.now().to_sec()
        self.t_sum += end_time - st_time
        self.t_num += 1.

    def _dynamic_collision_detection(self):

        if self.cur_iter < self.impact_fgc.filter_len:
            return
        instance = np.zeros((self.i_instance_len, 1))
        i_place = 0
        for k in self.PERCEPTION_ORDER:
            for coord in range(len(self.i_data_buffers[k])):
                for deg in range(3):
                    if deg not in self.I_DEGREE_DICT[k][coord]:
                        continue
                    for sigma_i, sigma_t in enumerate(self.i_sigma_list):
                        val = self.impact_fgc.convolve_pt(
                                self.i_data_buffers[k][coord][(
                                    self.cur_iter % self.impact_fgc.filter_len)], 
                                    deg, sigma_i)
                        instance[i_place, 0] = val
                        i_place += 1

        has_collided = self.impact_classifier.predict(instance)

        if has_collided and not self.debug:
            if not self.collision_detected:
                self._trigger_collision()


    def _dynamic_collision_classifying(self):

        if self.cur_iter < self.type_fgc.filter_len:
            # TODO HANDLE THIS CASE
            return
        if ((self.cur_iter - self.coll_iter) > self.type_fgc.filter_len / 2):
            return
        instance = np.zeros((self.t_instance_len, 1))
        i_place = 0
        for k in self.PERCEPTION_ORDER:
            for coord in range(len(self.t_data_buffers[k])):
                for deg in range(3):
                    if deg not in self.T_DEGREE_DICT[k][coord]:
                        continue
                    for sigma_i, sigma_t in enumerate(self.t_sigma_list):
                        val = self.type_fgc.convolve_pt(
                                self.t_data_buffers[k][coord][(
                                    self.cur_iter % self.type_fgc.filter_len)], 
                                    deg, sigma_i)
                        instance[i_place, 0] = val
                        i_place += 1

        collision_class = self.coll_type_classifier.predict(instance)

        if collision_class == 1.:
            type = "Environmental collision"
        else:
            type = "Table collision"
        self._finish_collision(type)


    def _static_collision_detection(self):
        if self.cur_iter < self.impact_fgc.filter_len:
            return
        for k in self.PERCEPTION_ORDER:
            if k not in self.static_dict:
                continue
            for coord in self.static_dict[k]:
                if k == "gripper_pose" and coord >= 3:
                    # Quaternions are unreliable, this should be fixed
                    # at some point
                    if self.debug:
                        self.temp_dict[k][coord].append(0.)
                    continue
                val = self.impact_fgc.convolve_pt(self.i_data_buffers[k][coord][self.cur_iter % self.impact_fgc.filter_len], 1, self.STATIC_SIGMA_INDEX)
                if self.debug:
                    self.temp_dict[k][coord].append(val)
                if (np.fabs(val) >= self.STATIC_DERIV_THRESH[k][coord] 
                    and not self.debug):
                    print "Collision:", val, "Thresh:", self.STATIC_DERIV_THRESH[k][coord]
                    self._trigger_collision()
                    self._finish_collision(k + " collision", coord)
                # if k == "r_finger_pad_pressure" and random.randint(0,40) == 0:
                #     print val


    def _trigger_collision(self):
        self.collision_detected = True
        self.coll_iter = self.cur_iter - self.impact_fgc.filter_len / 2
        if not self.callback is None:
            self.callback()

    def _finish_collision(self, type="Unlabeled", coord=0):
        if not self.collision_finished:
            print '-' * 60
            print
            print '                       %s detected' % type
            print
            print '-' * 60
            log("%s reported" % type)
            self.collision_type = type
            self.collision_coord = coord
            if not self.callback is None:
                self.callback()
            self.collision_finished = True
            self.end_collision_detection()
            
    ##
    # Stop capturing perception data.  Store output data in datasets list for
    # later statistics.
    def end_collision_detection(self):
        while (not self.collision_finished and self.collision_detected 
                and not rospy.is_shutdown()):
            rospy.sleep(0.01)
        with self.end_monitor_lock:
            if not self.data_thread_spawner is None:
                print "Avg cvpt: %1.9f" % (self.t_sum / self.t_num)
                print "cvpt sum: %1.5f" % (self.t_sum)
                print self.t_num
                self.data_thread_spawner.stop()
                self.data_thread_spawner_lock1.acquire(False)
                self.data_thread_spawner_lock1.release()
                self.data_thread_spawner_lock2.acquire(False)
                self.data_thread_spawner_lock2.release()
                self.data_thread_spawner = None
            return
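Pulling the methods above together, a rough end-to-end sketch (assumes a
running ROS environment; pass a tf.TransformListener to activate_sensing if
the gripper_pose percept is needed):

rospy.init_node("arm_monitor_demo")
apm = ArmPerceptionMonitor(0)              # 0 selects the right arm
apm.activate_sensing(tf_listener=None)
apm.start_data_capture()
rospy.sleep(2.)
apm.stop_data_capture()
apm.begin_collision_detection(static_collision_type="all")
rospy.sleep(5.)
apm.end_collision_detection()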
Example 7
    def __init__(self, arm, tf_listener=None, rate=0.001, 
                 percept_mon_list=None, model_zeros=None):
        log("Initializing arm perception listeners")

        self.rate = rate

        if arm == 0:
            armc = "r"
            is_right_arm = True
        else:
            armc = "l"
            is_right_arm = False

        self.model_zeros = model_zeros

        self.perceptions = {}

        self.perception_names = [ "accelerometer",
                                  "joint_angles",
                                  "joint_velocities",
                                  "joint_efforts",
                                  "r_finger_periph_pressure",
                                  "r_finger_pad_pressure", 
                                  "l_finger_periph_pressure",
                                  "l_finger_pad_pressure",
                                  "gripper_pose"]

        if percept_mon_list is None:
            percept_mon_list = self.perception_names

        if "accelerometer" in percept_mon_list:
            accel_listener = GenericListener("accel_mon_node", AccelerometerState, 
                                     "accelerometer/" + armc + "_gripper_motor",
                                     rate, accel_state_processor)
            self.perceptions["accelerometer"] = accel_listener.read

        if "joint_angles" in percept_mon_list:
            joint_angles_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=0)
            joint_angles_listener = GenericListener("joint_angles_mon_node", JointState, 
                                               "joint_states", rate, joint_angles_processor)
            self.perceptions["joint_angles"] = joint_angles_listener.read

        if "joint_velocities" in percept_mon_list:
            joint_velocities_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=1)
            joint_velocities_listener = GenericListener("joint_efforts_mon_node", JointState, 
                                              "joint_states", rate, joint_velocities_processor)
            self.perceptions["joint_velocities"] = joint_velocities_listener.read

        if "joint_efforts" in percept_mon_list:
            joint_efforts_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=2)
            joint_efforts_listener = GenericListener("joint_efforts_mon_node", JointState, 
                                              "joint_states", rate, joint_efforts_processor)
            self.perceptions["joint_efforts"] = joint_efforts_listener.read

        if "r_finger_periph_pressure" in percept_mon_list:
            r_finger_periph_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=True, indicies=range(1,7))
            r_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         r_finger_periph_pressure_processor)
            self.perceptions["r_finger_periph_pressure"] = r_finger_periph_pressure_listener.read

        if "r_finger_pad_pressure" in percept_mon_list:
            r_finger_pad_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=True, indicies=range(7,22))
            r_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         r_finger_pad_pressure_processor)
            self.perceptions["r_finger_pad_pressure"] = r_finger_pad_pressure_listener.read

        if "l_finger_periph_pressure" in percept_mon_list:
            l_finger_periph_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=False, indicies=range(1,7))
            l_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         l_finger_periph_pressure_processor)
            self.perceptions["l_finger_periph_pressure"] = l_finger_periph_pressure_listener.read

        if "l_finger_pad_pressure" in percept_mon_list:
            l_finger_pad_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=False, indicies=range(7,22))
            l_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         l_finger_pad_pressure_processor)
            self.perceptions["l_finger_pad_pressure"] = l_finger_pad_pressure_listener.read

        for k in self.perceptions:
            self.perceptions[k] = ft.partial(self.perceptions[k], willing_to_wait=False, 
                                             quiet=True,
                                             warn=False, allow_duplication=True)

        if "gripper_pose" in percept_mon_list:
            def gripper_pose_lookup():
                lct = tf_listener.getLatestCommonTime("/torso_lift_link", "/" + armc + "_wrist_roll_link")
                pos, quat = tf_listener.lookupTransform("/torso_lift_link", "/" + armc + "_wrist_roll_link", lct)
                return (lct.to_nsec(), np.array(pos + quat))

            self.perceptions["gripper_pose"] = gripper_pose_lookup


        # all callbacks should return data in this format:
        # (t, np.array([x1, x2, ...]))
        # t is time in nanoseconds



        self.clear_vars()

        self.logger = RateCaller(self._gather_perception, self.rate)
        
        log("Finished initialization")
Example 8
class ArmPerceptionMonitor( ):

    ##
    # Initializes the listeners on the perception topics
    #
    # @param arm 0 if right, 1 if left
    # @param rate the rate at which the perception should capture states
    # @param percept_mon_list list of perceptions to monitor; if None, do all
    def __init__(self, arm, tf_listener=None, rate=0.001, 
                 percept_mon_list=None, model_zeros=None):
        log("Initializing arm perception listeners")

        self.rate = rate

        if arm == 0:
            armc = "r"
            is_right_arm = True
        else:
            armc = "l"
            is_right_arm = False

        self.model_zeros = model_zeros

        self.perceptions = {}

        self.perception_names = [ "accelerometer",
                                  "joint_angles",
                                  "joint_velocities",
                                  "joint_efforts",
                                  "r_finger_periph_pressure",
                                  "r_finger_pad_pressure", 
                                  "l_finger_periph_pressure",
                                  "l_finger_pad_pressure",
                                  "gripper_pose"]

        if percept_mon_list is None:
            percept_mon_list = self.perception_names

        if "accelerometer" in percept_mon_list:
            accel_listener = GenericListener("accel_mon_node", AccelerometerState, 
                                     "accelerometer/" + armc + "_gripper_motor",
                                     rate, accel_state_processor)
            self.perceptions["accelerometer"] = accel_listener.read

        if "joint_angles" in percept_mon_list:
            joint_angles_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=0)
            joint_angles_listener = GenericListener("joint_angles_mon_node", JointState, 
                                               "joint_states", rate, joint_angles_processor)
            self.perceptions["joint_angles"] = joint_angles_listener.read

        if "joint_velocities" in percept_mon_list:
            joint_velocities_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=1)
            joint_velocities_listener = GenericListener("joint_efforts_mon_node", JointState, 
                                              "joint_states", rate, joint_velocities_processor)
            self.perceptions["joint_velocities"] = joint_velocities_listener.read

        if "joint_efforts" in percept_mon_list:
            joint_efforts_processor = ft.partial(joints_state_processor, 
                                                right_arm=is_right_arm, 
                                                angles_velocities_efforts=2)
            joint_efforts_listener = GenericListener("joint_efforts_mon_node", JointState, 
                                              "joint_states", rate, joint_efforts_processor)
            self.perceptions["joint_efforts"] = joint_efforts_listener.read

        if "r_finger_periph_pressure" in percept_mon_list:
            r_finger_periph_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=True, indicies=range(1,7))
            r_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         r_finger_periph_pressure_processor)
            self.perceptions["r_finger_periph_pressure"] = r_finger_periph_pressure_listener.read

        if "r_finger_pad_pressure" in percept_mon_list:
            r_finger_pad_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=True, indicies=range(7,22))
            r_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         r_finger_pad_pressure_processor)
            self.perceptions["r_finger_pad_pressure"] = r_finger_pad_pressure_listener.read

        if "l_finger_periph_pressure" in percept_mon_list:
            l_finger_periph_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=False, indicies=range(1,7))
            l_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         l_finger_periph_pressure_processor)
            self.perceptions["l_finger_periph_pressure"] = l_finger_periph_pressure_listener.read

        if "l_finger_pad_pressure" in percept_mon_list:
            l_finger_pad_pressure_processor = ft.partial(pressure_state_processor, 
                                            right_finger_tip=False, indicies=range(7,22))
            l_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState, 
                                                         "pressure/" + armc + "_gripper_motor", rate, 
                                                         l_finger_pad_pressure_processor)
            self.perceptions["l_finger_pad_pressure"] = l_finger_pad_pressure_listener.read

        for k in self.perceptions:
            self.perceptions[k] = ft.partial(self.perceptions[k], willing_to_wait=False, 
                                             quiet=True,
                                             warn=False, allow_duplication=True)

        if "gripper_pose" in percept_mon_list:
            def gripper_pose_lookup():
                lct = tf_listener.getLatestCommonTime("/torso_lift_link", "/" + armc + "_wrist_roll_link")
                pos, quat = tf_listener.lookupTransform("/torso_lift_link", "/" + armc + "_wrist_roll_link", lct)
                return (lct.to_nsec(), np.array(pos + quat))

            self.perceptions["gripper_pose"] = gripper_pose_lookup


        # all callbacks should return data in this format:
        # (t, np.array([x1, x2, ...]))
        # t is time in nanoseconds



        self.clear_vars()

        self.logger = RateCaller(self._gather_perception, self.rate)
        
        log("Finished initialization")

    def _gather_perception(self):
        t1 = rospy.Time.now().to_sec()
        for k in self.perceptions:
            self.datasets[k][-1] += [self.perceptions[k]()]
        t2 = rospy.Time.now().to_sec()
        import random
        if random.randint(0,100)==0:
            print "Time:", t1-t2

    ##
    # Initialize variables
    def clear_vars(self):
        self.datasets = {}
        self.models = {}
        for k in self.perceptions:
            self.datasets[k] = []
            self.models[k] = {"mean" : None, "variance" : None}

        self.active = False

    ##
    # Begin capturing perception data for all of the listeners
    #
    # @param duration If None, continue capturing until stop is called.
    #                 Else, stop capturing after duration seconds have passed.
    def start_training(self, duration=None):
        if self.active:
            log("Perception already active.")
            return
        self.active = True

        for k in self.perceptions:
            self.datasets[k] += [[]]
        self.logger.run()

        if duration is not None:
            # threading.Timer takes (interval, function); start it so training
            # stops automatically after `duration` seconds
            threading.Timer(duration, self.stop_training).start()

    ##
    # Stop capturing perception data.  Store output data in datasets list for
    # later statistics.
    def stop_training(self):
        if not self.active:
            log("Nothing to stop.")
            return

        self.logger.stop()

        self.active = False
        return len(self.datasets[self.datasets.keys()[0]][-1])

    ##
    # Save training data as a pickle with given filename
    #
    # @param filename name of the pickle
    def save(self, filename):
        save_pickle((self.datasets, self.models), filename)

    ##
    # Load training data as a pickle with given filename
    #
    # @param filename name of the pickle
    def load(self, filename):
        self.datasets, self.models = load_pickle(filename)
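
    # Illustrative workflow sketch (not part of the original class): collect a
    # few runs of the same trajectory and persist them, assuming an instance
    # `apm` of this class:
    #     for i in range(3):
    #         apm.start_training()
    #         # ...execute the arm trajectory here...
    #         apm.stop_training()
    #     apm.save("trajectory_data.pickle")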

    ##
    # Generates model functions of all the perceptions over several
    # identical trajectories. Each of the parameters is a dictionary
    # mapping perceptions to their parameters.
    # 
    # @param smooth_wind_dict the window size of the smoothing function
    # @param var_wind_dict window size of the variance function
    # @param var_smooth_wind_dict window size of the smoothing function on the variance
    # @return mean function, variance function
    # def generate_models(self, smooth_wind_dict=None, var_wind_dict=None):

    #     smooth_wind_default = 234
    #     var_wind_default = 400
    #     var_smooth_wind = 143
    #     for perception in self.perceptions:
    #         data_list = self.datasets[perception]
    #         
    #         if data_list is None:
    #             log("No data to generate model for")
    #             return None

    #         if self.active:
    #             log("Perception already active.")
    #             return None

    #         # get the minimum model length
    #         lens = [len(m) for m in data_list]
    #         max_len = np.max(lens)

    #         # dynamic finding of parameters
    #         if smooth_wind_dict is None or smooth_wind_dict[perception] is None:
    #             smooth_wind = smooth_wind_default
    #         else:
    #             smooth_wind = smooth_wind_dict[perception]

    #         ret_means, ret_vars, ret_mean_models, ret_noise_vars = [], [], [], []
    #         ret_times, noise_vars = [], []
    #         # find the number of coordinates from the first element
    #         num_coords = len(data_list[0][0][1])
    #         for coord in range(num_coords):
    #             mean_models, variance_models = [], []
    #             times = None
    #             for stream in data_list:
    #                 # extract only the data stream for a single coordinate (all x values)
    #                 stream_coord = np.array(zip(*zip(*stream)[1])[coord])
    #                 cur_mean_model = signal_smooth(stream_coord, smooth_wind)
    #                 mean_models += [cur_mean_model]
    #             
    #                 # sum up the squared difference over the whole model
    #                 noise_vars += [ ( sum([(x - y) ** 2 for x,y in zip(cur_mean_model,stream_coord)]) /
    #                                                  len(cur_mean_model) ) ]

    #             # find the average case over the several runs
    #             avg_means_model = np.array([0.] * max_len)
    #             for i in range(max_len):
    #                 n = 0
    #                 for j in range(len(mean_models)):
    #                     if i < len(mean_models[j]):
    #                         avg_means_model[i] += mean_models[j][i]
    #                         n += 1
    #                 avg_means_model[i] /= n

    #             if var_wind_dict is None or var_wind_dict[perception] is None:
    #                 var_wind = var_wind_default
    #             else:
    #                 var_wind = var_wind_dict[perception]
    #             # find the variance of the signal but use var_wind points around the centers
    #             # to increase the sample size
    #             vars_model = signal_list_variance(mean_models, avg_means_model, var_wind)
    #             vars_model = signal_smooth(vars_model, var_smooth_wind)
    #             vars_model = signal_smooth(vars_model, var_smooth_wind + 23)

    #             ret_times += [times]
    #             ret_means += [avg_means_model]
    #             ret_vars += [vars_model]
    #             ret_mean_models += [mean_models]
    #             ret_noise_vars += [np.average(noise_vars)]

    #         # TODO deal with timestamp data in some way?
    #         # self.models[perception]["time"] = ret_times
    #         self.models[perception]["mean"] = np.array(zip(*ret_means))
    #         self.models[perception]["variance"] = np.array(zip(*ret_vars))
    #         a = ret_mean_models
    #         b = []
    #         for stream in range(len(a[0])):
    #             t1 = []
    #             for val in range(len(a[0][0])):
    #                     t2 = []
    #                     for coord in range(len(a)):
    #                             if val < len(a[coord][stream]):
    #                                 t2 += [a[coord][stream][val]]
    #                     t1 += [np.array(t2)]
    #             b += [t1]
 
    #         self.models[perception]["smoothed_signals"] = b
    #         self.models[perception]["noise_variance"] = np.array(ret_noise_vars)

    #     return self.models

    def get_zeros(self, time=4.):
        self._zeros = {}
        for k in self.perceptions:
            self._zeros[k] = None
        self._n = 0
            
        monitor = RateCaller(self._sum_values, self.rate)
        monitor.run()
        rospy.sleep(time)
        monitor.stop()

        for k in self.perceptions:
            self._zeros[k] /= self._n
        return self._zeros

    def _sum_values(self):
        for k in self.perceptions:
            add_data = np.array(self.perceptions[k]()[1])
            if self._zeros[k] is None:
                self._zeros[k] = np.copy(add_data)
            else:
                self._zeros[k] += add_data
        self._n += 1

    ##
    # Sets up monitoring parameters
    def setup_monitoring(self, models,
                               std_dev_dict=None, noise_dev_dict=None, duration=None,
                               std_dev_default=2.0, noise_dev_default=0.25,
                               tol_thresh_dict=None,
                               contingency=None, window_size=70, current_zeros=None,
                               sampling_rate = 1,
                               transform_dict=None, verbose=True, collide=True):
        self.std_dev_default = std_dev_default
        self.noise_dev_default = noise_dev_default

        self.models = models
        self.current_data = {}
        self.std_dev_dict = std_dev_dict
        self.noise_dev_dict = {}
        self.contingency = contingency
        self.window_size = window_size
        self.current_zeros = {}
        self.transform_dict = transform_dict
        self.tol_thresh_dict = tol_thresh_dict
        self.sampling_rate = sampling_rate
        self.verbose = verbose
        self.collide = collide
        self.sum_data = {}
        self.cur_pt = 0
        self.failure = False
        self.dur_timer = None
        self.avg_list = {}
        self.cum_avg = {}
        self.c = {}
        self.locks = {}

        for k in self.perceptions:
            self.current_data[k] = [None] * window_size
            self.sum_data[k] = None
            self.avg_list[k] = []
            self.c[k] = None
            self.locks[k] = threading.Lock()

        self.create_max_min()


    # Checks percept to see if the model indicates a collision with the current
    # smoothed perception avg
    # Returns index of perception and difference if collision, -1, 0 else
    def create_max_min(self):
        for k in self.models:
            deviation = self.models[k]["variance"]
            # noise_deviation = np.sqrt(self.models[k]["noise_variance"])
            if self.std_dev_dict is not None and k in self.std_dev_dict:
                std_dev = np.array(self.std_dev_dict[k])
            else:
                std_dev = self.std_dev_default
            # if self.noise_dev_dict is not None and k in self.noise_dev_dict:
            #     noise_dev = np.array(self.noise_dev_dict[k])
            # else:
            #     noise_dev = self.noise_dev_default
            if self.tol_thresh_dict is not None and k in self.tol_thresh_dict:
                tol_thresh = np.array(self.tol_thresh_dict[k])
            else:
                tol_thresh = 0.

            self.models[k]["dev"] = (std_dev * deviation + tol_thresh)
            # self.models[k]["dev"] = (std_dev * deviation + noise_dev * noise_deviation + tol_thresh)


            # This is the monitoring equation
            self.models[k]["max"] = self.models[k]["mean"] + self.models[k]["dev"]
            self.models[k]["min"] = self.models[k]["mean"] - self.models[k]["dev"]
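
    # The envelope above is applied pointwise at monitoring time: a window
    # average is flagged when it leaves [mean - dev, mean + dev].  A minimal
    # sketch of that check (illustrative names, not part of this class):
    #
    #   def outside_envelope(avg, mean, dev):
    #       # returns a boolean array, one entry per signal coordinate
    #       return (avg > mean + dev) | (avg < mean - dev)
    #
    #   # e.g. outside_envelope(np.array([0.2, 3.0]),
    #   #                       np.array([0.0, 0.0]), np.array([1.0, 1.0]))
    #   # -> array([False,  True])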

    ##
    # Begin monitoring perception data to make sure it doesn't deviate from
    # the model provided.
    #
    # TODO DOCS
    # @param duration If None, continue capturing until stop is called.
    #                 Else, stop capturing after duration seconds have passed.
    def begin_monitoring(self, models=None, model_zeros=None,
                               std_dev_dict=None, noise_dev_dict=None, duration=None,
                               std_dev_default=2.0, noise_dev_default=0.25,
                               tol_thresh_dict=None,
                               contingency=None, window_size=70, current_zeros=None,
                               sampling_rate = 1,
                               only_pressure=False,
                               transform_dict=None, verbose=True, collide=True):
        if self.active:
            log("Perception already active.")
            return
        self.active = True


        self.setup_monitoring(models, 
                               std_dev_dict=std_dev_dict, noise_dev_dict=noise_dev_dict,
                               duration=duration, std_dev_default=std_dev_default, 
                               noise_dev_default=noise_dev_default,                         
                               tol_thresh_dict=tol_thresh_dict,
                               contingency=contingency, window_size=window_size, 
                               sampling_rate=sampling_rate,
                               current_zeros=current_zeros, transform_dict=transform_dict, 
                               verbose=verbose, collide=collide)
        # if models is not None:
        #     self.models = models
        if model_zeros is not None:
            self.model_zeros = model_zeros
        self.cur_pt = 0
        self.collision_times = {}
        self.collision_sums = {}
        self.cur_col_time = 0
        self.current_zeros = {}
        self.z_sum = {}
        self.z_rsum = {}
        self.z_list = {}
        self.min_prob = 100000.0
        self.cur_mals = {}
        self.mals = None
        self.only_pressure = only_pressure

        self.monitor = RateCaller(self._monitor_data, self.rate * self.sampling_rate)
        self.monitor.run()
        # self.sumcalls = {}
        # self.sumsave = {}

        if duration is not None:
            self.dur_timer = threading.Timer(duration, self.end_monitoring)
            self.dur_timer.start()

    def _stable_summer(self, percept, data):
        # kahanSum
        k = percept
        # if percept not in self.sumcalls:
        #     self.sumcalls[percept] = 1
        #     self.sumsave[percept] = []
        # else:
        #     self.sumcalls[percept] += 1

        if self.c[k] is None:
            self.c[k] = np.array([0.]*len(data))
        if self.sum_data[k] is None:
            self.sum_data[k] = np.copy(data)
        else:
            y = data - self.c[k]
            t = self.sum_data[k] + y
            self.c[k] = (t - self.sum_data[k]) - y
            self.sum_data[k] = t
            # self.sumsave[percept] += [self.sum_data[k]]

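    # The pair (sum_data, c) above implements Kahan (compensated) summation over
    # a sliding window: `c[k]` holds the low-order bits lost by each addition, so
    # repeatedly adding and subtracting samples does not accumulate rounding
    # error.  A minimal standalone sketch of the same idea (illustrative only):
    #
    #   def kahan_sum(values):
    #       total, c = 0.0, 0.0
    #       for x in values:
    #           y = x - c              # re-inject previously lost low-order bits
    #           t = total + y
    #           c = (t - total) - y    # bits of y that did not fit into total
    #           total = t
    #       return total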

    def _monitor_data(self):
        avg_dict = {}
        sttime = rospy.Time.now().to_sec()
        for k in self.perceptions:
            # update the running sum
            add_data = self.perceptions[k]()[1]

            # apply necessary transforms
            # if self.transform_dict is not None:
            #     if k in self.transform_dict:
            #         add_data = self.transform_dict[k](add_data)
                    
            self._stable_summer(k, add_data)

            # If we have enough data to monitor, we check to see if the values are in range
            if self.cur_pt >= self.window_size: #self.current_data[k].full():
                # self.prev_sum = copy.deepcopy(self.sum_data)
                avg_dict[k] = self.sum_data[k] / self.window_size
                if k not in self.current_zeros:
                    if not self.only_pressure:
                        mean = np.array(self.models[k]["mean"][self.cur_pt * self.sampling_rate])
                    else:
                        mean = np.array(self.models[k]["mean"][0])
                    self.current_zeros[k] = mean - avg_dict[k]
                    # self.current_zeros[k] = - avg_dict[k]

                avg_dict[k] += self.current_zeros[k]
                self.avg_list[k] += [avg_dict[k]] * self.sampling_rate
                # if self.cur_pt == self.window_size:
                #     print self.avg_list[k]

                # self.prev_avg = copy.deepcopy(avg)
                # offset zeros into original perception frame
                # if self.current_zeros is not None:
                #     if False and self.model_zeros is not None:
                #         avg_dict[k] += self.model_zeros[k] - self.current_zeros[k]
                #     else:
                #         # this is hacky, need to use zeros during training instead of first pt
                #         # print "NOOOOOOOOOOOO!\n"*10
                #         avg_dict[k] +=  np.array(self.models[k]["mean"][self.window_size]) - self.current_zeros[k]

                # if collision_detected:
                #     log("Value %d of the perception %s failed with difference %f"
                #                                                % (index, k, diff))
                #   # if diff > 5. and k == "accelerometer":
                #   #     print "avg_list", self.avg_list
                #   #     print "avg", avg
                #   #     for i in range(self.window_size+1):
                #   #         d = self.current_data[k].get()
                #   #         print d
                #   #         self.current_data[k].put(d)


                #   self.failure = True
                #   self.contingency()
                #   self.monitor.stop()

                rem_data = self.current_data[k][self.cur_pt % self.window_size]
                self._stable_summer(k, -rem_data)

            self.current_data[k][self.cur_pt % self.window_size] = add_data

        if self.cur_pt >= self.window_size: #self.current_data[k].full():
            collision_detected = self.collision_detect(avg_dict)
            if self.collide and collision_detected:
                print "collision reported"
                self.failure = True
                if self.contingency is not None:
                    self.contingency()
                self.monitor.stop()
                return


        self.cur_pt += 1

        if not self.only_pressure:
            if self.cur_pt * self.sampling_rate >= 1.00 * len(self.models[self.models.keys()[0]]["mean"]):
                print "ending early:", self.cur_pt * self.sampling_rate
                self.end_monitoring()
        endtime = rospy.Time.now().to_sec()
        # if self.cur_pt % 10 == 0:
        #     print "dur:", sttime - endtime
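
    # _monitor_data maintains each window average incrementally: the newest
    # sample is added to the running (compensated) sum and, once the window is
    # full, the sample that falls out of the window is subtracted again.  A
    # minimal sketch of that recurrence with a plain list (illustrative only):
    #
    #   def sliding_mean(samples, window):
    #       s, buf, means = 0.0, [0.0] * window, []
    #       for i, x in enumerate(samples):
    #           s += x
    #           if i >= window:
    #               s -= buf[i % window]      # drop the sample leaving the window
    #               means.append(s / window)
    #           buf[i % window] = x
    #       return means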



    ##
    # Stop monitoring perception data, report the z-score averages and the
    # minimum probability seen, and return the list of smoothed window averages.
    def end_monitoring(self):
        if not self.active:
            log("Nothing to stop.")
            return self.avg_list 

        print "-------------------------"
        print "z averages:"
        self.z_avg = {}
        for k in self.z_sum:
            self.z_avg[k] = self.z_sum[k] / self.cur_pt
            print k, ":", self.z_avg[k]
        print "-------------------------"
        print "MINIMUM PROB"
        print self.min_prob
        print "-------------------------"
        # print "Sum calls", self.sumcalls
        # if self.sumcalls["r_finger_periph_pressure"] == 202:
        #     print self.sumsave
        self.monitor.stop()
        if self.dur_timer is not None:
            self.dur_timer.cancel()
            self.dur_timer = None

        self.active = False
        return self.avg_list

    ##
    # Checks each percept to see if the model indicates a collision with the
    # current smoothed perception average.
    # Returns True if the filtered z-scores indicate a collision, False otherwise
    def collision_detect(self, avg_dict):
        # is_outside_range_dict = {}
        z = {}
        mal_sum = 0.
        for k in avg_dict:

            # This is the monitoring equation
            # is_outside_range_dict[k] = ((avg_dict[k] > self.models[k]["max"][self.cur_pt * self.sampling_rate]) +
            #                             (avg_dict[k] < self.models[k]["min"][self.cur_pt * self.sampling_rate]))
            # Uses both variance from various grasp tries and general noise variance
            if not self.only_pressure:
                mean = self.models[k]["mean"][self.cur_pt * self.sampling_rate]
                dev = self.models[k]["dev"][self.cur_pt * self.sampling_rate]
            else:
                mean = self.models[k]["mean"][0]
                dev = self.models[k]["dev"][0]

            cur_mal = ((avg_dict[k] - mean) / dev) ** 2
            self.cur_mals[k] = cur_mal
            mal_sum += np.add.reduce( ( (avg_dict[k] - mean) / dev) ** 2 )
            z[k] = np.fabs(avg_dict[k] - mean) / dev 

            if k not in self.z_sum:
                self.z_sum[k] = np.copy(z[k])
            else:
                self.z_sum[k] += z[k]

        # print "incoldet"
        # collision_detected = self.collision_filtering(is_outside_range_dict)

        # collision_detected = self.collision_filtering_mal(np.sqrt(mal_sum))
        # return collision_detected

        collision_detected = self.collision_filtering(z)

        # print "coldete", collision_detected
        return collision_detected

    def collision_filtering_mal(self, mal):
        prob = 1.
        TIME_WINDOW = 4
        if self.mals is None:
            self.mals = np.array([0.] * TIME_WINDOW)
        self.mals[self.cur_pt % TIME_WINDOW] = mal

        if self.cur_pt < TIME_WINDOW:
            return False
        mal_avg = np.add.reduce(self.mals / TIME_WINDOW)

        MAL_THRESH = 8.0
        if mal_avg > MAL_THRESH:
            print "Collision with mal dist:", mal_avg
            for k in self.cur_mals:
                print k, self.cur_mals[k]
            return True
        return False
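
    # collision_filtering_mal averages a diagonal-covariance Mahalanobis-style
    # distance over the last TIME_WINDOW samples and triggers when that average
    # crosses MAL_THRESH.  Sketch of the per-sample distance being accumulated
    # (illustrative names only):
    #
    #   def mahalanobis_diag(x, mean, dev):
    #       # dev plays the role of a per-coordinate standard deviation
    #       return np.sqrt(np.sum(((x - mean) / dev) ** 2))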

    def collision_filtering(self, z):
        prob = 1.
        TIME_WINDOW = 4
        for k in z:
            if k not in self.z_rsum:
                self.z_rsum[k] = np.copy(z[k])
                self.z_list[k] = [np.array([0.]*len(z[k]))] * TIME_WINDOW
            else:
                self.z_rsum[k] += z[k]
            
            self.z_rsum[k] -= self.z_list[k][self.cur_pt % TIME_WINDOW]
            self.z_list[k][self.cur_pt % TIME_WINDOW] = z[k]

            # print z[k]
            prob *= np.multiply.reduce(2 * stats.norm.cdf(-self.z_rsum[k] / TIME_WINDOW))
            # print prob

        if self.cur_pt < TIME_WINDOW:
            return False

        if prob < self.min_prob:
            self.min_prob = prob

        # CONFIDENCE = 1e-6
        if not self.only_pressure:
            # 4e-7 50/50
            CONFIDENCE = 2.0e-6
        else:
            # pressure only confidence
            CONFIDENCE = 1.e-5
        if self.cur_pt % 20 == 0:
            print "Current pt:", self.cur_pt, ", probability:", prob
        if prob < CONFIDENCE:
            print "probability:", prob
            print "collision returned"
            return True
    #       print "-------------------------"
    #       print num_percep_col
    #       for k in sig_counts:
    #           if sig_counts[k] >= PERCEPT_SIG_REQ[k]:
    #               print k, ":", sig_counts[k]
    #       print "-------------------------"
    #       return True

        return False
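
    # The probability test above treats each smoothed z-score as standard normal
    # and multiplies the two-sided tail probabilities 2 * Phi(-z) across every
    # coordinate of every percept; an improbably small product means the data is
    # unlikely under the no-collision model.  A standalone sketch of the same
    # computation for one percept (illustrative only):
    #
    #   from scipy import stats
    #
    #   def joint_tail_prob(z_window_avg):
    #       # z_window_avg: np.array of |x - mean| / dev averaged over the window
    #       return np.multiply.reduce(2 * stats.norm.cdf(-z_window_avg))
    #
    #   # e.g. joint_tail_prob(np.array([1.0, 3.0])) ~= 0.317 * 0.0027 ~= 8.6e-4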

    # def collision_filtering(self, is_outside_range_dict):
    #     isord = is_outside_range_dict
    #     TIME_WINDOW = 10
    #     NUM_PERCEPT_COL_REQ = 2
    #     PERCEPT_SIG_REQ = { "accelerometer" : 2,
    #                         "joint_angles" : 2,
    #                         "joint_velocities" : 3,
    #                         "joint_efforts" : 2,
    #                         "r_finger_periph_pressure" : 2,
    #                         "r_finger_pad_pressure" : 5, 
    #                         "l_finger_periph_pressure" : 2,
    #                         "l_finger_pad_pressure" : 5 }
    #     num_percep_col = 0
    #     sig_counts = {}
    #     num_periph = 0
    #     for k in isord:
    #         # if any(isord[k]):
    #         #     print k, isord
    #         if not k in self.collision_times:
    #             self.collision_times[k] = [np.array([0] * len(isord[k]))] * TIME_WINDOW
    #             self.collision_sums[k] = np.array([0] * len(isord[k]))

    #         # def queue_sum(q):
    #         #     sum = None
    #         #     for i in range(TIME_WINDOW):
    #         #         val = q.get()
    #         #         if sum is None:
    #         #             sum = np.copy(val)
    #         #         sum += val
    #         #         q.put(val)
    #         #     return sum
    #         # c_sum = queue_sum(self.collision_times[k])

    #         self.collision_sums[k] -= self.collision_times[k][self.cur_col_time]
    #         self.cur_col_time = (self.cur_col_time + 1) % TIME_WINDOW
    #         self.collision_times[k][self.cur_col_time] = np.int32(np.array(isord[k]))
    #         self.collision_sums[k] += self.collision_times[k][self.cur_col_time]
    #         def g0(x):
    #             if x > 0:
    #                 return 1
    #             return 0
    #         sig_count = np.sum(map(g0, self.collision_sums[k]))
    #         sig_counts[k] = sig_count
    #         if sig_count >= PERCEPT_SIG_REQ[k]:
    #             # print "cols", k, isord[k]
    #             if k == "r_finger_periph_pressure" or k == "l_finger_periph_pressure":
    #                 num_periph += 1
    #             num_percep_col += 1

    #     # num_periph != 1 eliminates side object collision possibilities
    #     if num_percep_col >= NUM_PERCEPT_COL_REQ and num_periph != 1:            
    #         print "collision returned"
    #         print "-------------------------"
    #         print num_percep_col
    #         for k in sig_counts:
    #             if sig_counts[k] >= PERCEPT_SIG_REQ[k]:
    #                 print k, ":", sig_counts[k]
    #         print "-------------------------"
    #         return True

    #     return False

        # if self.collide and any(is_outside_range):
        #     # sensor has fallen outside acceptable range, trigger
        #     for i, x in enumerate(is_outside_range):
        #         if x:
        #             # print "Average:", avg
        #             # print "Last avg:", self.prev_avg
        #             # print "Prev sum:", self.prev_sum
        #             # print "MOD:", self.model_zeros[k]
        #             # print "MON:", self.current_zeros[k]
        #             return i, diff[i]
        # return -1, 0.

    # @TODO add sampling_rate stuff
    def simulate_monitoring(self, monitor_data, models=None, model_zeros=None,
                               std_dev_dict=None, noise_dev_dict=None, 
                               duration=None,
                               std_dev_default=2.0, noise_dev_default=0.25,
                               tol_thresh_dict=None,
                               contingency=None, window_size=70, current_zeros=None,
                               transform_dict=None, verbose=True, collide=True):
        if models is None:
            models = self.models
        self.setup_monitoring(models,
                               std_dev_dict=std_dev_dict, noise_dev_dict=noise_dev_dict,
                               duration=duration, std_dev_default=std_dev_default,
                               noise_dev_default=noise_dev_default,
                               tol_thresh_dict=tol_thresh_dict,
                               contingency=contingency, window_size=window_size,
                               current_zeros=current_zeros, transform_dict=transform_dict,
                               verbose=verbose, collide=collide)
        if model_zeros is not None:
            self.model_zeros = model_zeros
        # state normally initialized by begin_monitoring
        self.only_pressure = False
        self.z_sum, self.z_rsum, self.z_list = {}, {}, {}
        self.cur_mals, self.mals = {}, None
        self.min_prob = 100000.0
        self.cur_pt = self.window_size
        collision = None
        # replay each recorded window average through the live detector
        for i in range(len(monitor_data[monitor_data.keys()[0]])):
            avg_dict = {}
            for k in monitor_data:
                avg_dict[k] = monitor_data[k][i]
            if self.collision_detect(avg_dict):
                collision = (self.cur_pt, avg_dict)
            self.cur_pt += 1
        return collision

    def resample_and_thin_data(self, monitor_data, sample_rate):
        ret_data = {}
        for k in monitor_data:
            ret_data[k] = {}
            ret_data[k]["mean"] = monitor_data[k]["mean"][::sample_rate]
            ret_data[k]["variance"] = monitor_data[k]["variance"][::sample_rate]
        return ret_data
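
    # resample_and_thin_data simply keeps every sample_rate-th model point via
    # extended slicing; e.g. np.arange(10)[::3] -> array([0, 3, 6, 9]).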
Ejemplo n.º 10
0
class ArmPerceptionMonitor():

    ##
    # Initializes internal variables
    #
    # @param arm 0 if right, 1 if left
    # @param percept_mon_list list of perceptions to monitor; if None, do all
    def __init__(self, arm, percept_mon_list=None):
        self.load_parameters()
        ############## Classifiers #################
        self.impact_classifier = cfiers.classifiers_dict[
            "small_refined_random_forest"]
        self.coll_type_classifier = cfiers.classifiers_dict[
            "large_refined_random_forest"]

        log("Loading impact classifier")
        self.impact_classifier.load(self.I_RTREE_CLASSIFIER)
        log("Loading collision type classifier")
        self.coll_type_classifier.load(self.T_RTREE_CLASSIFIER)

        if arm == 0:
            self.armc = "r"
            self.is_right_arm = True
        else:
            self.armc = "l"
            self.is_right_arm = False

        self.perceptions = {}

        self.perception_names = [
            "accelerometer", "joint_angles", "joint_velocities",
            "joint_efforts", "r_finger_periph_pressure",
            "r_finger_pad_pressure", "l_finger_periph_pressure",
            "l_finger_pad_pressure", "gripper_pose"
        ]
        self.perception_lengths = {
            "accelerometer": 3,
            "joint_angles": 7,
            "joint_velocities": 7,
            "joint_efforts": 7,
            "r_finger_periph_pressure": 1,
            "r_finger_pad_pressure": 1,
            "l_finger_periph_pressure": 1,
            "l_finger_pad_pressure": 1,
            "gripper_pose": 7
        }

        if percept_mon_list is None:
            self.percept_mon_list = self.perception_names
        else:
            self.percept_mon_list = percept_mon_list

        self.impact_fgc = FastGaussConvolve(self.SAMPLING_RATE,
                                            self.I_FILTER_BEG_DELAY,
                                            self.I_FILTER_CEN_DELAY,
                                            self.i_sigma_list)
        self.type_fgc = FastGaussConvolve(self.SAMPLING_RATE,
                                          self.T_FILTER_BEG_DELAY,
                                          self.T_FILTER_CEN_DELAY,
                                          self.t_sigma_list)

        self.perception_started = False
        # self.detect_ready = False
        self.fos = FileOperations()

    def load_parameters(self):
        self.SAMPLING_RATE = rospy.get_param(
            "/overhead_grasping/data_sampling_rate")
        self.PERCEPTION_ORDER = rospy.get_param(
            "/overhead_grasping/perception_order")

        # impact parameters
        self.I_FILTER_BEG_DELAY = rospy.get_param(
            "/overhead_grasping/i_filter_beg_delay")
        self.I_FILTER_CEN_DELAY = rospy.get_param(
            "/overhead_grasping/i_filter_cen_delay")
        self.I_MIN_SIGMA = rospy.get_param("/overhead_grasping/i_min_sigma")
        self.I_MAX_SIGMA = rospy.get_param("/overhead_grasping/i_max_sigma")
        self.I_NUM_SIGMA = rospy.get_param("/overhead_grasping/i_num_sigma")
        self.i_sigma_list = np.linspace(self.I_MIN_SIGMA, self.I_MAX_SIGMA,
                                        self.I_NUM_SIGMA)
        self.I_DEGREE_DICT = rospy.get_param(
            "/overhead_grasping/i_degree_dict")
        self.i_instance_len = 0
        for k in self.PERCEPTION_ORDER:
            for coord in self.I_DEGREE_DICT[k]:
                for deg in coord:
                    self.i_instance_len += self.I_NUM_SIGMA
        self.I_RTREE_CLASSIFIER = rospy.get_param(
            "/overhead_grasping/i_rtree_classifier")

        # collision type parameters
        self.T_FILTER_BEG_DELAY = rospy.get_param(
            "/overhead_grasping/t_filter_beg_delay")
        self.T_FILTER_CEN_DELAY = rospy.get_param(
            "/overhead_grasping/t_filter_cen_delay")
        self.T_MIN_SIGMA = rospy.get_param("/overhead_grasping/t_min_sigma")
        self.T_MAX_SIGMA = rospy.get_param("/overhead_grasping/t_max_sigma")
        self.T_NUM_SIGMA = rospy.get_param("/overhead_grasping/t_num_sigma")
        self.t_sigma_list = np.linspace(self.T_MIN_SIGMA, self.T_MAX_SIGMA,
                                        self.T_NUM_SIGMA)
        self.T_DEGREE_DICT = rospy.get_param(
            "/overhead_grasping/t_degree_dict")
        self.t_instance_len = 0
        for k in self.PERCEPTION_ORDER:
            for coord in self.T_DEGREE_DICT[k]:
                for deg in coord:
                    self.t_instance_len += self.T_NUM_SIGMA
        self.T_RTREE_CLASSIFIER = rospy.get_param(
            "/overhead_grasping/t_rtree_classifier")

        self.STATIC_SIGMA_INDEX = rospy.get_param(
            "/overhead_grasping/static_sigma_index")
        self.STATIC_DERIV_THRESH = rospy.get_param(
            "/overhead_grasping/static_deriv_thresh")

    ##
    # Initializes the listeners on the sensor topics.  This must
    # be started before any data can be collected from the arms.
    def activate_sensing(self, tf_listener=None):

        log("Initializing arm perception listeners")
        self.tf_listener = tf_listener

        if "accelerometer" in self.percept_mon_list:
            accel_listener = GenericListener(
                "accel_mon_node", AccelerometerState,
                "accelerometer/" + self.armc + "_gripper_motor",
                self.SAMPLING_RATE, accel_state_processor)
            self.perceptions["accelerometer"] = accel_listener.read

        if "joint_angles" in self.percept_mon_list:
            joint_angles_processor = ft.partial(joints_state_processor,
                                                right_arm=self.is_right_arm,
                                                angles_velocities_efforts=0)
            joint_angles_listener = GenericListener("joint_angles_mon_node",
                                                    JointState, "joint_states",
                                                    self.SAMPLING_RATE,
                                                    joint_angles_processor)
            self.perceptions["joint_angles"] = joint_angles_listener.read

        if "joint_velocities" in self.percept_mon_list:
            joint_velocities_processor = ft.partial(
                joints_state_processor,
                right_arm=self.is_right_arm,
                angles_velocities_efforts=1)
            joint_velocities_listener = GenericListener(
                "joint_vels_mon_node", JointState, "joint_states",
                self.SAMPLING_RATE, joint_velocities_processor)
            self.perceptions[
                "joint_velocities"] = joint_velocities_listener.read

        if "joint_efforts" in self.percept_mon_list:
            joint_efforts_processor = ft.partial(joints_state_processor,
                                                 right_arm=self.is_right_arm,
                                                 angles_velocities_efforts=2)
            joint_efforts_listener = GenericListener("joint_efforts_mon_node",
                                                     JointState,
                                                     "joint_states",
                                                     self.SAMPLING_RATE,
                                                     joint_efforts_processor)
            self.perceptions["joint_efforts"] = joint_efforts_listener.read

        if "r_finger_periph_pressure" in self.percept_mon_list:
            r_finger_periph_pressure_processor = ft.partial(
                pressure_state_sum_processor,
                right_finger_tip=True,
                indicies=range(1, 7))
            r_finger_periph_pressure_listener = GenericListener(
                self.armc + "_pressure_r_periph_mon_node", PressureState,
                "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE,
                r_finger_periph_pressure_processor)
            self.perceptions[
                "r_finger_periph_pressure"] = r_finger_periph_pressure_listener.read

        if "r_finger_pad_pressure" in self.percept_mon_list:
            r_finger_pad_pressure_processor = ft.partial(
                pressure_state_sum_processor,
                right_finger_tip=True,
                indicies=range(7, 22))
            r_finger_pad_pressure_listener = GenericListener(
                self.armc + "_pressure_r_pad_mon_node", PressureState,
                "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE,
                r_finger_pad_pressure_processor)
            self.perceptions[
                "r_finger_pad_pressure"] = r_finger_pad_pressure_listener.read

        if "l_finger_periph_pressure" in self.percept_mon_list:
            l_finger_periph_pressure_processor = ft.partial(
                pressure_state_sum_processor,
                right_finger_tip=False,
                indicies=range(1, 7))
            l_finger_periph_pressure_listener = GenericListener(
                self.armc + "_pressure_l_periph_mon_node", PressureState,
                "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE,
                l_finger_periph_pressure_processor)
            self.perceptions[
                "l_finger_periph_pressure"] = l_finger_periph_pressure_listener.read

        if "l_finger_pad_pressure" in self.percept_mon_list:
            l_finger_pad_pressure_processor = ft.partial(
                pressure_state_sum_processor,
                right_finger_tip=False,
                indicies=range(7, 22))
            l_finger_pad_pressure_listener = GenericListener(
                self.armc + "_pressure_l_pad_mon_node", PressureState,
                "pressure/" + self.armc + "_gripper_motor", self.SAMPLING_RATE,
                l_finger_pad_pressure_processor)
            self.perceptions[
                "l_finger_pad_pressure"] = l_finger_pad_pressure_listener.read

        for k in self.perceptions:
            # Make sure we have received a message first
            self.perceptions[k](allow_duplication=False, willing_to_wait=True)
            self.perceptions[k] = ft.partial(self.perceptions[k],
                                             willing_to_wait=False,
                                             quiet=True,
                                             warn=False,
                                             allow_duplication=True)

        if "gripper_pose" in self.percept_mon_list:

            def gripper_pose_lookup():
                if self.tf_listener is not None:
                    lct = self.tf_listener.getLatestCommonTime(
                        "/torso_lift_link",
                        "/" + self.armc + "_wrist_roll_link")
                    pos, quat = self.tf_listener.lookupTransform(
                        "/torso_lift_link",
                        "/" + self.armc + "_wrist_roll_link", lct)
                    return (lct.to_sec(), np.array(pos + quat))
                else:
                    return (0., np.array([0.] * 7))

            self.perceptions["gripper_pose"] = gripper_pose_lookup

        # all callbacks should return data in this format:
        # (t, np.array([x1, x2, ...]))
        # t is time in seconds
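        #
        # a minimal conforming callback would look like (illustrative only):
        #
        #   def fake_percept():
        #       return (rospy.Time.now().to_sec(), np.array([0., 0., 0.]))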

        self.clear_vars()
        self.perception_started = True

        log("Finished initialization")

    ##
    # Initialize variables
    def clear_vars(self):
        self.datasets = {}
        self.models = {}
        for k in self.perceptions:
            self.datasets[k] = []
            self.models[k] = {"mean": None, "variance": None}

    ##
    # Begin capturing perception data for all of the listeners
    #
    # @param duration If None, continue capturing until stop is called.
    #                 Else, stop capturing after duration seconds have passed.
    def start_data_capture(self, duration=None):
        if not self.perception_started:
            err("Perception hasn't been started!")
            return False

        self.logger = RateCaller(self._gather_perception, self.SAMPLING_RATE)

        for k in self.perceptions:
            self.datasets[k] += [[]]
        self.logger.run()

        if duration is not None:
            threading.Timer(duration, self.stop_data_capture).start()

        return True

    def _gather_perception(self):
        for k in self.perceptions:
            self.datasets[k][-1].append(self.perceptions[k]())

    ##
    # Stop capturing perception data.  Store output data in datasets list for
    # later statistics.
    def stop_data_capture(self):
        self.logger.stop()
        num_datapts = len(self.datasets[self.datasets.keys()[0]][-1])
        log("%d datapoints collected" % num_datapts)
        return num_datapts

    ##
    # Processes the raw signals for each percept.  sigs_proc holds three signal
    # sets, one per derivative degree (0, 1, 2); each is a list of convolutions
    # of the signals with that derivative of a Gaussian at several scales.
    def process_signals(self, datasets):

        models = {}

        sum, num = 0., 0.
        for p in datasets:
            models[p] = {}
            i_sigs_proc = [[[] for x in range(self.I_NUM_SIGMA)]
                           for y in range(3)]
            t_sigs_proc = [[[] for x in range(self.T_NUM_SIGMA)]
                           for y in range(3)]
            data_list = datasets[p]
            num_coords = len(data_list[0][0][1])
            times = None
            for coord in range(num_coords):
                stream = data_list[0]

                def conv(v):
                    if isinstance(v, (list, np.ndarray)):
                        return v[0]
                    return v

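                # zip(*stream) separates times from value arrays; the inner
                # zip(*...) transposes the value arrays so coordinate `coord`
                # of every sample comes out as one scalar time series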
                signal = [conv(v) for v in zip(*zip(*stream)[1])[coord]]

                time_ind = np.array(zip(*stream)[0], dtype=np.float64)

                # normalize time indicies by offsetting to first value
                time_ind_beg = time_ind[0]
                for i, v in enumerate(time_ind):
                    time_ind[i] = float(time_ind[i] - time_ind_beg)

                times = time_ind
                for deg in range(3):
                    for sigma_ind, sigma in enumerate(self.i_sigma_list):
                        s = self.impact_fgc.convolve_signal(
                            signal, deg, sigma_ind)
                        i_sigs_proc[deg][sigma_ind].append(s)
                    for sigma_ind, sigma in enumerate(self.t_sigma_list):
                        s = self.type_fgc.convolve_signal(
                            signal, deg, sigma_ind)
                        t_sigs_proc[deg][sigma_ind].append(s)

            if rospy.is_shutdown():
                sys.exit()

            models[p]["impact_features"] = [[[]
                                             for x in range(self.I_NUM_SIGMA)]
                                            for y in range(3)]
            models[p]["type_features"] = [[[] for x in range(self.T_NUM_SIGMA)]
                                          for y in range(3)]
            for deg in range(3):
                for sigma in range(self.I_NUM_SIGMA):
                    models[p]["impact_features"][deg][sigma] = zip(
                        *i_sigs_proc[deg][sigma])
                for sigma in range(self.T_NUM_SIGMA):
                    models[p]["type_features"][deg][sigma] = zip(
                        *t_sigs_proc[deg][sigma])
            models[p]["time"] = times
            models[p]["i_sigma_list"] = self.i_sigma_list
            models[p]["t_sigma_list"] = self.t_sigma_list

        return models
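
    # Each feature above is the signal convolved with the 0th, 1st, or 2nd
    # derivative of a Gaussian at several scales (sigma values), i.e. smoothed
    # value, slope, and curvature estimates.  A rough standalone sketch of one
    # such convolution in plain NumPy (FastGaussConvolve is assumed to do the
    # equivalent with precomputed filters):
    #
    #   def gauss_deriv_filter(signal, sigma, deg, width=4):
    #       t = np.arange(-width * sigma, width * sigma + 1.)
    #       g = np.exp(-t ** 2 / (2. * sigma ** 2))
    #       g /= np.sum(g)
    #       kern = g
    #       for _ in range(deg):
    #           kern = np.gradient(kern)       # derivative of the kernel
    #       return np.convolve(signal, kern, mode='same')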

    # def ready_collision_detection(self):
    #     # wait for the classifiers to finish loading
    #     self.impact_classifier.finish_loading()
    #     # self.coll_type_classifier.finish_loading()
    #     self.detect_ready = True

    ##
    # Begin collision detection on the incoming perception data, either
    # dynamically with the trained impact/type classifiers, or statically by
    # thresholding the smoothed derivative of selected percepts.
    #
    # @param dynamic_detection use the classifier-based detector if True
    # @param callback function called when a collision is triggered or classified
    # @param static_collision_type which percepts to threshold in static mode
    #                              ("all", "pressure", a percept name, or a dict)
    # @param debug if True, record filter outputs instead of triggering collisions
    def begin_collision_detection(self,
                                  dynamic_detection=False,
                                  callback=None,
                                  static_collision_type=None,
                                  debug=False):

        if not self.perception_started:
            err("Perception hasn't been started!")
            return False
        # if not self.detect_ready:
        #     err("ready_collision_detection hasn't been called")
        #     return False

        self.is_dynamic = dynamic_detection
        self.callback = callback
        if not dynamic_detection:
            self._setup_collision_type(static_collision_type)
        self.collision_detected = False
        self.collision_finished = False
        self.debug = debug
        self.collision_type = "No collision"
        self.cur_iter = 0
        self.cur_inds = {}

        for k in self.perceptions:
            self.cur_inds[k] = 0

        self.i_data_buffers = {}
        self.t_data_buffers = {}
        for k in self.perceptions:
            self.i_data_buffers[k] = [
                np.zeros(
                    (self.impact_fgc.filter_len, self.impact_fgc.filter_len))
                for i in range(self.perception_lengths[k])
            ]
            self.t_data_buffers[k] = [
                np.zeros((self.type_fgc.filter_len, self.type_fgc.filter_len))
                for i in range(self.perception_lengths[k])
            ]
        self.end_monitor_lock = threading.Lock()

        self.data_thread_spawner_lock1 = threading.Lock()
        self.data_thread_spawner_lock2 = threading.Lock()
        self.data_thread_spawner = RateCaller(self._data_spawn_thread,
                                              self.SAMPLING_RATE)
        self.data_thread_spawner.run()

        self.beg_time = rospy.Time.now().to_sec()

        self.t_sum, self.t_num = 0., 0.
        self.normal_dict_counts = []
        self.temp_dict = {}
        for k in self.perceptions:
            self.temp_dict[k] = [[] for i in range(self.perception_lengths[k])]

        return True

    def _setup_collision_type(self, static_collision_type):
        if static_collision_type == "all":
            static_dict = {
                "accelerometer": range(3),
                "joint_angles": range(7),
                "joint_velocities": range(7),
                "joint_efforts": range(7),
                "r_finger_periph_pressure": range(1),
                "r_finger_pad_pressure": range(1),
                "l_finger_periph_pressure": range(1),
                "l_finger_pad_pressure": range(1),
                "gripper_pose": range(7)
            }
        elif static_collision_type in self.perception_names:
            static_dict = {
                static_collision_type:
                range(self.perception_lengths[static_collision_type])
            }
        elif static_collision_type == "pressure":
            static_dict = {
                "r_finger_periph_pressure": range(1),
                "r_finger_pad_pressure": range(1),
                "l_finger_periph_pressure": range(1),
                "l_finger_pad_pressure": range(1)
            }
        elif isinstance(static_collision_type, dict):
            static_dict = static_collision_type
        else:
            err("Bad static_collision_type")
            # fall back to monitoring nothing rather than crashing below
            static_dict = {}
        self.static_dict = static_dict

    def _data_spawn_thread(self):
        # only one thread allowed to wait at a time
        # all other threads will pass through
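        # lock1 admits at most one spawner as the "waiter"; lock2 is held while a
        # DataThread is processing and is released by that thread when it finishes
        # (see DataThread.run), letting the next waiter start its own thread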
        if self.data_thread_spawner_lock1.acquire(False):
            if self.data_thread_spawner_lock2.acquire(True):
                self.data_thread_spawner_lock1.acquire(False)
                self.data_thread_spawner_lock1.release()
                mt = self.DataThread(self)
                mt.start()

    class DataThread(threading.Thread):
        def __init__(self, apm):
            threading.Thread.__init__(self)
            self.apm = apm

        def run(self):
            self.apm._monitor_data_collector()
            self.apm.data_thread_spawner_lock2.acquire(False)
            self.apm.data_thread_spawner_lock2.release()

    def _monitor_data_collector(self):
        st_time = rospy.Time.now().to_sec()
        #########################################
        if rospy.is_shutdown():
            self._trigger_collision()
            self._finish_collision("rospy shutdown")
            return
        add_data = []
        for k in self.perceptions:
            p_vals = self.perceptions[k]()[1]
            for i, v in enumerate(p_vals):
                # populate impact collision buffers
                for b in range(self.impact_fgc.filter_len - 1, -1, -1):
                    self.i_data_buffers[k][i][(b - self.cur_iter) %
                                              self.impact_fgc.filter_len,
                                              b] = v
                    if self.debug:
                        if b == 0 and k == "joint_angles":  # and ((i == 0) or (i == 1)):
                            print v,
                # populate collision type detection buffers
                for b in range(self.type_fgc.filter_len - 1, -1, -1):
                    self.t_data_buffers[k][i][(b - self.cur_iter) %
                                              self.type_fgc.filter_len, b] = v

        if self.debug:
            print

        if self.is_dynamic:
            if not self.collision_detected:
                self._dynamic_collision_detection()
            else:
                self._dynamic_collision_classifying()
        else:
            self._static_collision_detection()
        self.cur_iter += 1
        #########################################
        end_time = rospy.Time.now().to_sec()
        self.t_sum += end_time - st_time
        self.t_num += 1.

    def _dynamic_collision_detection(self):

        if self.cur_iter < self.impact_fgc.filter_len:
            return
        instance = np.zeros((self.i_instance_len, 1))
        i_place = 0
        for k in self.PERCEPTION_ORDER:
            for coord in range(len(self.i_data_buffers[k])):
                for deg in range(3):
                    if deg not in self.I_DEGREE_DICT[k][coord]:
                        continue
                    for sigma_i, sigma_t in enumerate(self.i_sigma_list):
                        val = self.impact_fgc.convolve_pt(
                            self.i_data_buffers[k][coord][(
                                self.cur_iter % self.impact_fgc.filter_len)],
                            deg, sigma_i)
                        instance[i_place, 0] = val
                        i_place += 1

        has_collided = self.impact_classifier.predict(instance)

        if has_collided and not self.debug:
            if not self.collision_detected:
                self._trigger_collision()

    def _dynamic_collision_classifying(self):

        if self.cur_iter < self.type_fgc.filter_len:
            # TODO HANDLE THIS CASE
            return
        if ((self.cur_iter - self.coll_iter) > self.type_fgc.filter_len / 2):
            return
        instance = np.zeros((self.t_instance_len, 1))
        i_place = 0
        for k in self.PERCEPTION_ORDER:
            for coord in range(len(self.t_data_buffers[k])):
                for deg in range(3):
                    if deg not in self.T_DEGREE_DICT[k][coord]:
                        continue
                    for sigma_i, sigma_t in enumerate(self.t_sigma_list):
                        val = self.type_fgc.convolve_pt(
                            self.t_data_buffers[k][coord][(
                                self.cur_iter % self.type_fgc.filter_len)],
                            deg, sigma_i)
                        instance[i_place, 0] = val
                        i_place += 1

        collision_class = self.coll_type_classifier.predict(instance)

        if collision_class == 1.:
            coll_type = "Environmental collision"
        else:
            coll_type = "Table collision"
        self._finish_collision(coll_type)

    def _static_collision_detection(self):
        if self.cur_iter < self.impact_fgc.filter_len:
            return
        for k in self.PERCEPTION_ORDER:
            if k not in self.static_dict:
                continue
            for coord in self.static_dict[k]:
                if k == "gripper_pose" and coord >= 3:
                    # Quaternions are unreliable, this should be fixed
                    # at some point
                    if self.debug:
                        self.temp_dict[k][coord].append(0.)
                    continue
                val = self.impact_fgc.convolve_pt(
                    self.i_data_buffers[k][coord][self.cur_iter %
                                                  self.impact_fgc.filter_len],
                    1, self.STATIC_SIGMA_INDEX)
                if self.debug:
                    self.temp_dict[k][coord].append(val)
                if (np.fabs(val) >= self.STATIC_DERIV_THRESH[k][coord]
                        and not self.debug):
                    print "Collision:", val, "Thresh:", self.STATIC_DERIV_THRESH[
                        k][coord]
                    self._trigger_collision()
                    self._finish_collision(k + " collision", coord)
                # if k == "r_finger_pad_pressure" and random.randint(0,40) == 0:
                #     print val

    def _trigger_collision(self):
        self.collision_detected = True
        self.coll_iter = self.cur_iter - self.impact_fgc.filter_len / 2
        if self.callback is not None:
            self.callback()

    def _finish_collision(self, coll_type="Unlabeled", coord=0):
        if not self.collision_finished:
            print '-' * 60
            print
            print '                       %s detected' % coll_type
            print
            print '-' * 60
            log("%s reported" % coll_type)
            self.collision_type = coll_type
            self.collision_coord = coord
            if self.callback is not None:
                self.callback()
            self.collision_finished = True
            self.end_collision_detection()

    ##
    # Stop collision detection, waiting for any in-progress classification to
    # finish, then shut down the data collection thread spawner.
    def end_collision_detection(self):
        while (not self.collision_finished and self.collision_detected
               and not rospy.is_shutdown()):
            rospy.sleep(0.01)
        with self.end_monitor_lock:
            if self.data_thread_spawner is not None:
                print "Avg cvpt: %1.9f" % (self.t_sum / self.t_num)
                print "cvpt sum: %1.5f" % (self.t_sum)
                print self.t_num
                self.data_thread_spawner.stop()
                self.data_thread_spawner_lock1.acquire(False)
                self.data_thread_spawner_lock1.release()
                self.data_thread_spawner_lock2.acquire(False)
                self.data_thread_spawner_lock2.release()
                self.data_thread_spawner = None
            return