# Example #1
 def message_handler(self, m):
     """Handle one incoming MAVLink message and buffer vehicle state.

     GLOBAL_POSITION_INT updates the position buffer, ATTITUDE updates the
     attitude buffer, and SYSTEM_TIME is used to estimate the offset
     between the local clock and the vehicle clock.
     """
     receive_time = time.time()
     typ = m.get_type()
     #buffer vehicle location
     if typ == 'GLOBAL_POSITION_INT':
         timestamp = m.time_boot_ms * 1000  #usec
         #scale raw MAVLink ints: lat/lon arrive as degE7, relative_alt as mm
         #BUGFIX: longitude previously read from m.lat, so lon == lat
         (lat, lon, alt) = (m.lat / 1.0e7, m.lon / 1.0e7,
                            m.relative_alt / 1000.0)
         (vx, vy, vz) = (m.vx / 100.0, m.vy / 100.0, m.vz / 100.0
                         )  #meter/sec
         loc = Location(lat, lon, alt)
         vel = Point3(vx, vy, vz)
         self.vehicle_pos.put_location(timestamp, loc, vel)
     #buffer vehicle attitude
     if typ == 'ATTITUDE':
         timestamp = m.time_boot_ms * 1000  #usec
         att = Attitude(m.pitch, m.yaw, m.roll)  #radians
         vel = Point3(m.rollspeed, m.pitchspeed, m.yawspeed)  #rad/sec
         self.vehicle_pos.put_attitude(timestamp, att, vel)
     #attempt to sync system time
     if typ == 'SYSTEM_TIME':
         timestamp = m.time_boot_ms * 1000  #usec
         diff = (receive_time * 1e6) - timestamp
         #Use shift where USB delay is smallest. This does not account for clock drift!
         if self.vehicle_time_diff is None or diff < self.vehicle_time_diff:
             self.vehicle_time_diff = diff
# Example #2
    def test(self):

        self.put_location(0, Location(37.691163, -122.155766, 10),
                          Point3(30, 10, 2))
        self.put_location(6000000, Location(37.691295, -122.151861, 20),
                          Point3(-30, 10, 3))
        self.put_location(10000000, Location(37.690735, -122.152164, 20),
                          Point3(5, -6, -10))
        for i in range(0, 10000000, 1000000):
            print self.get_location(i).lat, ',', self.get_location(i).lon
        #plot points on http://www.darrinward.com/lat-long/

        self.put_attitude(0, Attitude(30, 60, 10), Point3(30, 10, 2))
        self.put_attitude(6000000, Attitude(30, 80, 20), Point3(-30, 10, 3))
        self.put_attitude(10000000, Attitude(30, 90, 20), Point3(5, -6, -10))
        for i in range(0, 10000000, 1000000):
            print self.get_attitude(i).roll, ',', self.get_attitude(i).pitch
# Example #3
    def __init__(self):
        """Allocate the fixed-size location and attitude buffers and
        pre-fill them with neutral entries."""
        self.size = 50
        self.location_buffer = np.empty(self.size, object)
        self.attitude_buffer = np.empty(self.size, object)
        self.lb_index = 0
        self.ab_index = 0

        #pre-load every slot so the buffers start fully populated
        for _ in range(self.size):
            self.put_location(0, Location(0, 0, 0), Point3(0, 0, 0))
            self.put_attitude(0, Attitude(0, 0, 0), Point3(0, 0, 0))
# Example #4
 def attitude(self):
     """Return the module's current Attitude (pitch, yaw, roll order)."""
     mod = self.__module
     return Attitude(mod.pitch, mod.yaw, mod.roll)
    def get_attitude(self, desired_time_in_sec):
        """Return an interpolated Attitude for desired_time_in_sec.

        Finds the two buffered attitudes bracketing the requested time and
        linearly blends them.  Roll and yaw are blended through wrap_PI so
        the interpolation takes the short way around +/-pi; pitch is blended
        without wrapping (presumably it stays well inside +/-pi/2 -- TODO
        confirm).  Falls back to the live vehicle attitude when the buffer
        dictionary is empty, and clamps to the first/last entry when the
        requested time lies outside the buffered range.
        """
        # return current attitude immediately if dictionary is empty
        if len(self.att_dict) == 0:
            return self.vehicle.attitude

        # get times from dict
        keys = sorted(self.att_dict.keys())

        # debug
        #print "AttDict looking for %f" % desired_time_in_sec

        # initialise best before and after keys
        key_before = keys[0]
        time_diff_before = (key_before - desired_time_in_sec)
        key_after = keys[len(keys)-1]
        time_diff_after = (key_after - desired_time_in_sec)

        # handle case where we hit the time exactly or the time is beyond the end
        if (time_diff_before >= 0):
            #debug
            #print "Time %f was before first entry's time %f" % (desired_time_in_sec, key_before)
            return self.att_dict[key_before]
        if (time_diff_after <= 0):
            #debug
            #print "Time %f was after last entry's time %f" % (desired_time_in_sec, key_after)
            return self.att_dict[key_after]

        # iteration through attitude dictionary
        # (linear scan over sorted keys, tightening the bracket on each side)
        for t in keys:
            # calc this dictionary entry's time diff from the desired time
            time_diff = t - desired_time_in_sec

            # if before the desired time but after the current best 'before' time use it
            if time_diff <= 0 and time_diff > time_diff_before:
                time_diff_before = time_diff
                key_before = t

            # if after the desired time but before the current best 'after' time use it
            if time_diff >= 0 and time_diff < time_diff_after:
                time_diff_after = time_diff
                key_after = t

        # calc time between before and after attitudes
        # (time_diff_before <= 0 <= time_diff_after here; the early returns
        #  above guarantee this is strictly positive for distinct keys)
        tot_time_diff = -time_diff_before + time_diff_after

        # debug
        # NOTE(review): this only reports the problem -- the division below
        # would still raise ZeroDivisionError if tot_time_diff were 0
        if (tot_time_diff <= 0):
            print "Div By Zero!"
            print "des time:%f" % desired_time_in_sec
            print "num keys:%d" % len(keys)
            print "key bef:%f aft:%f" % (key_before, key_after)
            print "keys: %s" % keys

        # get attitude before and after
        att_before = self.att_dict[key_before]
        att_after = self.att_dict[key_after]

        # interpolate roll, pitch and yaw values
        # interp_val is the fractional position of the desired time in [before, after]
        interp_val = (-time_diff_before / tot_time_diff)
        roll = wrap_PI(att_before.roll + (wrap_PI(att_after.roll - att_before.roll) * interp_val))
        pitch = att_before.pitch + (att_after.pitch - att_before.pitch) * interp_val
        yaw = wrap_PI(att_before.yaw + (wrap_PI(att_after.yaw - att_before.yaw) * interp_val))

        ret_att = Attitude(pitch, yaw, roll)

        return ret_att
# Example #6
    def run(self):
        """Main landing loop.

        Grabs frames (from the simulator or the camera), dispatches them to
        the image processor, and reports detected landing-target angles to
        the autopilot until the vehicle disconnects.
        """

        VN_logger.text(VN_logger.GENERAL, 'Running {0}'.format(self.name()))

        #start a video capture
        if (self.simulator):
            VN_logger.text(VN_logger.GENERAL, 'Using simulator')
            sim.load_target(self.target_file, self.target_size)
            sim.set_target_location(veh_control.get_home(True))
            #sim.set_target_location(Location(0,0,0))

        else:
            VN_video.start_capture(self.camera_index)

        #create an image processor
        #detector = CircleDetector()
        detector = TopCode()

        #create a queue for images awaiting their async processing results
        imageQueue = []

        #initialize autopilot variables
        location = Location(0, 0, 0)
        attitude = Attitude(0, 0, 0)

        while veh_control.is_connected():
            '''
            #kill camera for testing
            if(cv2.waitKey(2) == 1113938):
                self.kill_camera =  not self.kill_camera
            '''

            #we are in the landing zone or in a landing mode and we are still running the landing program
            #just because the program is running does not mean it controls the vehicle
            #i.e. in the landing area but not in a landing mode
            #FIXME add inside_landing_area() back to conditional
            if (self.always_run) or (veh_control.get_mode()
                                     == "LAND") or (veh_control.get_mode()
                                                    == "RTL"):

                #update how often we dispatch a command
                VN_dispatcher.calculate_dispatch_schedule()

                #update simulator
                if (self.simulator):
                    #get info from autopilot
                    location = veh_control.get_location()
                    attitude = veh_control.get_attitude()
                    sim.refresh_simulator(location, attitude)

                # grab an image
                frame = None
                capStart = current_milli_time()
                if (self.simulator):
                    frame = sim.get_frame()
                else:
                    frame = VN_video.get_image()
                capStop = current_milli_time()
                '''
                if(self.kill_camera):
                    frame[:] = (0,255,0)
                '''

                #update capture time
                VN_dispatcher.update_capture_time(capStop - capStart)

                #Process image
                #We schedule the process as opposed to waiting for an available core
                #This brings consistency and prevents overwriting a dead process before
                #information has been grabbed from the Pipe
                if VN_dispatcher.is_ready():
                    #queue the image for later use: displaying image, overlays, recording
                    imageQueue.append((frame, self.frames_captured))

                    #the function must be run directly from the class
                    VN_dispatcher.dispatch(target=detector.analyze_frame_async,
                                           args=(frame, self.frames_captured))

                    self.frames_captured += 1

                #retrieve results
                if VN_dispatcher.is_available():

                    #results of image processor
                    results = VN_dispatcher.retreive()

                    # get image that was passed with the image processor
                    # NOTE(review): if results[0] matches no queued frame,
                    # img/frame_id below keep stale values from a previous
                    # iteration (or are unbound on the first) -- confirm the
                    # dispatcher always returns a queued frame id
                    for f in imageQueue:
                        img, frame_id = f
                        if results[0] == frame_id:
                            imageQueue.remove(f)
                            break
                    VN_logger.text(VN_logger.GENERAL,
                                   'Frame {0}'.format(frame_id))

                    #overlay gui
                    #rend_Image = gui.add_target_highlights(img, results[3])
                    rend_Image = gui.add_ring_highlights(img, results[4])

                    #show/record images
                    VN_logger.image(VN_logger.RAW, img)
                    VN_logger.image(VN_logger.GUI, rend_Image)

                    #display/log data
                    VN_logger.text(
                        VN_logger.ALGORITHM,
                        'RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'
                        .format(results[1], results[2], results[3],
                                results[4]))
                    VN_logger.text(
                        VN_logger.DEBUG,
                        'Rings detected: {0}'.format(len(results[4])))

                    #send results if we found the landing pad
                    if (results[2] is not None):
                        #shift origin to center of the image
                        x_pixel = results[2][0] - (self.camera_width / 2.0)
                        y_pixel = results[2][1] - (
                            self.camera_height / 2.0
                        )  #y-axis is inverted??? Works with arducopter

                        #convert target location to angular radians
                        x_angle = x_pixel * (self.camera_hfov /
                                             self.camera_width) * (math.pi /
                                                                   180.0)
                        y_angle = y_pixel * (self.camera_vfov /
                                             self.camera_height) * (math.pi /
                                                                    180.0)

                        #send commands to autopilot
                        veh_control.report_landing_target(
                            x_angle, y_angle, results[3], 0, 0)

            else:
                VN_logger.text(VN_logger.GENERAL, 'Not in landing mode')

        #terminate program
        VN_logger.text(VN_logger.GENERAL,
                       'Vehicle disconnected, Program Terminated')
        if (self.simulator == False):
            VN_video.stop_capture()
# Example #7
 def get_attitude(self, timestamp):
     """Return the buffered attitude interpolated at *timestamp*, or None
     when no interpolation is available."""
     interp = self._interpolate(self.attitude_buffer, timestamp)
     if interp is None:
         return None
     #rebuild the Attitude from the interpolated vector components
     return Attitude(interp.y, interp.z, interp.x)
# Example #8
    def run(self):
        """Main landing loop.

        Grabs frames (from the simulator or the camera), runs the ring
        detector on each, and hands the results to the landing controller
        until the vehicle disconnects.
        """
        #start a video capture
        self.frames_captured = 0 #how many frames have been captured
        if(self.use_simulator):
            self.logger.text(self.logger.GENERAL, 'Using simulator')
            self.sim = PrecisionLandSimulator(self.config)
            self.sim.set_target_location(self.veh_control.get_home(True))
        else:
            self.video.start_capture()


        #create an image processor
        detector = Ring_Detector(self.config)
        self.last_target = None

        #create a queue for images
        #NOTE(review): imageQueue is created but never used in this method
        imageQueue = []

        #initialize autopilot variables
        location = Location(0,0,0)
        attitude = Attitude(0,0,0)

        #control the vehicle state in the background
        self.land_control = Land_Control(self.config, self.veh_control)
        control_thread = Threader(target=self.land_control.update, args=None,iterations = -1)

        while self.veh_control.is_connected():

            #start thread if it hasn't been started
            if not control_thread.is_alive():
                control_thread.start()

            #we are in a landing mode and we are still running the landing program
            if self.always_run or (self.veh_control.get_mode() == "LAND" or self.veh_control.get_mode() == "RTL" or self.veh_control.get_mode() == "GUIDED"):
                start = time.time()
                #grab vehicle pos
                ret, location = self.veh_control.get_location()
                ret, attitude = self.veh_control.get_attitude()
                altitude = location.alt #alt above terrain not used...yet
                timestamp = 0 # timestamp when frame was grabbed in vehicle time
                perf.enter()
                # grab an image
                frame = None
                if(self.use_simulator):
                    self.sim.set_target_location(self.veh_control.get_home(True))
                    self.sim.refresh_simulator(location,attitude)
                    ret,frame = self.sim.get_image()
                else:
                    #timestamp = self.veh_control.time_us()
                    ret,frame = self.video.get_image()

                #NOTE(review): ret is never checked; if the grab failed and
                #frame is None, frame.shape below will raise -- confirm the
                #capture sources always return a valid image
                if(len(frame.shape)>2):
                    #convert color frames to grayscale for the detector
                    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
                perf.exit(function = 'get_image')

                perf.enter()
                small_frame, scale = self.preprocess(frame, 0.5 ,altitude)
                perf.exit(function='reduce_frame')
                #run target detector
                perf.enter()
                results = detector.analyze_frame(small_frame,self.frames_captured,timestamp,altitude)
                perf.exit(function='analyze_frame')
                perf.enter()
                #process image data
                self.last_target = self.process_results(results, small_frame,scale)
                perf.exit(function='process_results')
                #perf.print_package("PrecisionLand")
                perf.clear()
                self.frames_captured += 1
                stop = time.time()
                print "fps", round(1.0/(stop-start),1)
            else:
                self.logger.text(self.logger.GENERAL, 'Not in landing mode')
                time.sleep(0.5)

        #terminate program
        self.logger.text(self.logger.GENERAL, 'Vehicle disconnected, Program Terminated')
        Threader.stop_all()
        if(self.use_simulator == False):
            self.video.stop_capture()